From 4bd29e1a04f961fc30e9a571a93960fa218a13fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Cardoso?= Date: Wed, 4 Oct 2023 23:22:38 +0100 Subject: [PATCH] Drop apso-log because scalalogging provides all the features --- README.md | 89 ++++++++----------- akka-http/build.sbt | 1 + .../apso/akka/http/ProxySupport.scala | 7 +- aws/build.sbt | 1 + .../com/velocidi/apso/aws/S3Bucket.scala | 40 ++++----- build.sbt | 13 ++- docs/README.md | 15 ---- elasticsearch/build.sbt | 1 + .../ElasticsearchBulkInserter.scala | 23 +++-- .../velocidi/apso/encryption/Decryptor.scala | 13 ++- .../velocidi/apso/encryption/Encryptor.scala | 21 +++-- .../apso/io/InsistentInputStream.scala | 6 +- .../apso/io/LocalFileDescriptor.scala | 11 +-- .../velocidi/apso/io/S3FileDescriptor.scala | 4 +- .../velocidi/apso/io/SftpFileDescriptor.scala | 15 ++-- log/build.sbt | 3 - .../scala/com/velocidi/apso/Logging.scala | 21 ----- .../velocidi/apso/profiling/SimpleJmx.scala | 9 +- 18 files changed, 116 insertions(+), 177 deletions(-) delete mode 100644 log/build.sbt delete mode 100644 log/src/main/scala/com/velocidi/apso/Logging.scala diff --git a/README.md b/README.md index b5475b30..45db6fb0 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,6 @@ Please take into account that the library is still in an experimental stage and - [Geo](#geo) - [Implicits](#implicits) - [JreVersionHelper](#jreversionhelper) - - [Logging](#logging) - [ProgressBar](#progressbar) - [Reflect](#reflect) - [Retry](#retry) @@ -184,20 +183,6 @@ import com.velocidi.apso.JreVersionHelper JreVersionHelper.jreVersion ``` -### Logging - -The `Logging` and `StrictLogging` traits allows mixing in SLF4J `Logger` objects. The difference between the two is that in the former the `Logger` object is initialized lazily, while in the latter it is initialized strictly: - -```scala -import com.velocidi.apso.Logging - -class A extends Logging {} - -val a = new A - -a.log.info("test") -``` - ### ProgressBar The `ProgressBar` represents a widget to print a dynamic progress bar in a console. @@ -271,7 +256,7 @@ def f: Future[Int] = { } Await.result(Retry.retryFuture(10)(f), Duration.Inf) -// res21: Int = 6 +// res20: Int = 6 var attempts = 0 // attempts: Int = 0 @@ -285,7 +270,7 @@ def m() = { } Retry.retry(10)(m()) -// res22: util.Try[Int] = Success(value = 6) +// res21: util.Try[Int] = Success(value = 6) ``` ## Akka HTTP @@ -390,11 +375,11 @@ val cachedFn = ((i: Int) => { // cachedFn: MemoizeFn1[scalacache.package.Id, Int, Int] = cachedFn(2) -// res26: scalacache.package.Id[Int] = 0 +// res25: scalacache.package.Id[Int] = 0 cachedFn(2) -// res27: scalacache.package.Id[Int] = 0 +// res26: scalacache.package.Id[Int] = 0 x -// res28: AtomicInteger = 2 +// res27: AtomicInteger = 2 val y = new AtomicInteger(0) // y: AtomicInteger = 3 @@ -406,11 +391,11 @@ val cachedFutFn = ((i: Int) => Future { // cachedFutFn: MemoizeFn1[Future, Int, Int] = Await.result(cachedFutFn(3), Duration.Inf) -// res29: Int = 0 +// res28: Int = 0 Await.result(cachedFutFn(3), Duration.Inf) -// res30: Int = 0 +// res29: Int = 0 y -// res31: AtomicInteger = 3 +// res30: AtomicInteger = 3 ``` ## Collections @@ -484,13 +469,13 @@ val nt = t.set("one", 1).set("two", 2).set("three", 3).set("four", 4) // ... 
nt.get("one") -// res33: Option[Int] = Some(value = 1) +// res32: Option[Int] = Some(value = 1) nt.get("two") -// res34: Option[Int] = Some(value = 2) +// res33: Option[Int] = Some(value = 2) nt.get("five") -// res35: Option[Int] = None +// res34: Option[Int] = None ``` ### TypedMap @@ -504,25 +489,25 @@ val m = TypedMap("one", 2, 3L) // m: TypedMap[Any] = Map(java.lang.String -> one, Int -> 2, Long -> 3) m[String] -// res37: String = "one" +// res36: String = "one" m[Int] -// res38: Int = 2 +// res37: Int = 2 m[Long] -// res39: Long = 3L +// res38: Long = 3L m.get[String] -// res40: Option[String] = Some(value = "one") +// res39: Option[String] = Some(value = "one") m.get[Int] -// res41: Option[Int] = Some(value = 2) +// res40: Option[Int] = Some(value = 2) m.get[Long] -// res42: Option[Long] = Some(value = 3L) +// res41: Option[Long] = Some(value = 3L) m.get[Char] -// res43: Option[Char] = None +// res42: Option[Char] = None ``` ### Iterators @@ -540,7 +525,7 @@ val circularIterator = CircularIterator(List(1, 2, 3).iterator) // circularIterator: CircularIterator[Int] = non-empty iterator circularIterator.take(10).toList -// res45: List[Int] = List(1, 2, 3, 1, 2, 3, 1, 2, 3, 1) +// res44: List[Int] = List(1, 2, 3, 1, 2, 3, 1, 2, 3, 1) ``` #### MergedBufferedIterator @@ -558,7 +543,7 @@ val it1 = MergedBufferedIterator(List( // it1: MergedBufferedIterator[Int] = empty iterator it1.toList -// res47: List[Int] = List( +// res46: List[Int] = List( // 0, // 0, // 0, @@ -608,7 +593,7 @@ val it2 = MergedBufferedIterator(List( // it2: MergedBufferedIterator[Int] = non-empty iterator it2.mergeSorted(Iterator(4, 6).buffered).toList -// res48: List[Int] = List(1, 2, 3, 4, 5, 6) +// res47: List[Int] = List(1, 2, 3, 4, 5, 6) ``` ## Encryption @@ -651,10 +636,10 @@ libraryDependencies += "com.velocidi" %% "apso-hashing" % "0.18.8" import com.velocidi.apso.hashing.Implicits._ "abcd".md5 -// res51: String = "e2fc714c4727ee9395f324cd2e7f331f" +// res50: String = "e2fc714c4727ee9395f324cd2e7f331f" "abcd".murmurHash -// res52: Long = 7785666560123423118L +// res51: Long = 7785666560123423118L ``` ## IO @@ -729,7 +714,7 @@ val js2 = Json.obj( ``` ```scala js1.deepMerge(js2).spaces2 -// res57: String = """{ +// res56: String = """{ // "c" : 4, // "d" : { // "e" : 5, @@ -745,7 +730,7 @@ fromFullPaths(Seq( "b.d" -> 3.asJson, "e" -> "xpto".asJson, "f.g.h" -> 5.asJson)).spaces2 -// res58: String = """{ +// res57: String = """{ // "f" : { // "g" : { // "h" : 5 @@ -760,26 +745,26 @@ fromFullPaths(Seq( // }""" js1.getField[Int]("a") -// res59: Option[Int] = Some(value = 2) +// res58: Option[Int] = Some(value = 2) js1.getField[Int]("d.f") -// res60: Option[Int] = Some(value = 6) +// res59: Option[Int] = Some(value = 6) js1.getField[Int]("x") -// res61: Option[Int] = None +// res60: Option[Int] = None js1.deleteField("a") -// res62: Json = JObject( +// res61: Json = JObject( // value = object[b -> 3,d -> { // "f" : 6 // }] // ) js1.deleteField("d.f") -// res63: Json = JObject( +// res62: Json = JObject( // value = object[a -> 2,b -> 3,d -> { // // }] // ) js1.deleteField("x") -// res64: Json = JObject( +// res63: Json = JObject( // value = object[a -> 2,b -> 3,d -> { // "f" : 6 // }] @@ -793,13 +778,13 @@ The `JsonConvert` object contains helpers for converting between JSON values and import com.velocidi.apso.circe._ JsonConvert.toJson("abcd") -// res66: io.circe.Json = JString(value = "abcd") +// res65: io.circe.Json = JString(value = "abcd") JsonConvert.toJson(1) -// res67: io.circe.Json = JNumber(value = 
JsonLong(value = 1L)) +// res66: io.circe.Json = JNumber(value = JsonLong(value = 1L)) JsonConvert.toJson(Map(1 -> 2, 3 -> 4)) -// res68: io.circe.Json = JObject(value = object[1 -> 2,3 -> 4]) +// res67: io.circe.Json = JObject(value = object[1 -> 2,3 -> 4]) ``` ## Profiling @@ -840,10 +825,10 @@ import com.velocidi.apso.time._ import com.velocidi.apso.time.Implicits._ (new DateTime("2012-01-01") to new DateTime("2012-01-01")).toList -// res70: List[DateTime] = List(2012-01-01T00:00:00.000Z) +// res69: List[DateTime] = List(2012-01-01T00:00:00.000Z) (new DateTime("2012-02-01") until new DateTime("2012-03-01") by 1.day) -// res71: IterableInterval = IndexedSeq( +// res70: IterableInterval = IndexedSeq( // 2012-02-01T00:00:00.000Z, // 2012-02-02T00:00:00.000Z, // 2012-02-03T00:00:00.000Z, @@ -876,7 +861,7 @@ import com.velocidi.apso.time.Implicits._ // )) (new DateTime("2012-01-01") until new DateTime("2012-02-01") by 2.minutes) -// res72: IterableInterval = IndexedSeq( +// res71: IterableInterval = IndexedSeq( // 2012-01-01T00:00:00.000Z, // 2012-01-01T00:02:00.000Z, // 2012-01-01T00:04:00.000Z, diff --git a/akka-http/build.sbt b/akka-http/build.sbt index abaef5e4..d5ba2506 100644 --- a/akka-http/build.sbt +++ b/akka-http/build.sbt @@ -1,6 +1,7 @@ import Dependencies._ libraryDependencies ++= Seq( + ScalaLogging, AkkaActor % Provided, AkkaHttp % Provided, AkkaHttpCore % Provided, diff --git a/akka-http/src/main/scala/com/velocidi/apso/akka/http/ProxySupport.scala b/akka-http/src/main/scala/com/velocidi/apso/akka/http/ProxySupport.scala index ecc9b22d..e0a328f9 100644 --- a/akka-http/src/main/scala/com/velocidi/apso/akka/http/ProxySupport.scala +++ b/akka-http/src/main/scala/com/velocidi/apso/akka/http/ProxySupport.scala @@ -15,8 +15,7 @@ import akka.stream.Materializer import akka.stream.QueueOfferResult.{Dropped, Enqueued, Failure => OfferFailure, QueueClosed} import akka.stream.scaladsl.{Keep, Sink, Source} import com.typesafe.config.ConfigFactory - -import com.velocidi.apso.Logging +import com.typesafe.scalalogging.LazyLogging /** Adds proxy to akka-http services to proxy requests to other hosts. 
* @@ -160,7 +159,7 @@ trait ProxySupport extends ClientIPDirectives { reqQueueSize: Int = defaultQueueSize, strictTimeout: Option[FiniteDuration] = None )(implicit system: ActorSystem, mat: Materializer) - extends Logging { + extends LazyLogging { import system.dispatcher @@ -207,7 +206,7 @@ trait ProxySupport extends ClientIPDirectives { case OfferFailure(ex) => Future.failed(new RuntimeException("Queue offering failed", ex)) case QueueClosed => Future.failed(new RuntimeException("Queue is completed before call!?")) case Dropped => - log.warn(s"Request queue for $host:$port is full") + logger.warn(s"Request queue for $host:$port is full") if (failOnDrop) Future.failed(new RuntimeException("Dropping request (Queue is full)")) else Future.successful(Complete(HttpResponse(StatusCodes.ServiceUnavailable))) } diff --git a/aws/build.sbt b/aws/build.sbt index 3f2c446b..219d63ca 100644 --- a/aws/build.sbt +++ b/aws/build.sbt @@ -1,6 +1,7 @@ import Dependencies._ libraryDependencies ++= Seq( + ScalaLogging, AwsJavaSdkS3, AwsJavaSdkCore, ScalaCollectionCompat, diff --git a/aws/src/main/scala/com/velocidi/apso/aws/S3Bucket.scala b/aws/src/main/scala/com/velocidi/apso/aws/S3Bucket.scala index 15f21384..f9143b0a 100644 --- a/aws/src/main/scala/com/velocidi/apso/aws/S3Bucket.scala +++ b/aws/src/main/scala/com/velocidi/apso/aws/S3Bucket.scala @@ -13,8 +13,8 @@ import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuild import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder} import com.amazonaws.{AmazonClientException, AmazonServiceException, ClientConfiguration} import com.typesafe.config.ConfigFactory +import com.typesafe.scalalogging.LazyLogging -import com.velocidi.apso.Logging import com.velocidi.apso.aws.S3Bucket.S3ObjectDownloader /** A representation of an Amazon's S3 bucket. This class wraps an `AmazonS3Client` and provides a higher level @@ -29,7 +29,7 @@ import com.velocidi.apso.aws.S3Bucket.S3ObjectDownloader class S3Bucket( val bucketName: String, private val credentialsProvider: () => AWSCredentialsProvider = () => CredentialStore -) extends Logging +) extends LazyLogging with Serializable { private[this] lazy val config = ConfigFactory.load() @@ -133,11 +133,11 @@ class S3Bucket( * a list of objects in a bucket matching a given prefix. */ def getObjectsWithMatchingPrefix(prefix: String, includeDirectories: Boolean = false): Iterator[S3ObjectSummary] = { - log.info(s"Finding files matching prefix '$prefix'...") + logger.info(s"Finding files matching prefix '$prefix'...") val listings = Iterator.iterate(s3.listObjects(bucketName, sanitizeKey(prefix))) { listing => if (listing.isTruncated) { - log.debug("Asking for another batch of objects...") + logger.debug("Asking for another batch of objects...") s3.listNextBatchOfObjects(listing) } else null } @@ -166,7 +166,7 @@ class S3Bucket( * true if the push was successful, false otherwise. */ def push(key: String, file: File): Boolean = retry { - log.info(s"Pushing file '${file.getPath}' to 's3://$bucketName/$key'") + logger.info(s"Pushing file '${file.getPath}' to 's3://$bucketName/$key'") transferManager .upload(new PutObjectRequest(bucketName, sanitizeKey(key), file)) .waitForUploadResult() @@ -184,7 +184,7 @@ class S3Bucket( * true if the push was successful, false otherwise. 
*/ def push(key: String, inputStream: InputStream, length: Option[Long]): Boolean = retry { - log.info(s"Pushing to 's3://$bucketName/$key'") + logger.info(s"Pushing to 's3://$bucketName/$key'") val metadata = new ObjectMetadata() length.foreach(metadata.setContentLength) transferManager @@ -249,7 +249,7 @@ class S3Bucket( * the `CannedAccessControlList` to be applied to the Amazon S3 object */ def setAcl(key: String, acl: CannedAccessControlList) = { - log.info(s"Setting 's3://$bucketName/$key' permissions to '$acl'") + logger.info(s"Setting 's3://$bucketName/$key' permissions to '$acl'") s3.setObjectAcl(bucketName, key, acl) } @@ -261,7 +261,7 @@ class S3Bucket( * true if the directory was created successfully, false otherwise. */ def createDirectory(key: String): Boolean = retry { - log.info(s"Creating directory in 's3://$bucketName/$key'") + logger.info(s"Creating directory in 's3://$bucketName/$key'") val emptyContent = new ByteArrayInputStream(Array[Byte]()) val metadata = new ObjectMetadata() @@ -302,13 +302,13 @@ class S3Bucket( * true if the pull was successful, false otherwise */ def pull(key: String, destination: String): Boolean = retry { - log.info(s"Pulling 's3://$bucketName/$key' to '$destination'") + logger.info(s"Pulling 's3://$bucketName/$key' to '$destination'") Using(new S3ObjectDownloader(s3, bucketName, sanitizeKey(key), destination))(_.download()).get - log.info(s"Downloaded 's3://$bucketName/$key' to '$destination'. Closing files.") + logger.info(s"Downloaded 's3://$bucketName/$key' to '$destination'. Closing files.") }.isDefined def stream(key: String, offset: Long = 0L): InputStream = { - log.info(s"Streaming 's3://$bucketName/$key' starting at $offset") + logger.info(s"Streaming 's3://$bucketName/$key' starting at $offset") val req = if (offset > 0) new GetObjectRequest(bucketName, sanitizeKey(key)).withRange(offset) else new GetObjectRequest(bucketName, sanitizeKey(key)) @@ -319,11 +319,11 @@ class S3Bucket( case ex: AmazonS3Exception => ex.getStatusCode match { case 404 => - log.error("The specified file does not exist", ex); true // no need to retry + logger.error("The specified file does not exist", ex); true // no need to retry case 403 => - log.error("No permission to access the file", ex); true // no need to retry + logger.error("No permission to access the file", ex); true // no need to retry case _ => - log.error( + logger.error( s"""|S3 service error: ${ex.getMessage}. Extended request id: ${ex.getExtendedRequestId} |Additional details: ${ex.getAdditionalDetails}""".stripMargin, ex @@ -332,28 +332,28 @@ class S3Bucket( } case ex: AmazonServiceException => - log.error(s"Service error: ${ex.getMessage}", ex); ex.isRetryable + logger.error(s"Service error: ${ex.getMessage}", ex); ex.isRetryable case ex: AmazonClientException if ex.getMessage == "Unable to load AWS credentials from any provider in the chain" => - log.error("Unable to load AWS credentials", ex); true + logger.error("Unable to load AWS credentials", ex); true case ex: AmazonClientException => - log.error("Client error pulling file", ex); ex.isRetryable + logger.error("Client error pulling file", ex); ex.isRetryable case ex: Exception => - log.error("An error occurred", ex); false + logger.error("An error occurred", ex); false } private[this] def retry[T](f: => T, tries: Int = 3, sleepTime: Int = 5000): Option[T] = - if (tries == 0) { log.error("Max retries reached. Aborting S3 operation"); None } + if (tries == 0) { logger.error("Max retries reached. 
Aborting S3 operation"); None } else Try(f) match { case Success(res) => Some(res) case Failure(e) if !handler(e) => if (tries > 1) { - log.warn(s"Error during S3 operation. Retrying in ${sleepTime}ms (${tries - 1} more times)") + logger.warn(s"Error during S3 operation. Retrying in ${sleepTime}ms (${tries - 1} more times)") Thread.sleep(sleepTime) } retry(f, tries - 1, sleepTime) diff --git a/build.sbt b/build.sbt index 93fe5e90..b286fd1c 100644 --- a/build.sbt +++ b/build.sbt @@ -16,18 +16,17 @@ def module(project: Project, moduleName: String) = .settings(commonSettings: _*) lazy val akka = module(project, "akka") -lazy val akkaHttp = module(project, "akka-http").dependsOn(log, core % Test, testkit % Test) -lazy val aws = module(project, "aws").dependsOn(core, log) +lazy val akkaHttp = module(project, "akka-http").dependsOn(core % Test, testkit % Test) +lazy val aws = module(project, "aws").dependsOn(core) lazy val caching = module(project, "caching").enablePlugins(BoilerplatePlugin) lazy val circe = module(project, "circe") lazy val collections = module(project, "collections") lazy val core = module(project, "core").dependsOn(testkit % Test) -lazy val elasticsearch = module(project, "elasticsearch").dependsOn(log, testkit % Test) -lazy val encryption = module(project, "encryption").dependsOn(log) +lazy val elasticsearch = module(project, "elasticsearch").dependsOn(testkit % Test) +lazy val encryption = module(project, "encryption") lazy val hashing = module(project, "hashing") lazy val io = module(project, "io").dependsOn(aws, testkit % Test) -lazy val log = module(project, "log") -lazy val profiling = module(project, "profiling").dependsOn(log) +lazy val profiling = module(project, "profiling") lazy val testkit = module(project, "testkit") lazy val time = module(project, "time") @@ -46,7 +45,6 @@ lazy val apso = (project in file(".")) encryption, hashing, io, - log, profiling, time ) @@ -62,7 +60,6 @@ lazy val apso = (project in file(".")) encryption, hashing, io, - log, profiling, testkit, time diff --git a/docs/README.md b/docs/README.md index 4f54b36b..7cd19b82 100644 --- a/docs/README.md +++ b/docs/README.md @@ -33,7 +33,6 @@ Please take into account that the library is still in an experimental stage and - [Geo](#geo) - [Implicits](#implicits) - [JreVersionHelper](#jreversionhelper) - - [Logging](#logging) - [ProgressBar](#progressbar) - [Reflect](#reflect) - [Retry](#retry) @@ -173,20 +172,6 @@ JreVersionHelper.jreVersion // res0: (Int, Int) = (1, 8) ``` -### Logging - -The `Logging` and `StrictLogging` traits allows mixing in SLF4J `Logger` objects. The difference between the two is that in the former the `Logger` object is initialized lazily, while in the latter it is initialized strictly: - -```scala mdoc:compile-only -import com.velocidi.apso.Logging - -class A extends Logging {} - -val a = new A - -a.log.info("test") -``` - ### ProgressBar The `ProgressBar` represents a widget to print a dynamic progress bar in a console. 
diff --git a/elasticsearch/build.sbt b/elasticsearch/build.sbt index 276be991..57bd39de 100644 --- a/elasticsearch/build.sbt +++ b/elasticsearch/build.sbt @@ -1,6 +1,7 @@ import Dependencies._ libraryDependencies ++= Seq( + ScalaLogging, AkkaActor % Provided, ApacheHttpAsyncClient, ApacheHttpClient, diff --git a/elasticsearch/src/main/scala/com/velocidi/apso/elasticsearch/ElasticsearchBulkInserter.scala b/elasticsearch/src/main/scala/com/velocidi/apso/elasticsearch/ElasticsearchBulkInserter.scala index 73feff98..ddb00adf 100644 --- a/elasticsearch/src/main/scala/com/velocidi/apso/elasticsearch/ElasticsearchBulkInserter.scala +++ b/elasticsearch/src/main/scala/com/velocidi/apso/elasticsearch/ElasticsearchBulkInserter.scala @@ -11,10 +11,9 @@ import com.sksamuel.elastic4s.ElasticDsl._ import com.sksamuel.elastic4s.requests.bulk.{BulkResponse, BulkResponseItem} import com.sksamuel.elastic4s.requests.indexes.IndexRequest import com.sksamuel.elastic4s.{ElasticClient, Indexable} +import com.typesafe.scalalogging.LazyLogging import io.circe.Json -import com.velocidi.apso.Logging - /** An actor responsible for inserting tracking events into Elasticsearch. This actor buffers requests until either the * configured flush timer is triggered or the buffer hits the max size. */ @@ -23,7 +22,7 @@ class ElasticsearchBulkInserter( logErrorsAsWarnings: Boolean, timeoutOnStop: FiniteDuration = 3.seconds ) extends Actor - with Logging { + with LazyLogging { import ElasticsearchBulkInserter._ implicit private[this] val ec: ExecutionContext = context.system.dispatcher @@ -48,10 +47,10 @@ class ElasticsearchBulkInserter( private[this] def logErrorOrWarning(msg: => String, throwable: Option[Throwable] = None): Unit = { (logErrorsAsWarnings, throwable) match { - case (true, Some(t)) => log.warn(msg, t) - case (true, None) => log.warn(msg) - case (false, Some(t)) => log.error(msg, t) - case (false, None) => log.error(msg) + case (true, Some(t)) => logger.warn(msg, t) + case (true, None) => logger.warn(msg) + case (false, Some(t)) => logger.error(msg, t) + case (false, None) => logger.error(msg) } } @@ -84,7 +83,7 @@ class ElasticsearchBulkInserter( Nil } else { tryCountMap(msg) = tryCount - log.info( + logger.info( "Error inserting document in Elasticsearch: {}. Will retry {} more times", item.error, maxTryCount - tryCount @@ -163,7 +162,7 @@ class ElasticsearchBulkInserter( case msg: Message => buffer = msg :: buffer case ElasticsearchUp => - log.info("Elasticsearch is up. Bulk inserter will start sending requests") + logger.info("Elasticsearch is up. Bulk inserter will start sending requests") becomeElasticsearchUp() case ElasticsearchDown => @@ -210,7 +209,7 @@ class ElasticsearchBulkInserter( checkElasticsearch().collect { case true => self ! ElasticsearchUp } case ElasticsearchUp => - log.info("Elasticsearch is up. Bulk inserting started") + logger.info("Elasticsearch is up. Bulk inserting started") periodicCheck.cancel() becomeElasticsearchUp() self ! Flush @@ -219,7 +218,7 @@ class ElasticsearchBulkInserter( override def postStop() = { super.postStop() - log.info("Stopping Bulk Inserter...") + logger.info("Stopping Bulk Inserter...") val stop = if (buffer.nonEmpty) flush().andThen { case _ => client.close() } else Future(client.close()) @@ -233,7 +232,7 @@ class ElasticsearchBulkInserter( /** Companion object for the `ElasticsearchBulkInserter` actor. 
*/ -object ElasticsearchBulkInserter extends Logging { +object ElasticsearchBulkInserter { implicit object JsonIndexable extends Indexable[Json] { def json(js: Json) = js.noSpaces } diff --git a/encryption/src/main/scala/com/velocidi/apso/encryption/Decryptor.scala b/encryption/src/main/scala/com/velocidi/apso/encryption/Decryptor.scala index 19b5acf2..599ca0c5 100644 --- a/encryption/src/main/scala/com/velocidi/apso/encryption/Decryptor.scala +++ b/encryption/src/main/scala/com/velocidi/apso/encryption/Decryptor.scala @@ -5,10 +5,9 @@ import java.security.{Key, MessageDigest} import javax.crypto.Cipher import javax.crypto.spec.SecretKeySpec +import com.typesafe.scalalogging.LazyLogging import org.apache.commons.codec.binary.Base64 -import com.velocidi.apso.Logging - /** Utility class to handle decrypting data to string format and, optionally, handle base64 encoded data. * * @param decryptor @@ -30,19 +29,19 @@ class Decryptor(decryptor: Cipher) extends EncryptionErrorHandling { /** Provides the `apply` methods that allow to more easily create a [[Decryptor]] object by directly specifying the * transformation and key, or a keystore holding the key parameters. */ -object Decryptor extends EncryptionUtils with Logging { +object Decryptor extends EncryptionUtils with LazyLogging { private def loadDecryptionCipher(transformation: String, key: Key): Option[Cipher] = handle( { - log.debug(s"Building Decryptor using Transformation '$transformation' and Key Algorithm '${key.getAlgorithm}'") + logger.debug(s"Building Decryptor using Transformation '$transformation' and Key Algorithm '${key.getAlgorithm}'") val cipher = Cipher.getInstance(transformation, provider) cipher.init(Cipher.DECRYPT_MODE, key) cipher }, { ex: Throwable => - log.warn(s"Cipher Transformation: $transformation") - log.warn("Cipher Key: " + key) - log.warn(s"Impossible to create Decryption Cipher!", ex) + logger.warn(s"Cipher Transformation: $transformation") + logger.warn("Cipher Key: " + key) + logger.warn(s"Impossible to create Decryption Cipher!", ex) } ) diff --git a/encryption/src/main/scala/com/velocidi/apso/encryption/Encryptor.scala b/encryption/src/main/scala/com/velocidi/apso/encryption/Encryptor.scala index 7db8bd0a..038f5a6b 100644 --- a/encryption/src/main/scala/com/velocidi/apso/encryption/Encryptor.scala +++ b/encryption/src/main/scala/com/velocidi/apso/encryption/Encryptor.scala @@ -5,50 +5,49 @@ import java.security.{Key, MessageDigest} import javax.crypto.Cipher import javax.crypto.spec.SecretKeySpec +import com.typesafe.scalalogging.LazyLogging import org.apache.commons.codec.binary.Base64 -import com.velocidi.apso.Logging - /** Utility class to handle encrypting data to string format and, optionally, handle base64 encoded data. * * @param encryptor * the underlying Cipher object that allows to encrypt the data. 
*/ -class Encryptor(encryptor: Cipher) extends EncryptionErrorHandling with Logging { +class Encryptor(encryptor: Cipher) extends EncryptionErrorHandling with LazyLogging { def apply(data: String) = encrypt(data) def encrypt(data: String, pad: Boolean = true): Option[String] = handle( EncryptionUtils.paddedUrlSafebase64(encryptor.doFinal(data.getBytes("UTF-8")), pad), - { ex: Throwable => log.warn(s"Error while trying to encrypt data with padding '$pad': $data", ex) } + { ex: Throwable => logger.warn(s"Error while trying to encrypt data with padding '$pad': $data", ex) } ) def encryptToSafeString(data: Array[Byte]): Option[String] = handle( Base64.encodeBase64URLSafeString(encryptor.doFinal(data)), - { ex: Throwable => log.warn(s"Error while trying to encrypt data: $data", ex) } + { ex: Throwable => logger.warn(s"Error while trying to encrypt data: $data", ex) } ) def encryptToSafeString(data: String): Option[String] = handle( Base64.encodeBase64URLSafeString(encryptor.doFinal(data.getBytes("UTF-8"))), - { ex: Throwable => log.warn(s"Error while trying to encrypt data: $data", ex) } + { ex: Throwable => logger.warn(s"Error while trying to encrypt data: $data", ex) } ) } /** Provides the `apply` methods that allow to more easily create a [[Encryptor]] object by directly specifying the * transformation and key, or a keystore holding the key parameters. */ -object Encryptor extends EncryptionUtils with Logging { +object Encryptor extends EncryptionUtils with LazyLogging { private def loadEncryptionCipher(transformation: String, key: Key): Option[Cipher] = handle( { - log.debug(s"Building Encryptor using Transformation '$transformation' and Key Algorithm '${key.getAlgorithm}'") + logger.debug(s"Building Encryptor using Transformation '$transformation' and Key Algorithm '${key.getAlgorithm}'") val cipher = Cipher.getInstance(transformation, provider) cipher.init(Cipher.ENCRYPT_MODE, key) cipher }, { ex: Throwable => - log.warn(s"Cipher Transformation: $transformation") - log.warn("Cipher Key: " + key) - log.warn(s"Impossible to create Encryption Cipher!", ex) + logger.warn(s"Cipher Transformation: $transformation") + logger.warn("Cipher Key: " + key) + logger.warn(s"Impossible to create Encryption Cipher!", ex) } ) diff --git a/io/src/main/scala/com/velocidi/apso/io/InsistentInputStream.scala b/io/src/main/scala/com/velocidi/apso/io/InsistentInputStream.scala index 85e41428..7d2abd32 100644 --- a/io/src/main/scala/com/velocidi/apso/io/InsistentInputStream.scala +++ b/io/src/main/scala/com/velocidi/apso/io/InsistentInputStream.scala @@ -6,7 +6,7 @@ import scala.annotation.tailrec import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} -import com.velocidi.apso.Logging +import com.typesafe.scalalogging.LazyLogging /** A InputStream that wraps another InputStream, retrying failed reads. This is useful for input streams that can have * transient failures (eg HTTP input streams). 
@@ -23,7 +23,7 @@ class InsistentInputStream( maxRetries: Int = 10, backoff: Option[FiniteDuration] = None ) extends InputStream - with Logging { + with LazyLogging { def this(streamBuilder: () => InputStream, maxRetries: Int, backoff: Option[FiniteDuration]) = this( @@ -73,7 +73,7 @@ class InsistentInputStream( case Failure(t) => if (remainingTries <= 0) throw t else { - log.warn(s"Failed to read from stream: ${t.getMessage}") + logger.warn(s"Failed to read from stream: ${t.getMessage}") val nextRemainingTries = retryStreamCreation(remainingTries - 1) readRetries(nextRemainingTries, f) } diff --git a/io/src/main/scala/com/velocidi/apso/io/LocalFileDescriptor.scala b/io/src/main/scala/com/velocidi/apso/io/LocalFileDescriptor.scala index 6c43dde1..e455d687 100644 --- a/io/src/main/scala/com/velocidi/apso/io/LocalFileDescriptor.scala +++ b/io/src/main/scala/com/velocidi/apso/io/LocalFileDescriptor.scala @@ -6,10 +6,11 @@ import java.nio.file.{Files, Path, Paths, StandardCopyOption} import scala.io.Source import scala.util.{Failure, Success, Try} +import com.typesafe.scalalogging.LazyLogging + import com.velocidi.apso.Implicits.ApsoCloseable -import com.velocidi.apso.Logging -case class LocalFileDescriptor(initialPath: String) extends FileDescriptor with Logging { +case class LocalFileDescriptor(initialPath: String) extends FileDescriptor with LazyLogging { @transient private[this] var _normalizedPath: Path = _ @@ -75,7 +76,7 @@ case class LocalFileDescriptor(initialPath: String) extends FileDescriptor with result match { case Success(_) => if (safeDownloading) downloadFile.rename(localTarget) - case Failure(ex) => log.warn(s"File copy failed ($ex)") + case Failure(ex) => logger.warn(s"File copy failed ($ex)") } result.isSuccess @@ -93,7 +94,7 @@ case class LocalFileDescriptor(initialPath: String) extends FileDescriptor with result match { case Success(_) => - case Failure(ex) => log.warn(s"File copy failed ($ex)") + case Failure(ex) => logger.warn(s"File copy failed ($ex)") } result.isSuccess @@ -111,7 +112,7 @@ case class LocalFileDescriptor(initialPath: String) extends FileDescriptor with result match { case Success(_) => - case Failure(ex) => log.warn(s"File copy failed ($ex)") + case Failure(ex) => logger.warn(s"File copy failed ($ex)") } result.isSuccess diff --git a/io/src/main/scala/com/velocidi/apso/io/S3FileDescriptor.scala b/io/src/main/scala/com/velocidi/apso/io/S3FileDescriptor.scala index eb574af0..f5b53705 100644 --- a/io/src/main/scala/com/velocidi/apso/io/S3FileDescriptor.scala +++ b/io/src/main/scala/com/velocidi/apso/io/S3FileDescriptor.scala @@ -7,7 +7,6 @@ import scala.collection.concurrent.TrieMap import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials} import com.amazonaws.services.s3.model.S3ObjectSummary -import com.velocidi.apso.Logging import com.velocidi.apso.aws.{S3Bucket, SerializableAWSCredentials} case class S3FileDescriptor( @@ -15,8 +14,7 @@ case class S3FileDescriptor( protected val elements: List[String], private var summary: Option[S3ObjectSummary] = None ) extends FileDescriptor - with RemoteFileDescriptor - with Logging { + with RemoteFileDescriptor { type Self = S3FileDescriptor diff --git a/io/src/main/scala/com/velocidi/apso/io/SftpFileDescriptor.scala b/io/src/main/scala/com/velocidi/apso/io/SftpFileDescriptor.scala index ae3669bd..19d62900 100644 --- a/io/src/main/scala/com/velocidi/apso/io/SftpFileDescriptor.scala +++ b/io/src/main/scala/com/velocidi/apso/io/SftpFileDescriptor.scala @@ -8,6 +8,7 @@ import 
scala.jdk.CollectionConverters._ import scala.util.{Properties, Try} import com.typesafe.config.ConfigFactory +import com.typesafe.scalalogging.LazyLogging import io.github.andrebeat.pool.{Lease, Pool} import net.schmizz.sshj._ import net.schmizz.sshj.common.SSHException @@ -16,8 +17,6 @@ import net.schmizz.sshj.transport.verification._ import net.schmizz.sshj.userauth.password.PasswordUtils import net.schmizz.sshj.xfer.InMemorySourceFile -import com.velocidi.apso.Logging - /** A `FileDescriptor` for files served over SFTP. This file descriptor only supports absolute paths. The SSH * connections for a given host are pooled. * @@ -61,7 +60,7 @@ case class SftpFileDescriptor( @transient private var _fileAttributes: Option[FileAttributes] = None ) extends FileDescriptor with RemoteFileDescriptor - with Logging { + with LazyLogging { type Self = SftpFileDescriptor @@ -82,8 +81,8 @@ case class SftpFileDescriptor( case e: SFTPException if e.getStatusCode == Response.StatusCode.NO_SUCH_FILE => throw new FileNotFoundException(toString) case e @ (_: SSHException | _: IOException) if retries > 0 => - log.warn(s"[$host] ${e.getMessage}. Retrying in 10 seconds...") - log.debug(s"Failure cause: ${e.getCause}") + logger.warn(s"[$host] ${e.getMessage}. Retrying in 10 seconds...") + logger.debug(s"Failure cause: ${e.getCause}") Thread.sleep(10000) doConnect(retries - 1) } @@ -141,7 +140,7 @@ case class SftpFileDescriptor( require(!localTarget.isDirectory, s"Local file descriptor can't point to a directory: ${localTarget.path}") require(!isDirectory, s"Remote file descriptor can't point to a directory: ${this.path}") - log.info(s"Downloading '$toString' to '$localTarget'") + logger.info(s"Downloading '$toString' to '$localTarget'") if (localTarget.parent().mkdirs()) { val downloadFile = if (safeDownloading) localTarget.sibling(_ + ".tmp") else localTarget @@ -157,7 +156,7 @@ case class SftpFileDescriptor( require(!localTarget.isDirectory, s"Local file descriptor can't point to a directory: ${localTarget.path}") require(!exists || !isDirectory, s"Remote file descriptor can't point to a directory: ${this.path}") - log.info(s"Uploading '$localTarget' to '$toString'") + logger.info(s"Uploading '$localTarget' to '$toString'") parent().mkdirs() && Try(sftp(_.put(localTarget.path, path))).isSuccess @@ -166,7 +165,7 @@ case class SftpFileDescriptor( def upload(inputStream: InputStream, length: Option[Long]): Boolean = { require(!exists || !isDirectory, s"Remote file descriptor can't point to a directory: ${this.path}") - log.info(s"Uploading to '$toString'") + logger.info(s"Uploading to '$toString'") val sourceFile = new InMemorySourceFile { override def getLength: Long = length.getOrElse(0) diff --git a/log/build.sbt b/log/build.sbt deleted file mode 100644 index 881c587e..00000000 --- a/log/build.sbt +++ /dev/null @@ -1,3 +0,0 @@ -import Dependencies._ - -libraryDependencies ++= Seq(ScalaLogging) diff --git a/log/src/main/scala/com/velocidi/apso/Logging.scala b/log/src/main/scala/com/velocidi/apso/Logging.scala deleted file mode 100644 index 500cd3a9..00000000 --- a/log/src/main/scala/com/velocidi/apso/Logging.scala +++ /dev/null @@ -1,21 +0,0 @@ -package com.velocidi.apso - -import com.typesafe.scalalogging.Logger - -/** Trait to mixin an SLF4J `Logger` object. The `Logger` object is initialized lazily. - */ -trait Logging { - - /** The `Logger` object. This logger will have the same name as the concrete class into which this trait is mixed-in. 
- */ - lazy val log = Logger(getClass) -} - -/** Trait to mixin an SLF4J `Logger` object. The `Logger` object is initialized strictly. - */ -trait StrictLogging { - - /** The `Logger` object. This logger will have the same name as the concrete class into which this trait is mixed-in. - */ - val log = Logger(getClass) -} diff --git a/profiling/src/main/scala/com/velocidi/apso/profiling/SimpleJmx.scala b/profiling/src/main/scala/com/velocidi/apso/profiling/SimpleJmx.scala index 92263fce..dd5bf088 100644 --- a/profiling/src/main/scala/com/velocidi/apso/profiling/SimpleJmx.scala +++ b/profiling/src/main/scala/com/velocidi/apso/profiling/SimpleJmx.scala @@ -7,10 +7,9 @@ import scala.concurrent.Future import scala.util.{Failure, Success, Try} import com.j256.simplejmx.server.JmxServer +import com.typesafe.scalalogging.LazyLogging -import com.velocidi.apso.Logging - -trait SimpleJmx extends Logging { +trait SimpleJmx extends LazyLogging { def startJmx(jmxConfig: config.Jmx): Future[JmxServer] = { @@ -37,11 +36,11 @@ trait SimpleJmx extends Logging { Future { Try(tryStart(jmxConfig.port)).recover { case _ => tryStart() } match { case Success(jmx) => - log.info(s"Bound JMX on port ${jmx.getServerPort}") + logger.info(s"Bound JMX on port ${jmx.getServerPort}") sys.addShutdownHook(jmx.stop()) jmx case Failure(ex) => - log.warn("Could not start JMX server", ex) + logger.warn("Could not start JMX server", ex) throw ex // produce a failed `Future` } }
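
For code that mixed in the removed `Logging`/`StrictLogging` traits, the migration is mechanical, as the diffs above show: swap the mixin for the scala-logging equivalent and rename `log` to `logger`. A minimal sketch for downstream users (the class `A` here is hypothetical, mirroring the example removed from the README; assumes a `"com.typesafe.scala-logging" %% "scala-logging"` dependency and an SLF4J backend on the classpath):

```scala
// Before, with the removed apso-log module:
//   import com.velocidi.apso.Logging
//   class A extends Logging { def run(): Unit = log.info("test") }

// After, using scala-logging directly. LazyLogging initializes the
// underlying SLF4J logger lazily; StrictLogging (same package) does so
// eagerly, matching the two traits deleted from Logging.scala.
import com.typesafe.scalalogging.LazyLogging

class A extends LazyLogging {
  def run(): Unit = logger.info("test") // `logger` replaces apso's `log`
}
```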