From 21bdcdd7f607c67781ce26691b2275b42cb66b05 Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Tue, 19 Nov 2024 11:28:27 +0100
Subject: [PATCH] Fix small formatting issues in log file (#371)

---
 vamb/__main__.py              | 12 ++++-----
 vamb/aamb_encode.py           | 18 +++++++-------
 vamb/encode.py                | 22 ++++++++---------
 vamb/semisupervised_encode.py | 46 +++++++++++++++++------------------
 vamb/taxvamb_encode.py        | 44 ++++++++++++++++-----------------
 5 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/vamb/__main__.py b/vamb/__main__.py
index 3d567dc7..12c9a3e7 100755
--- a/vamb/__main__.py
+++ b/vamb/__main__.py
@@ -114,8 +114,8 @@ def __init__(
 
 
 class AbundancePath:
-    def __init__(self, abundance: Path):
-        self.abundance = check_existing_file(abundance)
+    def __init__(self, path: Path):
+        self.path = check_existing_file(path)
 
 
 class BAMPaths:
@@ -656,7 +656,7 @@ def run(
     begintime = time.time()
     logger.info("Starting Vamb version " + vamb.__version_str__)
     logger.info("Random seed is " + str(general.seed))
-    logger.info(f"Invoked with CLI args: 'f{' '.join(sys.argv)}'")
+    logger.info(f"Invoked with CLI args: '{' '.join(sys.argv)}'")
 
     runner()
     logger.info(f"Completed Vamb in {round(time.time() - begintime, 2)} seconds.")
@@ -821,7 +821,7 @@ def calc_tnf(
     path = options.path
 
     if isinstance(path, CompositionPath):
-        logger.info(f"\tLoading composition from npz at: {path.path}")
+        logger.info(f'\tLoading composition from npz at: "{path.path}"')
         composition = vamb.parsecontigs.Composition.load(path.path)
         composition.filter_min_length(min_contig_length)
     else:
@@ -882,10 +882,10 @@ def calc_abundance(
     paths = abundance_options.paths
 
     if isinstance(paths, AbundancePath):
-        logger.info(f"\tLoading depths from npz at: {str(paths)}")
+        logger.info(f'\tLoading depths from npz at: "{str(paths.path)}"')
 
         abundance = vamb.parsebam.Abundance.load(
-            paths.abundance,
+            paths.path,
             comp_metadata.refhash if refcheck else None,
         )
         # I don't want this check in any constructors of abundance, since the constructors
diff --git a/vamb/aamb_encode.py b/vamb/aamb_encode.py
index 0c1cd5df..a541b7d7 100644
--- a/vamb/aamb_encode.py
+++ b/vamb/aamb_encode.py
@@ -215,19 +215,19 @@ def trainmodel(
         # Initialize generator and discriminator
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tY length: {self.y_len}")
-        logger.info(f"\tZ length: {self.ld}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Y length: {self.y_len}")
+        logger.info(f"\t Z length: {self.ld}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {data_loader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {data_loader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {self.nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {self.nsamples}")
 
 
         # we need to separate the paramters due to the adversarial training
diff --git a/vamb/encode.py b/vamb/encode.py
index aa74b2d8..0224fef8 100644
--- a/vamb/encode.py
+++ b/vamb/encode.py
@@ -573,21 +573,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), decouple=True)
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {nsamples}")
 
         # Train
         for epoch in range(nepochs):
diff --git a/vamb/semisupervised_encode.py b/vamb/semisupervised_encode.py
index 858faf19..638ab8f8 100644
--- a/vamb/semisupervised_encode.py
+++ b/vamb/semisupervised_encode.py
@@ -413,21 +413,21 @@ def trainmodel(
         optimizer = _Adam(self.parameters(), lr=lrate)
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")
 
         # Train
         for epoch in range(nepochs):
@@ -1063,22 +1063,22 @@ def trainmodel(
         )
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.VAEVamb.usecuda}")
-        logger.info(f"\tAlpha: {self.VAEVamb.alpha}")
-        logger.info(f"\tBeta: {self.VAEVamb.beta}")
-        logger.info(f"\tDropout: {self.VAEVamb.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.VAEVamb.nhiddens))}")
-        logger.info(f"\tN latent: {self.VAEVamb.nlatent}")
+        logger.info(f"\t CUDA: {self.VAEVamb.usecuda}")
+        logger.info(f"\t Alpha: {self.VAEVamb.alpha}")
+        logger.info(f"\t Beta: {self.VAEVamb.beta}")
+        logger.info(f"\t Dropout: {self.VAEVamb.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.VAEVamb.nhiddens))}")
+        logger.info(f"\t N latent: {self.VAEVamb.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {nsamples}")
 
         # Train
         for epoch in range(nepochs):
diff --git a/vamb/taxvamb_encode.py b/vamb/taxvamb_encode.py
index 1541b751..a97a08b5 100644
--- a/vamb/taxvamb_encode.py
+++ b/vamb/taxvamb_encode.py
@@ -378,21 +378,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), lr=1, decouple=True)
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")
 
         # Train
         for epoch in range(nepochs):
@@ -1026,21 +1026,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), lr=1, decouple=True)
 
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tHierarchical loss: {self.hierloss.name}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Hierarchical loss: {self.hierloss.name}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")
 
         # Train
         for epoch in range(nepochs):