Fix small formatting issues in log file (#371)
jakobnissen authored Nov 19, 2024
1 parent 003735d commit 21bdcdd
Showing 5 changed files with 71 additions and 71 deletions.
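
The changes in vamb/__main__.py fix an f-string typo (a stray "f" inside the string literal, which was printed verbatim into the log) and put quotes around logged file paths; the remaining files indent the property lines of the log under their headers. Below is a minimal sketch of the f-string fix, with a made-up argument list standing in for sys.argv:

    # Sketch of the typo fixed in vamb/__main__.py (the argument list is hypothetical).
    args = ["vamb", "bin", "default", "--outdir", "out"]
    before = f"Invoked with CLI args: 'f{' '.join(args)}'"  # stray "f" is literal text, not an f-string prefix
    after = f"Invoked with CLI args: '{' '.join(args)}'"    # fixed version
    print(before)  # Invoked with CLI args: 'fvamb bin default --outdir out'
    print(after)   # Invoked with CLI args: 'vamb bin default --outdir out'
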
vamb/__main__.py (6 additions, 6 deletions)
@@ -114,8 +114,8 @@ def __init__(


 class AbundancePath:
-    def __init__(self, abundance: Path):
-        self.abundance = check_existing_file(abundance)
+    def __init__(self, path: Path):
+        self.path = check_existing_file(path)


 class BAMPaths:
@@ -656,7 +656,7 @@ def run(
     begintime = time.time()
     logger.info("Starting Vamb version " + vamb.__version_str__)
     logger.info("Random seed is " + str(general.seed))
-    logger.info(f"Invoked with CLI args: 'f{' '.join(sys.argv)}'")
+    logger.info(f"Invoked with CLI args: '{' '.join(sys.argv)}'")
     runner()
     logger.info(f"Completed Vamb in {round(time.time() - begintime, 2)} seconds.")

@@ -821,7 +821,7 @@ def calc_tnf(
     path = options.path

     if isinstance(path, CompositionPath):
-        logger.info(f"\tLoading composition from npz at: {path.path}")
+        logger.info(f'\tLoading composition from npz at: "{path.path}"')
         composition = vamb.parsecontigs.Composition.load(path.path)
         composition.filter_min_length(min_contig_length)
     else:
@@ -882,10 +882,10 @@ def calc_abundance(

     paths = abundance_options.paths
     if isinstance(paths, AbundancePath):
-        logger.info(f"\tLoading depths from npz at: {str(paths)}")
+        logger.info(f'\tLoading depths from npz at: "{str(paths.path)}"')

         abundance = vamb.parsebam.Abundance.load(
-            paths.abundance,
+            paths.path,
             comp_metadata.refhash if refcheck else None,
         )
         # I don't want this check in any constructors of abundance, since the constructors
vamb/aamb_encode.py (9 additions, 9 deletions)
@@ -215,19 +215,19 @@ def trainmodel(

         # Initialize generator and discriminator
         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tY length: {self.y_len}")
-        logger.info(f"\tZ length: {self.ld}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Y length: {self.y_len}")
+        logger.info(f"\t Z length: {self.ld}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {data_loader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {data_loader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {self.nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {self.nsamples}")

         # we need to separate the paramters due to the adversarial training

vamb/encode.py (11 additions, 11 deletions)
@@ -573,21 +573,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), decouple=True)

         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {nsamples}")

         # Train
         for epoch in range(nepochs):
vamb/semisupervised_encode.py (23 additions, 23 deletions)
@@ -413,21 +413,21 @@ def trainmodel(
         optimizer = _Adam(self.parameters(), lr=lrate)

         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")

         # Train
         for epoch in range(nepochs):
@@ -1063,22 +1063,22 @@ def trainmodel(
         )

         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.VAEVamb.usecuda}")
-        logger.info(f"\tAlpha: {self.VAEVamb.alpha}")
-        logger.info(f"\tBeta: {self.VAEVamb.beta}")
-        logger.info(f"\tDropout: {self.VAEVamb.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.VAEVamb.nhiddens))}")
-        logger.info(f"\tN latent: {self.VAEVamb.nlatent}")
+        logger.info(f"\t CUDA: {self.VAEVamb.usecuda}")
+        logger.info(f"\t Alpha: {self.VAEVamb.alpha}")
+        logger.info(f"\t Beta: {self.VAEVamb.beta}")
+        logger.info(f"\t Dropout: {self.VAEVamb.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.VAEVamb.nhiddens))}")
+        logger.info(f"\t N latent: {self.VAEVamb.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN sequences: {ncontigs}")
-        logger.info(f"\tN samples: {nsamples}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N sequences: {ncontigs}")
+        logger.info(f"\t N samples: {nsamples}")

         # Train
         for epoch in range(nepochs):
vamb/taxvamb_encode.py (22 additions, 22 deletions)
@@ -378,21 +378,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), lr=1, decouple=True)

         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
-        logger.info(f"\tN latent: {self.nlatent}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t N latent: {self.nlatent}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")

         # Train
         for epoch in range(nepochs):
@@ -1026,21 +1026,21 @@ def trainmodel(
         optimizer = dadaptation.DAdaptAdam(self.parameters(), lr=1, decouple=True)

         logger.info("\tNetwork properties:")
-        logger.info(f"\tCUDA: {self.usecuda}")
-        logger.info(f"\tHierarchical loss: {self.hierloss.name}")
-        logger.info(f"\tAlpha: {self.alpha}")
-        logger.info(f"\tBeta: {self.beta}")
-        logger.info(f"\tDropout: {self.dropout}")
-        logger.info(f"\tN hidden: {', '.join(map(str, self.nhiddens))}")
+        logger.info(f"\t CUDA: {self.usecuda}")
+        logger.info(f"\t Hierarchical loss: {self.hierloss.name}")
+        logger.info(f"\t Alpha: {self.alpha}")
+        logger.info(f"\t Beta: {self.beta}")
+        logger.info(f"\t Dropout: {self.dropout}")
+        logger.info(f"\t N hidden: {', '.join(map(str, self.nhiddens))}")
         logger.info("\tTraining properties:")
-        logger.info(f"\tN epochs: {nepochs}")
-        logger.info(f"\tStarting batch size: {dataloader.batch_size}")
+        logger.info(f"\t N epochs: {nepochs}")
+        logger.info(f"\t Starting batch size: {dataloader.batch_size}")
         batchsteps_string = (
             ", ".join(map(str, sorted(batchsteps_set))) if batchsteps_set else "None"
         )
-        logger.info(f"\tBatchsteps: {batchsteps_string}")
-        logger.info(f"\tLearning rate: {lrate}")
-        logger.info(f"\tN labels: {nlabels}")
+        logger.info(f"\t Batchsteps: {batchsteps_string}")
+        logger.info(f"\t Learning rate: {lrate}")
+        logger.info(f"\t N labels: {nlabels}")

         # Train
         for epoch in range(nepochs):
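All five encoder files receive the same log-layout change: extra whitespace after the leading "\t" so that each property line is indented under its "Network properties:" or "Training properties:" header (the exact amount of whitespace may be collapsed in this rendering). A minimal sketch of the effect, using the standard library logging module and made-up values as stand-ins for Vamb's own logger setup:

    # Sketch of the log layout change; logger configuration and values are hypothetical.
    import logging

    logging.basicConfig(format="%(message)s", level=logging.INFO)
    logger = logging.getLogger("demo")

    usecuda, nepochs = False, 300
    logger.info("\tNetwork properties:")
    logger.info(f"\t CUDA: {usecuda}")       # was: f"\tCUDA: {usecuda}"
    logger.info("\tTraining properties:")
    logger.info(f"\t N epochs: {nepochs}")   # was: f"\tN epochs: {nepochs}"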
