Commit

Added metrics support to tests; advances #2, closes #7
pab1s committed Mar 31, 2024
1 parent fbf4cf7 commit 06bcecf
Showing 2 changed files with 20 additions and 7 deletions.
15 changes: 11 additions & 4 deletions tests/test_fine_tuning_pipeline.py
@@ -1,5 +1,6 @@
import pytest
from trainers import get_trainer
+ from utils.metrics import Accuracy, Precision, Recall, F1Score
from utils.data_utils import get_dataloaders
from models import get_model
import torch
@@ -37,6 +38,7 @@ def test_fine_tuning_loop():
criterion = torch.nn.CrossEntropyLoss()
optimizer_class = torch.optim.Adam
optimizer_params = {'lr': CONFIG_TEST['training']['learning_rate']}
+ metrics = [Accuracy(), Precision(), Recall(), F1Score()]

trainer = get_trainer(CONFIG_TEST['trainer'], model=model, device=device)

@@ -45,7 +47,8 @@ def test_fine_tuning_loop():
criterion=criterion,
optimizer_class=optimizer_class,
optimizer_params=optimizer_params,
- freeze_until_layer=CONFIG_TEST['training'].get('freeze_until_layer')
+ freeze_until_layer=CONFIG_TEST['training'].get('freeze_until_layer'),
+ metrics=metrics
)
trainer.train(
train_loader=train_loader,
@@ -58,7 +61,8 @@ def test_fine_tuning_loop():
criterion=criterion,
optimizer_class=optimizer_class,
optimizer_params={'lr': 0.00001},
- freeze_until_layer=None
+ freeze_until_layer=None,
+ metrics=metrics
)

trainer.train(
@@ -67,9 +71,12 @@ def test_fine_tuning_loop():
verbose=False
)

- accuracy = trainer.evaluate(
+ metrics_results = trainer.evaluate(
test_loader=test_loader,
verbose=False
)

- assert accuracy >= 0, "Accuracy should be non-negative after fine-tuning"
+ assert metrics_results[0] >= 0, "Accuracy should be non-negative"
+ assert metrics_results[1] >= 0, "Precision should be non-negative"
+ assert metrics_results[2] >= 0, "Recall should be non-negative"
+ assert metrics_results[3] >= 0, "F1 Score should be non-negative"
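
This test builds the metrics as argument-free instances and passes them to trainer.build, then reads aggregate values back from trainer.evaluate. Since utils/metrics.py is not part of this diff, the following is only a minimal sketch of a metric class that would fit that usage; the __call__/compute interface, the logits-and-integer-targets inputs, and the internal counters are assumptions, not the repository's actual API.

import torch

class Accuracy:
    """Hypothetical sketch of a metric compatible with these tests.

    Assumes the trainer calls the metric once per batch with raw model
    outputs (logits) and integer class targets, and reads the running
    value back via compute(). The real utils.metrics API may differ.
    """

    def __init__(self):
        self.correct = 0
        self.total = 0

    def __call__(self, outputs: torch.Tensor, targets: torch.Tensor) -> None:
        preds = outputs.argmax(dim=1)  # index of the highest-scoring class per sample
        self.correct += (preds == targets).sum().item()
        self.total += targets.numel()

    def compute(self) -> float:
        # Avoid division by zero before any batch has been processed.
        return self.correct / self.total if self.total else 0.0
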
12 changes: 9 additions & 3 deletions tests/test_training_pipeline.py
@@ -1,5 +1,6 @@
import pytest
from trainers import get_trainer
+ from utils.metrics import Accuracy, Precision, Recall, F1Score
from utils.data_utils import get_dataloaders
from models import get_model
import torch
@@ -36,22 +37,27 @@ def test_training_loop():
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam
optimizer_params = {'lr': CONFIG_TEST['training']['learning_rate']}
+ metrics = [Accuracy(), Precision(), Recall(), F1Score()]

trainer = get_trainer(CONFIG_TEST['trainer'], model=model, device=device)

trainer.build(
criterion=criterion,
optimizer_class=optimizer,
- optimizer_params=optimizer_params
+ optimizer_params=optimizer_params,
+ metrics=metrics
)
trainer.train(
train_loader=train_loader,
num_epochs=CONFIG_TEST['training']['num_epochs'],
verbose=False
)
- accuracy = trainer.evaluate(
+ metrics_results = trainer.evaluate(
test_loader=test_loader,
verbose=False
)

- assert accuracy >= 0, "Accuracy should be non-negative"
+ assert metrics_results[0] >= 0, "Accuracy should be non-negative"
+ assert metrics_results[1] >= 0, "Precision should be non-negative"
+ assert metrics_results[2] >= 0, "Recall should be non-negative"
+ assert metrics_results[3] >= 0, "F1 Score should be non-negative"
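
Both tests index metrics_results by position (0 = accuracy through 3 = F1), which relies on trainer.evaluate returning one value per metric in the same order as the metrics list given to trainer.build. Assuming that ordering contract, a small helper like the one below can tie results back to metric names; the helper and the commented usage are illustrative only and not part of the repository.

def results_by_name(metrics, metrics_results):
    """Pair each metric instance with its evaluated value by position.

    Hypothetical helper: assumes metrics_results is a sequence of floats
    returned by trainer.evaluate in the same order as `metrics`.
    """
    return {type(metric).__name__: value for metric, value in zip(metrics, metrics_results)}

# Illustrative usage inside a test:
# results = results_by_name(metrics, metrics_results)
# assert results["Accuracy"] >= 0, "Accuracy should be non-negative"
# assert results["F1Score"] >= 0, "F1 Score should be non-negative"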
