From 06bcecf4253256aede4f6fc698c71065acfb1297 Mon Sep 17 00:00:00 2001
From: Pablo Olivares
Date: Sun, 31 Mar 2024 03:01:17 +0200
Subject: [PATCH] Added metrics support to tests

advances #2
closes #7
---
 tests/test_fine_tuning_pipeline.py | 15 +++++++++++----
 tests/test_training_pipeline.py    | 12 +++++++++---
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/tests/test_fine_tuning_pipeline.py b/tests/test_fine_tuning_pipeline.py
index 22e312f..8f592d9 100644
--- a/tests/test_fine_tuning_pipeline.py
+++ b/tests/test_fine_tuning_pipeline.py
@@ -1,5 +1,6 @@
 import pytest
 from trainers import get_trainer
+from utils.metrics import Accuracy, Precision, Recall, F1Score
 from utils.data_utils import get_dataloaders
 from models import get_model
 import torch
@@ -37,6 +38,7 @@ def test_fine_tuning_loop():
     criterion = torch.nn.CrossEntropyLoss()
     optimizer_class = torch.optim.Adam
     optimizer_params = {'lr': CONFIG_TEST['training']['learning_rate']}
+    metrics = [Accuracy(), Precision(), Recall(), F1Score()]
 
     trainer = get_trainer(CONFIG_TEST['trainer'], model=model, device=device)
 
@@ -45,7 +47,8 @@
         criterion=criterion,
         optimizer_class=optimizer_class,
         optimizer_params=optimizer_params,
-        freeze_until_layer=CONFIG_TEST['training'].get('freeze_until_layer')
+        freeze_until_layer=CONFIG_TEST['training'].get('freeze_until_layer'),
+        metrics=metrics
     )
     trainer.train(
         train_loader=train_loader,
@@ -58,7 +61,8 @@
         criterion=criterion,
         optimizer_class=optimizer_class,
         optimizer_params={'lr': 0.00001},
-        freeze_until_layer=None
+        freeze_until_layer=None,
+        metrics=metrics
     )
 
     trainer.train(
@@ -67,9 +71,12 @@
         verbose=False
     )
 
-    accuracy = trainer.evaluate(
+    metrics_results = trainer.evaluate(
         test_loader=test_loader,
         verbose=False
     )
 
-    assert accuracy >= 0, "Accuracy should be non-negative after fine-tuning"
+    assert metrics_results[0] >= 0, "Accuracy should be non-negative"
+    assert metrics_results[1] >= 0, "Precision should be non-negative"
+    assert metrics_results[2] >= 0, "Recall should be non-negative"
+    assert metrics_results[3] >= 0, "F1 Score should be non-negative"
diff --git a/tests/test_training_pipeline.py b/tests/test_training_pipeline.py
index 104c4c8..48f1ff8 100644
--- a/tests/test_training_pipeline.py
+++ b/tests/test_training_pipeline.py
@@ -1,5 +1,6 @@
 import pytest
 from trainers import get_trainer
+from utils.metrics import Accuracy, Precision, Recall, F1Score
 from utils.data_utils import get_dataloaders
 from models import get_model
 import torch
@@ -36,22 +37,27 @@ def test_training_loop():
     criterion = torch.nn.CrossEntropyLoss()
     optimizer = torch.optim.Adam
     optimizer_params = {'lr': CONFIG_TEST['training']['learning_rate']}
+    metrics = [Accuracy(), Precision(), Recall(), F1Score()]
 
     trainer = get_trainer(CONFIG_TEST['trainer'], model=model, device=device)
 
     trainer.build(
         criterion=criterion,
         optimizer_class=optimizer,
-        optimizer_params=optimizer_params
+        optimizer_params=optimizer_params,
+        metrics=metrics
     )
     trainer.train(
         train_loader=train_loader,
         num_epochs=CONFIG_TEST['training']['num_epochs'],
         verbose=False
     )
-    accuracy = trainer.evaluate(
+    metrics_results = trainer.evaluate(
         test_loader=test_loader,
         verbose=False
     )
 
-    assert accuracy >= 0, "Accuracy should be non-negative"
+    assert metrics_results[0] >= 0, "Accuracy should be non-negative"
+    assert metrics_results[1] >= 0, "Precision should be non-negative"
+    assert metrics_results[2] >= 0, "Recall should be non-negative"
+    assert metrics_results[3] >= 0, "F1 Score should be non-negative"