From d5a32e141d89e45e306abf2591c822cd527ee6d1 Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Tue, 19 Mar 2024 16:14:05 +0100
Subject: [PATCH 1/5] Fix old comparative tests script

It needs to be run from the root of the repository.
---
 ramp/test/results/.gitkeep |  0
 ramp/test/test_run.py      | 25 ++++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)
 create mode 100644 ramp/test/results/.gitkeep

diff --git a/ramp/test/results/.gitkeep b/ramp/test/results/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/ramp/test/test_run.py b/ramp/test/test_run.py
index 9717fbb1..7a4f0918 100644
--- a/ramp/test/test_run.py
+++ b/ramp/test/test_run.py
@@ -8,6 +8,8 @@
 # %% Import required modules
 import pandas as pd
 import matplotlib.pyplot as plt
+from ramp.core.core import UseCase
+from ramp.post_process import post_process as pp
 import os
 
 # %% Function to test the output against reference results
@@ -74,6 +76,7 @@ def series_to_average(profile_series, num_days):
     axes[n - 1].legend()
     axes[n - 1].set_xticklabels([])
     axes[n - 2].set_xticklabels([])
+    plt.show()
 
 
 # %% Testing the output and providing visual result
@@ -84,5 +87,25 @@
 """
 by the tested code changes. If any differences are there, the developers
 should evaluate whether these are as expected/designed or not
 """
-test_output("../results", "../test", num_input_files=3)
+from ramp.example.input_file_1 import User_list as User_list1
+from ramp.example.input_file_2 import User_list as User_list2
+from ramp.example.input_file_3 import User_list as User_list3
+
+for i, ul in enumerate([User_list1, User_list2, User_list3]):
+    of_path = os.path.join(pp.BASE_PATH, "test", "results", f"output_file_{i + 1}.csv")
+    if not os.path.exists(of_path):
+        uc = UseCase(
+            users=ul,
+            parallel_processing=False,
+        )
+        uc.initialize(peak_enlarge=0.15, num_days=30)
+
+        Profiles_list = uc.generate_daily_load_profiles(flat=True)
+
+        pp.export_series(Profiles_list, ofname=of_path)
+
+test_output(
+    os.path.join(pp.BASE_PATH, "test", "results"),
+    os.path.join(pp.BASE_PATH, "test"),
+    num_input_files=3,
+)

From 2f808b80b16db2c6d9045906eb0dff9a30cdbd16 Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Wed, 10 Apr 2024 11:19:29 +0200
Subject: [PATCH 2/5] Enhance qualitative test user experience

---
 ramp/test/test_run.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/ramp/test/test_run.py b/ramp/test/test_run.py
index 7a4f0918..e9d892af 100644
--- a/ramp/test/test_run.py
+++ b/ramp/test/test_run.py
@@ -91,6 +91,22 @@ def series_to_average(profile_series, num_days):
 from ramp.example.input_file_2 import User_list as User_list2
 from ramp.example.input_file_3 import User_list as User_list3
 
+TEST_OUTPUT_PATH = os.path.join(pp.BASE_PATH, "test", "results")
+
+remove_old_tests = False
+for file in os.listdir(TEST_OUTPUT_PATH):
+    if file.endswith(".csv"):
+        if not remove_old_tests:
+            answer = input(
+                "Some result files from a previous qualitative test already exist, do you want to overwrite them? (y/n) "
+            )
+            if answer in ("y", "yes"):
+                remove_old_tests = True
+            else:
+                break
+        if remove_old_tests:
+            os.remove(os.path.join(TEST_OUTPUT_PATH, file))
+
 for i, ul in enumerate([User_list1, User_list2, User_list3]):
     of_path = os.path.join(pp.BASE_PATH, "test", "results", f"output_file_{i + 1}.csv")
     if not os.path.exists(of_path):
(y/n)" + ) + if answer == "y" or answer == "yes": + remove_old_tests = True + else: + break + if remove_old_tests is True: + os.remove(os.path.join(TEST_OUTPUT_PATH, file)) + for i, ul in enumerate([User_list1, User_list2, User_list3]): of_path = os.path.join(pp.BASE_PATH, "test", "results", f"output_file_{i + 1}.csv") if os.path.exists(of_path) is False: From 9ccd839e6f0edfe7bb00d14a51301ebf4fcbc439 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Wed, 10 Apr 2024 11:39:27 +0200 Subject: [PATCH 3/5] Adapt Testing guidelines Issue: it was not easy to find that there were extra dependencies for testing Solution: mention it explicitely in the CONTRIBUTING.md --- CONTRIBUTING.md | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4bf3c62b..f2c1cb6d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,17 +44,31 @@ Please try to write clear commit messages. One-line messages are fine for small ## Testing -We have a qualitative testing functionality that allows to compare the results arising from a modified version of the code against default ones, for the 3 reference input files provided within the code itself. +Testing is used by RAMP developers to make sure their new feature/bug fix is not breaking existing code. As RAMP is stochastic some tests are only qualitative, other unit tests are ran by GitHub Actions. -This functionality is accessible via `test/test_run.py`. To run the qualitative test, you'll have to go through the following steps: - 1. run your modified code for the 3 reference input files for 30 days each. This will create 3 corresponding output files in the `results` folder - 2. run `test/test_run.py` and visualise the comparison between the results of your code and those obtainable with the latest stable version +Before running the tests locally, you need to install the testing dependencies + +``` +pip install -r tests/requirements.txt +``` + +### Qualitative testing + +The qualitative testing functionality allows to compare the results arising from a modified version of the code against default ones, for the 3 reference input files provided within the code itself. + +To run the qualitative test, you'll have to run + ``` + python ramp/test/test_run.py + ``` +from the root level of this repository. + +If you already ran this script, you will be asked if you want to overwrite the results files (if you decide not to, the results are not going to be regenerated from your latest code version). You should compare the results of your code and those saved from the latest stable version thanks to the image which is displayed after the script ran. Ideally, the difference between reference and new results should be minimal and just due to the stochastic nature of the code. If more pronounced, it should be fully explainable based on the changes made to the code and aligned to the expectations of the developers (i.e. it should reflect a change in the output *wanted* and precisely *sought* with the commit in question). ### Unit tests -Install `pytest` (`pip install pytest`) and run `pytest tests/` form the root of the repository to run the unit tests +Run `pytest tests/` form the root of the repository to run the unit tests. 
From b4e728a4d82804a1009b0dde4dadabae9246ae77 Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Wed, 10 Apr 2024 14:47:58 +0200
Subject: [PATCH 4/5] Modify switch_on test

Issue: the test failed randomly when assessing whether the sampled
coincidence values follow a normal distribution, because math.ceil is
applied to random.gauss to obtain an integer number of appliances to be
switched on simultaneously.

Solution: compute an experimental probability density function, fit a
normal distribution to it, and check how large the errors on the mean
and standard deviation are, as a proxy for visual inspection of the
graph.
---
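Note (illustration only, not part of the diff): the failure mode can be
reproduced with a minimal standalone sketch; the mu and sigma below are
arbitrary illustrative values, not the test's actual parameters.
Applying math.ceil to Gaussian draws yields tied integer values, which a
strict normality test such as Shapiro-Wilk rejects intermittently even
though the underlying draw is Gaussian:

    import math
    import random

    from scipy import stats

    random.seed(1)
    mu, sigma = 5.0, 1.5  # arbitrary illustrative values

    # integer samples, analogous to math.ceil(random.gauss(...)) in
    # calc_coincident_switch_on
    sample = [math.ceil(random.gauss(mu, sigma)) for _ in range(300)]

    # the discretisation produces many tied values, so the p-value
    # fluctuates from run to run and regularly drops below 0.05
    _, p_value = stats.shapiro(sample)
    print(f"Shapiro-Wilk p-value: {p_value:.4f}")

Fitting a normal probability density function to the experimental
histogram, as done in the diff below, is robust to this discretisation.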
 tests/test_switch_on.py | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 54 insertions(+), 7 deletions(-)

diff --git a/tests/test_switch_on.py b/tests/test_switch_on.py
index be7be8ee..1b51f71f 100644
--- a/tests/test_switch_on.py
+++ b/tests/test_switch_on.py
@@ -7,7 +7,10 @@
 """
 
 from ramp import User
-from scipy import stats
+import numpy as np
+import scipy.optimize as opt
+
+from ramp.core.constants import switch_on_parameters
 
 
 class TestRandSwitchOnWindow:
@@ -26,20 +29,64 @@ def test_all_appliances_switched_on_together(self):
 
     def test_coincidence_normality_on_peak(self):
         # Create an instance of the Appliance class with the desired parameters
-        appliance = self.user.add_appliance(number=10, fixed="no")
+        N = 100
+        appliance = self.user.add_appliance(number=N, fixed="no")
 
         # Generate a sample of 'coincidence' values
-        sample_size = 30
+        sample_size = N * 10
         coincidence_sample = []
         for _ in range(sample_size):
             coincidence = appliance.calc_coincident_switch_on(inside_peak_window=True)
             coincidence_sample.append(coincidence)
 
-        # Perform the Shapiro-Wilk test for normality
-        _, p_value = stats.shapiro(coincidence_sample)
+        def normed_dist(bins, mu, sigma):
+            return (
+                1
+                / (sigma * np.sqrt(2 * np.pi))
+                * np.exp(-((bins - mu) ** 2) / (2 * sigma**2))
+            )
 
-        # Assert that the p-value is greater than a chosen significance level
-        assert p_value > 0.05, "The 'coincidence' values are not normally distributed."
+        # exclude the tail values, i.e. samples where only one appliance is switched on or all of them are; see https://github.com/RAMP-project/RAMP/issues/99 for illustrations
+        coincidence_sample = np.array(coincidence_sample)
+        max_val = np.max(coincidence_sample)
+        coincidence_sample_reduced = coincidence_sample[
+            np.where(coincidence_sample != 1)
+        ]
+        coincidence_sample_reduced = coincidence_sample_reduced[
+            np.where(coincidence_sample_reduced != max_val)
+        ]
+
+        # compute the experimental probability density function for appliance numbers from 2 to N-1
+        exp_pdf, bins = np.histogram(
+            coincidence_sample_reduced,
+            bins=[i for i in range(2, N + 1, 1)],
+            density=True,
+        )
+
+        s_peak, mu_peak, op_factor = switch_on_parameters()
+        mu = mu_peak * N
+        sigma = s_peak * N * mu_peak
+
+        p0 = [mu, sigma]  # Initial guess of mean and std
+        errfunc = (
+            lambda p, x, y: normed_dist(x, *p) - y
+        )  # Distance to the target function
+        p1, success = opt.leastsq(errfunc, p0[:], args=(bins[:-1], exp_pdf))
+
+        # leastsq returns a flag between 1 and 4 when a solution was found; otherwise the fit did not succeed
+        assert success in [1, 2, 3, 4]
+
+        fit_mu, fit_stdev = p1
+        tolerance_mu = 0.05  # arbitrary
+        tolerance_sigma = 0.1  # arbitrary
+        err_mu = np.abs(mu - fit_mu) / mu
+        err_sigma = np.abs(sigma - fit_stdev) / sigma
+        assert (
+            err_mu < tolerance_mu
+        ), f"The mean of a normal fit onto the sampled coincidence histogram ({fit_mu}) deviates by more than {tolerance_mu*100} % from the provided Gaussian mean ({mu})"
+        assert (
+            err_sigma < tolerance_sigma
+        ), f"The std of a normal fit onto the sampled coincidence histogram ({fit_stdev}) deviates by more than {tolerance_sigma*100} % from the provided Gaussian std ({sigma})"
 
     # Tests that the method returns a list of indexes within the available functioning windows when there are multiple available functioning windows and the random time is larger than the duration of the appliance's function cycle.

From 4559bcfd77e611fb7ec32fe850f03736cb949517 Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Wed, 10 Apr 2024 14:59:53 +0200
Subject: [PATCH 5/5] Correct the sampling of coincident switch-on on peak

Since math.ceil already shifts the sampled mean upwards by about 0.5,
the extra +0.5 offset on mu biased the coincidence towards switching on
roughly one appliance too many on average.

Related to https://github.com/RAMP-project/RAMP/issues/99
---
 ramp/core/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ramp/core/core.py b/ramp/core/core.py
index 579141e2..75f7528f 100644
--- a/ramp/core/core.py
+++ b/ramp/core/core.py
@@ -1758,7 +1758,7 @@ def calc_coincident_switch_on(self, inside_peak_window: bool = True):
                 1,
                 math.ceil(
                     random.gauss(
-                        mu=(self.number * mu_peak + 0.5),
+                        mu=(self.number * mu_peak),
                         sigma=(s_peak * self.number * mu_peak),
                     )
                 ),
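
Note (illustration only, not part of the diff): a minimal standalone
sketch of the bias removed by this patch, with arbitrary illustrative
values for mu and sigma. math.ceil alone already raises the mean of a
Gaussian sample by about 0.5, so the additional +0.5 offset on mu pushed
the sampled coincidence roughly one appliance above the target:

    import math
    import random

    random.seed(0)
    mu, sigma, n = 50.0, 7.5, 100_000  # arbitrary illustrative values

    # sampled means of ceil(gauss(...)) with and without the +0.5 offset
    mean_old = sum(math.ceil(random.gauss(mu + 0.5, sigma)) for _ in range(n)) / n
    mean_new = sum(math.ceil(random.gauss(mu, sigma)) for _ in range(n)) / n

    print(f"target mean:          {mu}")
    print(f"with the +0.5 offset: {mean_old:.2f}")  # ~ mu + 1.0
    print(f"without the offset:   {mean_new:.2f}")  # ~ mu + 0.5, from math.ceil alone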