From d68be36d3004d6cd34452d68ab9ff7db6efa7ea3 Mon Sep 17 00:00:00 2001 From: Bastien Vallet Date: Sun, 5 Jan 2025 08:32:25 +0100 Subject: [PATCH] [ruff] Enable extra plugins --- csv/convert.py | 6 ++-- graph/chassis.py | 4 +-- graph/graph.py | 14 ++++---- graph/individual.py | 11 +++--- graph/scaling.py | 8 ++--- graph/trace.py | 35 ++++++++++--------- hwbench/bench/benchmark.py | 9 ++--- hwbench/bench/benchmarks.py | 5 +-- hwbench/bench/engine.py | 9 ++--- hwbench/bench/monitoring.py | 11 +++--- hwbench/bench/parameters.py | 3 +- hwbench/bench/test_benchmarks.py | 11 +++--- hwbench/bench/test_benchmarks_common.py | 27 ++++++++------ hwbench/bench/test_helpers.py | 4 ++- hwbench/bench/test_spike.py | 2 +- hwbench/config/config.py | 7 ++-- hwbench/config/config_helpers.py | 4 +-- hwbench/config/test_parse.py | 8 +++-- hwbench/engines/sleep.py | 6 ++-- hwbench/engines/spike.py | 12 +++---- hwbench/engines/stressng.py | 10 +++--- hwbench/engines/stressng_cpu.py | 3 +- hwbench/engines/stressng_memrate.py | 3 +- hwbench/engines/stressng_qsort.py | 3 +- hwbench/engines/stressng_stream.py | 3 +- hwbench/engines/stressng_vnni.py | 5 +-- hwbench/engines/test_parse.py | 11 +++--- hwbench/environment/cpu_cores.py | 6 ++-- hwbench/environment/cpu_info.py | 2 +- hwbench/environment/dmi.py | 4 +-- hwbench/environment/hardware.py | 3 +- hwbench/environment/lspci.py | 2 +- hwbench/environment/numa.py | 13 ++++--- hwbench/environment/packages.py | 2 +- hwbench/environment/software.py | 5 +-- hwbench/environment/test_dell_c6420.py | 3 +- hwbench/environment/test_dell_c6615.py | 3 +- hwbench/environment/test_hpe.py | 5 ++- hwbench/environment/test_vendors.py | 15 ++++---- hwbench/environment/turbostat.py | 21 +++++------ hwbench/environment/vendors/amd/amd.py | 3 +- hwbench/environment/vendors/amd/ami_aptio.py | 2 +- hwbench/environment/vendors/bmc.py | 7 ++-- hwbench/environment/vendors/dell/dell.py | 24 +++++-------- hwbench/environment/vendors/detect.py | 3 +- 
hwbench/environment/vendors/hpe/hpe.py | 18 ++++------ hwbench/environment/vendors/hpe/ilorest.py | 4 +-- hwbench/environment/vendors/mock.py | 3 +- .../environment/vendors/monitoring_device.py | 10 +++--- hwbench/environment/vendors/pdu.py | 5 +-- hwbench/environment/vendors/pdus/raritan.py | 6 ++-- hwbench/environment/vendors/vendor.py | 3 +- hwbench/hwbench.py | 2 +- hwbench/tuning/drop_caches.py | 2 +- hwbench/tuning/power_profile.py | 2 +- hwbench/tuning/scheduler.py | 2 +- hwbench/tuning/setup.py | 5 +-- hwbench/tuning/turbo_boost.py | 2 +- hwbench/utils/archive.py | 34 ++++++++---------- pyproject.toml | 13 ++++++- 60 files changed, 242 insertions(+), 226 deletions(-) diff --git a/csv/convert.py b/csv/convert.py index 5a44640..52f585a 100755 --- a/csv/convert.py +++ b/csv/convert.py @@ -58,8 +58,8 @@ def create_csv_power(out_file: pathlib.Path, data): job_name = result.get("job_name", "") job_number = result.get("job_number", "") monitoring = result.get("monitoring", {}) - for category in monitoring.keys(): - for typ in monitoring[category].keys(): + for category in monitoring: + for typ in monitoring[category]: measures = monitoring[category][typ] events = measures.get("events", []) unit = measures.get("unit") @@ -108,7 +108,7 @@ def print_memrates(out, results): job_name = result.get("job_name", "") job_number = result.get("job_number", "") workers = result.get("workers") - for key in result.keys(): + for key in result: if isinstance(result[key], dict) and "sum_speed" in result[key]: result_list.append( { diff --git a/graph/chassis.py b/graph/chassis.py index 5066caa..110c7e1 100644 --- a/graph/chassis.py +++ b/graph/chassis.py @@ -107,7 +107,7 @@ def get_marker(category: PowerCategories) -> str: "Time [seconds]", y_label, outdir, - f"time_watt_{base_outfile}_by_{str(graph_type)}", + f"time_watt_{base_outfile}_by_{graph_type!s}", ) if graph_type == PowerCategories.SERVERINCHASSIS: @@ -126,7 +126,7 @@ def get_marker(category: PowerCategories) -> str: 
y_serie = np.array(sum_serie_to_plot[str(component)])[order] curve_label = str(component) if component in [PowerCategories.SERVER, PowerCategories.SERVERINCHASSIS]: - curve_label = f"sum of {str(component)}" + curve_label = f"sum of {component!s}" graph.get_ax().plot(x_serie, y_serie, "", label=curve_label, marker=get_marker(component)) for trace in args.traces: diff --git a/graph/graph.py b/graph/graph.py index 4fcadd7..914fce4 100644 --- a/graph/graph.py +++ b/graph/graph.py @@ -215,14 +215,14 @@ def generic_graph( components = bench.get_all_metrics(component_type, filter) if not len(components): - title = f"{item_title}: no {str(component_type)} metric found" + title = f"{item_title}: no {component_type!s} metric found" if filter: title += f" with filter = '{filter}'" return 0 samples_count = bench.get_samples_count() unit = bench.get_metric_unit(component_type) - title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n' f"{args.title}\n" f"\n Stressor: " + title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n{args.title}\n\n Stressor: ' title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds" title += f"\n{bench.get_system_title()}" graph = Graph( @@ -230,7 +230,7 @@ def generic_graph( title, "Time [seconds]", unit, - output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"), + output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"), outfile, show_source_file=trace, ) @@ -245,7 +245,7 @@ def generic_graph( if args.verbose: print( - f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {str(component_type)} to graph with {samples_count} samples" + f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {component_type!s} to graph with {samples_count} samples" ) time_serie = [] @@ -280,7 +280,7 @@ def generic_graph( data_serie[component.get_full_name()].append(component.get_mean()[sample]) if second_axis: - 
for _, entry in bench.get_monitoring_metric(second_axis).items(): + for entry in bench.get_monitoring_metric(second_axis).values(): for sensor, measure in entry.items(): # We don't plot the Cores here # We don't plot sensor on y2 if already plot on y1 @@ -366,7 +366,7 @@ def yerr_graph( ) data_serie[MEAN].append(mean_value) - title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n' f"\n Stressor: " + title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n\n Stressor: ' title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds" title += f"\n{bench.get_system_title()}" @@ -375,7 +375,7 @@ def yerr_graph( title, "Time [seconds]", unit, - output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"), + output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"), f"{prefix}{component.get_name()}", show_source_file=trace, ) diff --git a/graph/individual.py b/graph/individual.py index a6a5c74..4fc06a8 100644 --- a/graph/individual.py +++ b/graph/individual.py @@ -16,7 +16,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int: benches = args.traces[0].get_benches_by_job_per_emp(job) # For all subjobs sharing the same engine module parameter # i.e int128 - for emp in benches.keys(): + for emp in benches: aggregated_perfs = {} # type: dict[str, dict[str, Any]] aggregated_perfs_watt = {} # type: dict[str, dict[str, Any]] aggregated_watt = {} # type: dict[str, dict[str, Any]] @@ -27,7 +27,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int: perf_list, unit = benches[emp]["metrics"] # For each metric we need to plot for perf in perf_list: - if perf not in aggregated_perfs.keys(): + if perf not in aggregated_perfs: aggregated_perfs[perf] = {} aggregated_perfs_watt[perf] = {} aggregated_watt[perf] = {} @@ -52,7 +52,7 @@ def individual_graph(args, output_dir, job: str, 
traces_name: list) -> int: for trace in args.traces: # Let's iterate on each Bench from this trace file matching this em for bench in trace.get_benches_by_job_per_emp(job)[emp]["bench"]: - if bench.workers() not in aggregated_perfs[perf].keys(): + if bench.workers() not in aggregated_perfs[perf]: # If the worker count is not known yet, let's init all structures with as much zeros as the number of traces # This will be the default value in case of the host doesn't have performance results aggregated_perfs[perf][bench.workers()] = [0] * len(traces_name) @@ -90,10 +90,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int: # Let's define the tree architecture based on the benchmark profile # If the benchmark has multiple performance results, let's put them in a specific directory - if len(perf_list) > 1: - outdir = outdir.joinpath(emp, perf) - else: - outdir = outdir.joinpath(emp) + outdir = outdir.joinpath(emp, perf) if len(perf_list) > 1 else outdir.joinpath(emp) # Select the proper datasource and titles/labels regarding the graph type if graph_type == "perf_watt": diff --git a/graph/scaling.py b/graph/scaling.py index 14e4646..4d9e210 100644 --- a/graph/scaling.py +++ b/graph/scaling.py @@ -20,7 +20,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int: print(f"Scaling: working on job '{job}' : {len(benches.keys())} engine_module_parameter to render") # For all subjobs sharing the same engine module parameter # i.e int128 - for emp in benches.keys(): + for emp in benches: aggregated_perfs = {} # type: dict[str, dict[str, Any]] aggregated_perfs_watt = {} # type: dict[str, dict[str, Any]] aggregated_watt = {} # type: dict[str, dict[str, Any]] @@ -38,7 +38,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int: # For each metric we need to plot for perf in perf_list: - if perf not in aggregated_perfs.keys(): + if perf not in aggregated_perfs: aggregated_perfs[perf] = {} aggregated_perfs_watt[perf] = 
{} aggregated_watt[perf] = {} @@ -62,7 +62,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int: # for each performance metric we have to plot, # let's prepare the data set to plot - if trace.get_name() not in aggregated_perfs[perf].keys(): + if trace.get_name() not in aggregated_perfs[perf]: aggregated_perfs[perf][trace.get_name()] = [] aggregated_perfs_watt[perf][trace.get_name()] = [] aggregated_watt[perf][trace.get_name()] = [] @@ -112,7 +112,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int: outfile = f"scaling_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}" y_source = aggregated_perfs - title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n' f"\n Stressor: " + title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n\n Stressor: ' title += f"{bench.get_title_engine_name()} for {bench.duration()} seconds" xlabel = "Workers" # If we have a constent ratio between cores & workers, let's report them under the Xaxis diff --git a/graph/trace.py b/graph/trace.py index 33cd509..5dd3b37 100644 --- a/graph/trace.py +++ b/graph/trace.py @@ -1,7 +1,7 @@ import json import pathlib from statistics import mean -from typing import Any # noqa: F401 +from typing import Any from graph.common import fatal from hwbench.bench.monitoring_structs import ( @@ -19,6 +19,13 @@ MEAN = "mean" +METRIC_AXIs = { + "Percent": (100, 10, 5), + "RPM": (21000, 1000, 250), + "Celsius": (110, 10, 5), +} + + class Bench: def __init__(self, trace, bench_name: str): self.trace = trace @@ -68,12 +75,12 @@ def load_monitoring(self): self.metrics = {} m = self.get_monitoring() if m: - for metric in m.keys(): + for metric in m: if metric in MonitoringMetadata.list_str(): self.metrics[metric] = m[metric] elif metric in Metrics.list_str(): self.metrics[metric] = {} - for component_family in m[metric].keys(): + for component_family in m[metric]: self.metrics[metric][component_family] = {} for measure in 
m[metric][component_family]: original_measure = m[metric][component_family][measure] @@ -100,14 +107,7 @@ def get_monitoring_metric_by_name(self, metric: Metrics, metric_name: str) -> Mo def get_monitoring_metric_axis(self, unit: str) -> tuple[Any, Any, Any]: """Return adjusted metric axis values""" - # return y_max, y_major_tick, y_minor_tick - if unit == "Percent": - return 100, 10, 5 - elif unit == "RPM": - return 21000, 1000, 250 - elif unit == "Celsius": - return 110, 10, 5 - return None, None, None + return METRIC_AXIs.get(unit, (None, None, None)) def get_component(self, metric_type: Metrics, component: Any) -> dict[str, MonitorMetric]: return self.get_monitoring_metric(metric_type)[str(component)] @@ -153,8 +153,8 @@ def get_system_title(self): d = self.get_trace().get_dmi() c = self.get_trace().get_cpu() k = self.get_trace().get_kernel() - title = f"System: {d['serial']} {d['product']} Bios " f"v{d['bios']['version']} Linux Kernel {k['release']}" - title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores " f"and {c['numa_domains']} NUMA domains" + title = f"System: {d['serial']} {d['product']} Bios v{d['bios']['version']} Linux Kernel {k['release']}" + title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores and {c['numa_domains']} NUMA domains" return title def job_name(self) -> str: @@ -449,10 +449,11 @@ def validate(self) -> None: def _list_power_metrics(self) -> list[str]: first_bench = self.first_bench() first_bench.load_monitoring() - power_metrics = [] - for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items(): - for v in value: - power_metrics.append(f"{name}.{v}") + power_metrics = [ + f"{name}.{v}" + for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items() + for v in value + ] return power_metrics def list_power_metrics(self): diff --git a/hwbench/bench/benchmark.py b/hwbench/bench/benchmark.py index cc8c56d..f325f0c 100644 --- a/hwbench/bench/benchmark.py +++ 
b/hwbench/bench/benchmark.py @@ -1,8 +1,9 @@ import time from typing import Any -from ..utils import helpers as h -from ..utils.external import External +from hwbench.utils import helpers as h +from hwbench.utils.external import External + from .engine import EngineModuleBase from .parameters import BenchmarkParameters @@ -50,7 +51,7 @@ def validate_parameters(self): p = self.get_parameters() error = e.validate_module_parameters(p) if error: - h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/" f"{e.get_name()}: {error}") + h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/{e.get_name()}: {error}") def run(self): e = self.get_enginemodule() @@ -93,7 +94,7 @@ def fully_skipped_job(self) -> bool: if not self.skip: return False - if self.parameters.get_skip_method() == "wait": + if self.parameters.get_skip_method() == "wait": # noqa: SIM103 # The job is skipped but we were asked to make a no-op run return False diff --git a/hwbench/bench/benchmarks.py b/hwbench/bench/benchmarks.py index 4971829..e99c9fd 100644 --- a/hwbench/bench/benchmarks.py +++ b/hwbench/bench/benchmarks.py @@ -4,8 +4,9 @@ import time from datetime import timedelta -from ..environment.hardware import BaseHardware -from ..utils import helpers as h +from hwbench.environment.hardware import BaseHardware +from hwbench.utils import helpers as h + from .benchmark import Benchmark from .monitoring import Monitoring from .parameters import BenchmarkParameters diff --git a/hwbench/bench/engine.py b/hwbench/bench/engine.py index f4179c8..945eb45 100644 --- a/hwbench/bench/engine.py +++ b/hwbench/bench/engine.py @@ -3,8 +3,9 @@ import abc import pathlib -from ..utils.external import External -from ..utils.helpers import fatal +from hwbench.utils.external import External +from hwbench.utils.helpers import fatal + from .parameters import BenchmarkParameters @@ -51,7 +52,7 @@ def __init__(self, name: str, binary: str, modules: dict[str, EngineModuleBase] self.modules = modules # 
FIXME: If the import is done at the file level, the mocking is lost here # So I'm importing is_binary_available just before the call :/ - from ..utils.helpers import is_binary_available + from hwbench.utils.helpers import is_binary_available if not is_binary_available(self.binary): fatal(f"Engine {name} requires '{binary}' binary, please install it.") @@ -76,4 +77,4 @@ def get_module(self, module_name: str) -> EngineModuleBase | None: return self.modules.get(module_name) def module_exists(self, module_name) -> bool: - return module_name in self.modules.keys() + return module_name in self.modules diff --git a/hwbench/bench/monitoring.py b/hwbench/bench/monitoring.py index 9a8d798..04282f1 100644 --- a/hwbench/bench/monitoring.py +++ b/hwbench/bench/monitoring.py @@ -2,9 +2,10 @@ from threading import Thread from typing import Any -from ..environment.hardware import BaseHardware -from ..environment.turbostat import Turbostat -from ..utils import helpers as h +from hwbench.environment.hardware import BaseHardware +from hwbench.environment.turbostat import Turbostat +from hwbench.utils import helpers as h + from .monitoring_structs import Metrics, MonitoringMetadata, MonitorMetric @@ -63,10 +64,10 @@ def prepare(self): def check_monitoring(source: str, metric: Metrics): data = self.get_metric(metric) if not len(data): - h.fatal(f"Cannot detect {str(metric)} metrics") + h.fatal(f"Cannot detect {metric!s} metrics") print( - f"Monitoring/{source}: {str(metric)} metrics:" + f"Monitoring/{source}: {metric!s} metrics:" + ", ".join([f"{len(data[pc])}x{pc}" for pc in data if len(data[pc]) > 0]) ) diff --git a/hwbench/bench/parameters.py b/hwbench/bench/parameters.py index 4eadaf9..1573d4a 100644 --- a/hwbench/bench/parameters.py +++ b/hwbench/bench/parameters.py @@ -1,6 +1,7 @@ import pathlib -from ..environment.hardware import BaseHardware +from hwbench.environment.hardware import BaseHardware + from .monitoring import Monitoring diff --git a/hwbench/bench/test_benchmarks.py 
b/hwbench/bench/test_benchmarks.py index 596cd83..c916389 100644 --- a/hwbench/bench/test_benchmarks.py +++ b/hwbench/bench/test_benchmarks.py @@ -1,6 +1,8 @@ import pathlib from unittest.mock import patch +import pytest + from . import test_benchmarks_common as tbc from .monitoring_structs import Metrics @@ -45,16 +47,13 @@ def test_parsing(self): for job in range(196, 199): self.assert_job(job, "check_physical_core_int8_perf", "cpu", "int8") # Ensure the auto syntax updated the number of engine instances - if job == 198: - instances = 4 - else: - instances = 2 + instances = 4 if job == 198 else 2 assert self.get_bench_parameters(job).get_engine_instances_count() == instances group_count = 0 for job in range(199, 203): group_count += 2 - self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8") # noqa: E501 + self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8") assert self.get_bench_parameters(job).get_engine_instances_count() == group_count assert len(self.get_bench_parameters(job).get_pinned_cpu()) == group_count @@ -89,7 +88,7 @@ def test_stream_short(self): self.load_benches("./hwbench/config/stream.ini") assert self.get_jobs_config().get_config().getint("global", "runtime") == 5 self.get_jobs_config().get_config().set("global", "runtime", "2") - with self.assertRaises(SystemExit): + with pytest.raises(SystemExit): self.parse_jobs_config() # This jobs_config file doesn't need monitoring assert self.benches.need_monitoring() is False diff --git a/hwbench/bench/test_benchmarks_common.py b/hwbench/bench/test_benchmarks_common.py index 5c8e394..3ed1eab 100644 --- a/hwbench/bench/test_benchmarks_common.py +++ b/hwbench/bench/test_benchmarks_common.py @@ -3,12 +3,15 @@ import unittest from unittest.mock import patch -from ..config import config -from ..environment.cpu import MockCPU -from ..environment.cpu_cores import CPU_CORES -from ..environment.cpu_info import CPU_INFO -from ..environment.mock import 
MockHardware -from ..environment.numa import NUMA +import pytest + +from hwbench.config import config +from hwbench.environment.cpu import MockCPU +from hwbench.environment.cpu_cores import CPU_CORES +from hwbench.environment.cpu_info import CPU_INFO +from hwbench.environment.mock import MockHardware +from hwbench.environment.numa import NUMA + from . import benchmarks @@ -65,10 +68,12 @@ def parse_jobs_config(self, validate_parameters=True): # We mock the run() and check_version() command to get a constant output with patch("hwbench.environment.turbostat.Turbostat.check_version") as cv: cv.return_value = True - with patch("hwbench.environment.turbostat.Turbostat.run") as ts: - with open("hwbench/tests/parsing/turbostat/run") as f: - ts.return_value = ast.literal_eval(f.read()) - return self.benches.parse_jobs_config(validate_parameters) + with ( + patch("hwbench.environment.turbostat.Turbostat.run") as ts, + open("hwbench/tests/parsing/turbostat/run") as f, + ): + ts.return_value = ast.literal_eval(f.read()) + return self.benches.parse_jobs_config(validate_parameters) def get_jobs_config(self) -> config.Config: return self.jobs_config @@ -87,7 +92,7 @@ def bench_emp(self, index) -> str: def should_be_fatal(self, func, *args): """Test if the function func is exiting.""" - with self.assertRaises(SystemExit): + with pytest.raises(SystemExit): func(*args) def assert_job(self, index, name, engine_module, engine_module_parameter=None): diff --git a/hwbench/bench/test_helpers.py b/hwbench/bench/test_helpers.py index 7d6427d..f37a479 100644 --- a/hwbench/bench/test_helpers.py +++ b/hwbench/bench/test_helpers.py @@ -1,3 +1,5 @@ +import pytest + from . 
import test_benchmarks_common as tbc @@ -62,5 +64,5 @@ def __init__(self, *args, **kwargs): def test_helpers_impossible(self): """Testing impossible helper usecase.""" - with self.assertRaises(SystemExit): + with pytest.raises(SystemExit): self.parse_jobs_config() diff --git a/hwbench/bench/test_spike.py b/hwbench/bench/test_spike.py index 5a9ad57..085140d 100644 --- a/hwbench/bench/test_spike.py +++ b/hwbench/bench/test_spike.py @@ -20,7 +20,7 @@ def test_spike(self): assert self.benches.count_benchmarks() == 5 assert self.benches.count_jobs() == 3 assert self.benches.runtime() == 300 - self.assertIsNone(self.benches.benchs[0].validate_parameters()) + assert self.benches.benchs[0].validate_parameters() is None assert self.get_bench_parameters(1).get_pinned_cpu() == self.QUADRANT0 assert self.get_bench_parameters(2).get_pinned_cpu() == self.QUADRANT1 diff --git a/hwbench/config/config.py b/hwbench/config/config.py index 925d1a1..3f1596c 100644 --- a/hwbench/config/config.py +++ b/hwbench/config/config.py @@ -6,9 +6,10 @@ import re from typing import Any -from ..bench.engine import EngineBase -from ..environment import hardware as env_hw -from ..utils import helpers as h +from hwbench.bench.engine import EngineBase +from hwbench.environment import hardware as env_hw +from hwbench.utils import helpers as h + from . 
import config_syntax diff --git a/hwbench/config/config_helpers.py b/hwbench/config/config_helpers.py index 5247a5d..66ed865 100644 --- a/hwbench/config/config_helpers.py +++ b/hwbench/config/config_helpers.py @@ -1,10 +1,10 @@ -from ..environment import hardware as env_hw +from hwbench.environment import hardware as env_hw def simple(hardware: env_hw.BaseHardware) -> str: """A naive cpu scaling.""" # 1, 2, 3, 4, 8, 16 then +16 up to the core count - # [1, 2, 3, 4, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256] # noqa: E501 + # 1, 2, 3, 4, 8, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256 core_count = [] for test in range(1, 22): if test <= 4: diff --git a/hwbench/config/test_parse.py b/hwbench/config/test_parse.py index 33c334c..1c5b868 100644 --- a/hwbench/config/test_parse.py +++ b/hwbench/config/test_parse.py @@ -1,8 +1,10 @@ import pathlib from unittest.mock import patch -from ..bench import test_benchmarks_common as tbc -from ..environment.mock import MockHardware +import pytest + +from hwbench.bench import test_benchmarks_common as tbc +from hwbench.environment.mock import MockHardware class TestParseConfig(tbc.TestCommon): @@ -75,5 +77,5 @@ def test_range_list_input(self): assert self.get_jobs_config().parse_range("int8,float") == ["int8", "float"] assert self.get_jobs_config().parse_range("1-3 4-5") == [[1, 2, 3], [4, 5]] assert self.get_jobs_config().parse_range("1,32 2,33") == [[1, 32], [2, 33]] - with self.assertRaises(SystemExit): + with pytest.raises(SystemExit): self.get_jobs_config().parse_range("bad,range,bad-range") diff --git a/hwbench/engines/sleep.py b/hwbench/engines/sleep.py index 6ae93fb..655e7ab 100644 --- a/hwbench/engines/sleep.py +++ b/hwbench/engines/sleep.py @@ -1,6 +1,6 @@ -from ..bench.benchmark import ExternalBench -from ..bench.engine import EngineBase, EngineModuleBase -from ..bench.parameters import BenchmarkParameters +from hwbench.bench.benchmark import ExternalBench +from 
hwbench.bench.engine import EngineBase, EngineModuleBase +from hwbench.bench.parameters import BenchmarkParameters class EngineModuleSleep(EngineModuleBase): diff --git a/hwbench/engines/spike.py b/hwbench/engines/spike.py index fcc1605..7513653 100644 --- a/hwbench/engines/spike.py +++ b/hwbench/engines/spike.py @@ -4,11 +4,11 @@ import time from statistics import mean -from ..bench import monitoring_structs -from ..bench.benchmark import ExternalBench -from ..bench.engine import EngineBase, EngineModuleBase -from ..bench.parameters import BenchmarkParameters -from ..utils import helpers as h +from hwbench.bench import monitoring_structs +from hwbench.bench.benchmark import ExternalBench +from hwbench.bench.engine import EngineBase, EngineModuleBase +from hwbench.bench.parameters import BenchmarkParameters +from hwbench.utils import helpers as h class EngineModuleCPUSpike(EngineModuleBase): @@ -140,7 +140,7 @@ def __spawn_stressor(self, additional_args=None, wait_stressor=False): args = [ self.engine_module.engine.get_binary(), "-c", - f"{str(self.parameters.get_engine_instances_count())}", + f"{self.parameters.get_engine_instances_count()!s}", "--cpu-method", "matrixprod", ] + additional_args diff --git a/hwbench/engines/stressng.py b/hwbench/engines/stressng.py index 805ab2e..3657e2a 100644 --- a/hwbench/engines/stressng.py +++ b/hwbench/engines/stressng.py @@ -2,10 +2,10 @@ import re -from ..bench.benchmark import ExternalBench -from ..bench.engine import EngineBase, EngineModuleBase -from ..bench.parameters import BenchmarkParameters -from ..utils import helpers as h +from hwbench.bench.benchmark import ExternalBench +from hwbench.bench.engine import EngineBase, EngineModuleBase +from hwbench.bench.parameters import BenchmarkParameters +from hwbench.utils import helpers as h class EngineModulePinnable(EngineModuleBase): @@ -17,7 +17,7 @@ def validate_module_parameters(self, params: BenchmarkParameters): pinned = [pinned] for cpu in pinned: if 
params.get_hw().logical_core_count() <= int(cpu): - return f"Cannot pin on core #{cpu} we only have " f"{params.get_hw().logical_core_count()} cores" + return f"Cannot pin on core #{cpu} we only have {params.get_hw().logical_core_count()} cores" return "" diff --git a/hwbench/engines/stressng_cpu.py b/hwbench/engines/stressng_cpu.py index 7afbdc1..af9d4d8 100644 --- a/hwbench/engines/stressng_cpu.py +++ b/hwbench/engines/stressng_cpu.py @@ -1,7 +1,8 @@ import os import subprocess -from ..bench.parameters import BenchmarkParameters +from hwbench.bench.parameters import BenchmarkParameters + from .stressng import EngineBase, EngineModulePinnable, StressNG diff --git a/hwbench/engines/stressng_memrate.py b/hwbench/engines/stressng_memrate.py index a23d034..1ddba56 100644 --- a/hwbench/engines/stressng_memrate.py +++ b/hwbench/engines/stressng_memrate.py @@ -1,7 +1,8 @@ import re from typing import Any -from ..bench.parameters import BenchmarkParameters +from hwbench.bench.parameters import BenchmarkParameters + from .stressng import EngineBase, EngineModulePinnable, StressNG diff --git a/hwbench/engines/stressng_qsort.py b/hwbench/engines/stressng_qsort.py index 02ac484..15c9bcf 100644 --- a/hwbench/engines/stressng_qsort.py +++ b/hwbench/engines/stressng_qsort.py @@ -1,4 +1,5 @@ -from ..bench.parameters import BenchmarkParameters +from hwbench.bench.parameters import BenchmarkParameters + from .stressng import EngineBase, EngineModulePinnable, StressNG diff --git a/hwbench/engines/stressng_stream.py b/hwbench/engines/stressng_stream.py index c50ff30..0da274f 100644 --- a/hwbench/engines/stressng_stream.py +++ b/hwbench/engines/stressng_stream.py @@ -3,7 +3,8 @@ import re from typing import Any -from ..bench.parameters import BenchmarkParameters +from hwbench.bench.parameters import BenchmarkParameters + from .stressng import EngineBase, EngineModulePinnable, StressNG diff --git a/hwbench/engines/stressng_vnni.py b/hwbench/engines/stressng_vnni.py index 
ac8ef23..709f249 100644 --- a/hwbench/engines/stressng_vnni.py +++ b/hwbench/engines/stressng_vnni.py @@ -1,8 +1,9 @@ from collections.abc import Callable, Iterable from typing import NamedTuple -from ..bench.parameters import BenchmarkParameters -from ..environment.hardware import BaseHardware +from hwbench.bench.parameters import BenchmarkParameters +from hwbench.environment.hardware import BaseHardware + from .stressng import EngineBase, EngineModulePinnable, StressNG diff --git a/hwbench/engines/test_parse.py b/hwbench/engines/test_parse.py index cad5712..1f7390d 100644 --- a/hwbench/engines/test_parse.py +++ b/hwbench/engines/test_parse.py @@ -3,8 +3,11 @@ import unittest from unittest.mock import patch -from ..bench.parameters import BenchmarkParameters -from ..environment.mock import MockHardware +import pytest + +from hwbench.bench.parameters import BenchmarkParameters +from hwbench.environment.mock import MockHardware + from .stressng import Engine as StressNG from .stressng_memrate import EngineModuleMemrate, StressNGMemrate from .stressng_qsort import EngineModuleQsort, StressNGQsort @@ -79,7 +82,7 @@ def test_module_parsing_output(self): stderr = (d / "stderr").read_bytes() output = test_target.parse_cmd(stdout, stderr) # these are unused in parsing - for key in test_target.parameters.get_result_format().keys(): + for key in test_target.parameters.get_result_format(): output.pop(key, None) assert output == json.loads((d / "output").read_bytes()) @@ -185,5 +188,5 @@ def test_instance(flags, method): assert test_params(AMD_7502, "avx_vpaddb128") is False assert test_params(AMD_7502, "avx_vpaddb256") is False - with self.assertRaises(LookupError): + with pytest.raises(LookupError): test_instance(AMD_9534, "inexistant") diff --git a/hwbench/environment/cpu_cores.py b/hwbench/environment/cpu_cores.py index 001199f..8059fd9 100644 --- a/hwbench/environment/cpu_cores.py +++ b/hwbench/environment/cpu_cores.py @@ -2,7 +2,7 @@ import pathlib -from 
..utils.external import External +from hwbench.utils.external import External class CPU_CORES(External): @@ -52,9 +52,9 @@ def get_cores(self, socket, number) -> list[int]: def get_physical_cores(self) -> list[int]: """Return the list of physical cores.""" - cores = [] + cores: list[int] = [] for socket in self.sockets: - cores += [key for key in self.get_socket(socket).keys()] + cores += self.get_socket(socket).keys() return cores def get_hyperthread_cores(self) -> list[int]: diff --git a/hwbench/environment/cpu_info.py b/hwbench/environment/cpu_info.py index c931070..4695273 100644 --- a/hwbench/environment/cpu_info.py +++ b/hwbench/environment/cpu_info.py @@ -1,6 +1,6 @@ import pathlib -from ..utils.external import External +from hwbench.utils.external import External class CPU_INFO(External): diff --git a/hwbench/environment/dmi.py b/hwbench/environment/dmi.py index 679c84b..493c89a 100644 --- a/hwbench/environment/dmi.py +++ b/hwbench/environment/dmi.py @@ -3,8 +3,8 @@ import os import pathlib -from ..utils.archive import create_tar_from_directory, extract_file_from_tar -from ..utils.external import External +from hwbench.utils.archive import create_tar_from_directory, extract_file_from_tar +from hwbench.utils.external import External class DmiSys: diff --git a/hwbench/environment/hardware.py b/hwbench/environment/hardware.py index dda8a2e..f75f31a 100644 --- a/hwbench/environment/hardware.py +++ b/hwbench/environment/hardware.py @@ -3,7 +3,8 @@ import pathlib from abc import abstractmethod -from ..utils.external import External_Simple +from hwbench.utils.external import External_Simple + from .base import BaseEnvironment from .cpu import CPU from .dmi import DmidecodeRaw, DmiSys diff --git a/hwbench/environment/lspci.py b/hwbench/environment/lspci.py index f9d4fee..cafe625 100644 --- a/hwbench/environment/lspci.py +++ b/hwbench/environment/lspci.py @@ -1,4 +1,4 @@ -from ..utils.external import External +from hwbench.utils.external import External class 
Lspci(External): diff --git a/hwbench/environment/numa.py b/hwbench/environment/numa.py index 335dcff..cc092b5 100644 --- a/hwbench/environment/numa.py +++ b/hwbench/environment/numa.py @@ -3,7 +3,7 @@ import pathlib import re -from ..utils.external import External +from hwbench.utils.external import External class NUMA(External): @@ -35,12 +35,11 @@ def parse_cmd(self, stdout: bytes, _stderr: bytes): numa_dest = -1 for latency in latencies.split(): numa_dest += 1 - if int(latency) < 12: - if not self.__is_numa_node_in_quadrant(numa_dest): - if not quadrant: - self.quadrants.append([]) - quadrant = self.quadrants[-1] - quadrant.append(int(numa_dest)) + if int(latency) < 12 and not self.__is_numa_node_in_quadrant(numa_dest): + if not quadrant: + self.quadrants.append([]) + quadrant = self.quadrants[-1] + quadrant.append(int(numa_dest)) return self.numa_domains def run_cmd_version(self) -> list[str]: diff --git a/hwbench/environment/packages.py b/hwbench/environment/packages.py index 546cc10..6718daf 100644 --- a/hwbench/environment/packages.py +++ b/hwbench/environment/packages.py @@ -1,4 +1,4 @@ -from ..utils.external import External +from hwbench.utils.external import External class RpmList(External): diff --git a/hwbench/environment/software.py b/hwbench/environment/software.py index 759cc20..a592bd9 100644 --- a/hwbench/environment/software.py +++ b/hwbench/environment/software.py @@ -2,8 +2,9 @@ import os import pathlib -from ..utils.archive import copy_file, create_tar_from_directory -from ..utils.external import External_Simple +from hwbench.utils.archive import copy_file, create_tar_from_directory +from hwbench.utils.external import External_Simple + from .base import BaseEnvironment from .packages import RpmList diff --git a/hwbench/environment/test_dell_c6420.py b/hwbench/environment/test_dell_c6420.py index 472e55b..dbc4448 100644 --- a/hwbench/environment/test_dell_c6420.py +++ b/hwbench/environment/test_dell_c6420.py @@ -1,6 +1,6 @@ import pathlib -from 
..bench.monitoring_structs import ( +from hwbench.bench.monitoring_structs import ( FanContext, MonitorMetric, Power, @@ -9,6 +9,7 @@ Temperature, ThermalContext, ) + from .test_dell import TestDell from .vendors.dell.dell import Dell diff --git a/hwbench/environment/test_dell_c6615.py b/hwbench/environment/test_dell_c6615.py index 331ac6a..7695015 100644 --- a/hwbench/environment/test_dell_c6615.py +++ b/hwbench/environment/test_dell_c6615.py @@ -1,6 +1,6 @@ import pathlib -from ..bench.monitoring_structs import ( +from hwbench.bench.monitoring_structs import ( FanContext, MonitorMetric, Power, @@ -9,6 +9,7 @@ Temperature, ThermalContext, ) + from .test_dell import TestDell from .vendors.dell.dell import Dell diff --git a/hwbench/environment/test_hpe.py b/hwbench/environment/test_hpe.py index f744f65..c92d862 100644 --- a/hwbench/environment/test_hpe.py +++ b/hwbench/environment/test_hpe.py @@ -1,6 +1,6 @@ import pathlib -from ..bench.monitoring_structs import ( +from hwbench.bench.monitoring_structs import ( FanContext, MonitorMetric, Power, @@ -9,6 +9,7 @@ Temperature, ThermalContext, ) + from .test_vendors import PATCH_TYPES, TestVendors from .vendors.hpe.hpe import ILO, Hpe @@ -85,8 +86,6 @@ def test_fan(self): "Fan 7": MonitorMetric("Fan 7", "Percent", 48), } - # super().generic_fan_test(expected_output) - def test_power_consumption(self): expected_output = self.generic_power_output() expected_output[str(PowerContext.BMC)] = { diff --git a/hwbench/environment/test_vendors.py b/hwbench/environment/test_vendors.py index 76e3121..0fb3a25 100644 --- a/hwbench/environment/test_vendors.py +++ b/hwbench/environment/test_vendors.py @@ -7,7 +7,8 @@ from typing import Any # noqa: F401 from unittest.mock import patch -from ..bench.monitoring_structs import FanContext, PowerContext, ThermalContext +from hwbench.bench.monitoring_structs import FanContext, PowerContext, ThermalContext + from .vendors.vendor import Vendor path = pathlib.Path("") @@ -83,11 +84,11 @@ def 
tearDown(self): def sample(self, name): """Return the samples for this test.""" output = None - file = open(self.__get_samples_file_name(name)) - output = file.readlines() - # If the file is empty but json output is requested, let's return an empty string - if not len(output): - output = "{}" + with open(self.__get_samples_file_name(name)) as file: + output = file.readlines() + # If the file is empty but json output is requested, let's return an empty string + if not len(output): + output = "{}" return ast.literal_eval("\n".join(output)) def __get_samples_file_name(self, name): @@ -117,7 +118,7 @@ def generic_power_output(self): def generic_test(self, expected_output, func): for pc in func: - if pc not in expected_output.keys(): + if pc not in expected_output: raise AssertionError(f"Missing Physical Context '{pc}' in expected_output") for sensor in func[pc]: if sensor not in expected_output[pc]: diff --git a/hwbench/environment/turbostat.py b/hwbench/environment/turbostat.py index 1184b51..7612fda 100644 --- a/hwbench/environment/turbostat.py +++ b/hwbench/environment/turbostat.py @@ -3,13 +3,14 @@ import os import re import subprocess +from contextlib import suppress from enum import Enum from packaging.version import Version -from ..bench.monitoring_structs import CPUContext, MonitorMetric, PowerContext -from ..environment.hardware import BaseHardware -from ..utils.helpers import fatal, is_binary_available +from hwbench.bench.monitoring_structs import CPUContext, MonitorMetric, PowerContext +from hwbench.environment.hardware import BaseHardware +from hwbench.utils.helpers import fatal, is_binary_available CORE = "core" PACKAGE = "package" @@ -216,21 +217,18 @@ def parse(self): # Collecting the overall packages power consumption self.power_metrics[str(PowerContext.CPU)][PACKAGE].add(self.get_global_packages_power()) - # self.__results[PACKAGE].add(self.get_global_packages_power()) # We skip the header and then extract all cores informations for line in 
self.get_output()[header_size:]: items = line.split() core_nb = items[int(self.__get_field_position(CPUSTATS.CPU))] if self.has(CPUSTATS.CORE_WATTS): - try: + # Some processors report the corewatt in the header but not for all cores ... + # So let's ignore if the metric does not exist for this core + with suppress(IndexError): self.power_metrics[str(PowerContext.CPU)][f"Core_{core_nb}"].add( float(items[int(self.__get_field_position(CPUSTATS.CORE_WATTS))]) ) - except IndexError: - # Some processors reports the corewatt in the header but not for all cores ... - # So let's ignore if the metrics does not exist for this core - pass self.freq_metrics[str(CPUContext.CPU)][f"Core_{core_nb}"].add( float(items[int(self.__get_field_position(CPUSTATS.BUSY_MHZ))]) @@ -268,10 +266,7 @@ def get_output_field(self, line, field): return None def get_output_fields(self, line, fields): - output = [] - for field in fields: - output.append(self.get_output_field(line, field)) - return output + return [self.get_output_field(line, field) for field in fields] def get_core_info(self, core_nb, info): # We ignore the two header lines and jumps to the core itself diff --git a/hwbench/environment/vendors/amd/amd.py b/hwbench/environment/vendors/amd/amd.py index e65160e..f555467 100644 --- a/hwbench/environment/vendors/amd/amd.py +++ b/hwbench/environment/vendors/amd/amd.py @@ -1,4 +1,5 @@ -from ..vendor import Vendor +from hwbench.environment.vendors.vendor import Vendor + from .ami_aptio import Ami_Aptio diff --git a/hwbench/environment/vendors/amd/ami_aptio.py b/hwbench/environment/vendors/amd/ami_aptio.py index f2aaceb..c9771d7 100644 --- a/hwbench/environment/vendors/amd/ami_aptio.py +++ b/hwbench/environment/vendors/amd/ami_aptio.py @@ -1,6 +1,6 @@ import re -from ....utils.external import External +from hwbench.utils.external import External class Ami_Aptio(External): diff --git a/hwbench/environment/vendors/bmc.py b/hwbench/environment/vendors/bmc.py index 9bcf93f..f0b8c7b 100644 ---
a/hwbench/environment/vendors/bmc.py +++ b/hwbench/environment/vendors/bmc.py @@ -5,7 +5,7 @@ import pathlib from typing import cast -from ...bench.monitoring_structs import ( +from hwbench.bench.monitoring_structs import ( FanContext, MonitorMetric, Power, @@ -13,8 +13,9 @@ PowerContext, Temperature, ) -from ...utils import helpers as h -from ...utils.external import External +from hwbench.utils import helpers as h +from hwbench.utils.external import External + from .monitoring_device import MonitoringDevice diff --git a/hwbench/environment/vendors/dell/dell.py b/hwbench/environment/vendors/dell/dell.py index 5df32f9..fc107c1 100644 --- a/hwbench/environment/vendors/dell/dell.py +++ b/hwbench/environment/vendors/dell/dell.py @@ -2,17 +2,10 @@ from typing import cast -from ....bench.monitoring_structs import ( - MonitorMetric, - Power, - PowerContext, - Temperature, -) -from ....bench.monitoring_structs import ( - PowerCategories as PowerCat, -) -from ....utils import helpers as h -from ..vendor import BMC, Vendor +from hwbench.bench.monitoring_structs import MonitorMetric, Power, PowerContext, Temperature +from hwbench.bench.monitoring_structs import PowerCategories as PowerCat +from hwbench.environment.vendors.vendor import BMC, Vendor +from hwbench.utils import helpers as h class IDRAC(BMC): @@ -32,11 +25,10 @@ def read_thermals( name = t["Name"].split("Temp")[0].strip() pc = t["PhysicalContext"] - # Adding quirks on some models - if pc is None: - # On Gen14, some PhysicalContext are not provided, let's workaround that. - if "Inlet" in name: - pc = "Intake" + # Adding quirks on some models. + # On Gen14, some PhysicalContext are not provided, let's work around that.
+ if pc is None and "Inlet" in name: + pc = "Intake" super().add_monitoring_value( cast(dict[str, dict[str, MonitorMetric]], thermals), diff --git a/hwbench/environment/vendors/detect.py b/hwbench/environment/vendors/detect.py index 45a1605..d8fde54 100644 --- a/hwbench/environment/vendors/detect.py +++ b/hwbench/environment/vendors/detect.py @@ -1,6 +1,7 @@ import pathlib -from ..dmi import DmiSys +from hwbench.environment.dmi import DmiSys + from .amd.amd import Amd from .dell.dell import Dell from .generic import GenericVendor diff --git a/hwbench/environment/vendors/hpe/hpe.py b/hwbench/environment/vendors/hpe/hpe.py index 78ab209..c02396d 100644 --- a/hwbench/environment/vendors/hpe/hpe.py +++ b/hwbench/environment/vendors/hpe/hpe.py @@ -6,17 +6,11 @@ from functools import cache from typing import cast -from ....bench.monitoring_structs import ( - MonitorMetric, - Power, - PowerContext, - Temperature, -) -from ....bench.monitoring_structs import ( - PowerCategories as PowerCat, -) -from ....utils import helpers as h -from ..vendor import BMC, Vendor +from hwbench.bench.monitoring_structs import MonitorMetric, Power, PowerContext, Temperature +from hwbench.bench.monitoring_structs import PowerCategories as PowerCat +from hwbench.environment.vendors.vendor import BMC, Vendor +from hwbench.utils import helpers as h + from .ilorest import ILOREST, Ilorest, IlorestServerclone @@ -176,7 +170,7 @@ def read_power_consumption(self, power_consumption: dict[str, dict[str, Power]] @cache def is_multinode_chassis(self) -> bool: - return True if self.get_redfish_url("/redfish/v1/Chassis/enclosurechassis/", log_failure=False) else False + return bool(self.get_redfish_url("/redfish/v1/Chassis/enclosurechassis/", log_failure=False)) def get_oem_chassis(self): if self.is_multinode_chassis(): diff --git a/hwbench/environment/vendors/hpe/ilorest.py b/hwbench/environment/vendors/hpe/ilorest.py index d552e4c..31ee7a8 100644 --- a/hwbench/environment/vendors/hpe/ilorest.py +++ 
b/hwbench/environment/vendors/hpe/ilorest.py @@ -1,8 +1,8 @@ import json import subprocess -from ....utils import helpers as h -from ....utils.external import External +from hwbench.utils import helpers as h +from hwbench.utils.external import External class Ilorest(External): diff --git a/hwbench/environment/vendors/mock.py b/hwbench/environment/vendors/mock.py index d15264a..82fd674 100644 --- a/hwbench/environment/vendors/mock.py +++ b/hwbench/environment/vendors/mock.py @@ -2,7 +2,7 @@ from typing import cast -from ...bench.monitoring_structs import ( +from hwbench.bench.monitoring_structs import ( FanContext, MonitorMetric, Power, @@ -11,6 +11,7 @@ Temperature, ThermalContext, ) + from .vendor import BMC, Vendor diff --git a/hwbench/environment/vendors/monitoring_device.py b/hwbench/environment/vendors/monitoring_device.py index e707d95..fc45797 100644 --- a/hwbench/environment/vendors/monitoring_device.py +++ b/hwbench/environment/vendors/monitoring_device.py @@ -5,10 +5,8 @@ import cachetools.func import redfish # type: ignore -from ...bench.monitoring_structs import ( - MonitorMetric, -) -from ...utils import helpers as h +from hwbench.bench.monitoring_structs import MonitorMetric +from hwbench.utils import helpers as h class MonitoringDevice: @@ -125,7 +123,9 @@ def get_redfish_url(self, url, log_failure=True): redfish = self.redfish_obj.get(url, None).dict # Let's ignore errors and return empty objects # It will be up to the caller to see there is no answer and process this - # {'error': {'code': 'iLO.0.10.ExtendedInfo', 'message': 'See @Message.ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageArgs': ['/redfish/v1/Chassis/enclosurechassis/'], 'MessageId': 'Base.1.4.ResourceMissingAtURI'}]}} + # {'error': + # {'code': 'iLO.0.10.ExtendedInfo', 'message': 'See @Message.ExtendedInfo for more information.', '@Message.ExtendedInfo': + # [{'MessageArgs': ['/redfish/v1/Chassis/enclosurechassis/'], 'MessageId': 
'Base.1.4.ResourceMissingAtURI'}]}} if redfish and "error" in redfish: if log_failure: logging.error(f"Parsing redfish url {url} failed : {redfish}") diff --git a/hwbench/environment/vendors/pdu.py b/hwbench/environment/vendors/pdu.py index 84886ba..e15bca4 100644 --- a/hwbench/environment/vendors/pdu.py +++ b/hwbench/environment/vendors/pdu.py @@ -1,7 +1,8 @@ from __future__ import annotations -from ...bench.monitoring_structs import Power, PowerContext -from ...utils import helpers as h +from hwbench.bench.monitoring_structs import Power, PowerContext +from hwbench.utils import helpers as h + from .monitoring_device import MonitoringDevice diff --git a/hwbench/environment/vendors/pdus/raritan.py b/hwbench/environment/vendors/pdus/raritan.py index ef213b8..5e8fba2 100644 --- a/hwbench/environment/vendors/pdus/raritan.py +++ b/hwbench/environment/vendors/pdus/raritan.py @@ -1,8 +1,8 @@ from __future__ import annotations -from ....bench.monitoring_structs import Power, PowerContext -from ....utils import helpers as h -from ..pdu import PDU +from hwbench.bench.monitoring_structs import Power, PowerContext +from hwbench.environment.vendors.pdu import PDU +from hwbench.utils import helpers as h def init(vendor, pdu_section): diff --git a/hwbench/environment/vendors/vendor.py b/hwbench/environment/vendors/vendor.py index 6705fc1..cce45a4 100644 --- a/hwbench/environment/vendors/vendor.py +++ b/hwbench/environment/vendors/vendor.py @@ -4,7 +4,8 @@ import os from abc import ABC, abstractmethod -from ...utils import helpers as h +from hwbench.utils import helpers as h + from .bmc import BMC from .pdu import PDU diff --git a/hwbench/hwbench.py b/hwbench/hwbench.py index 0a2a01c..247e352 100755 --- a/hwbench/hwbench.py +++ b/hwbench/hwbench.py @@ -123,7 +123,7 @@ def default(self, o): def write_output(out_dir: pathlib.Path, out): out_file = out_dir / "results.json" - print(f"Result file available at {str(out_file)}") + print(f"Result file available at {out_file!s}") 
out_file.write_text(json.dumps(out, cls=EnhancedJSONEncoder)) diff --git a/hwbench/tuning/drop_caches.py b/hwbench/tuning/drop_caches.py index 4c3d0d5..0c00f70 100644 --- a/hwbench/tuning/drop_caches.py +++ b/hwbench/tuning/drop_caches.py @@ -1,6 +1,6 @@ from pathlib import Path -from ..utils.hwlogging import tunninglog +from hwbench.utils.hwlogging import tunninglog class SysctlDropCaches: diff --git a/hwbench/tuning/power_profile.py b/hwbench/tuning/power_profile.py index 54b48a6..778dcb8 100644 --- a/hwbench/tuning/power_profile.py +++ b/hwbench/tuning/power_profile.py @@ -2,7 +2,7 @@ import pathlib import re -from ..utils.hwlogging import tunninglog +from hwbench.utils.hwlogging import tunninglog class PerformancePowerProfile: diff --git a/hwbench/tuning/scheduler.py b/hwbench/tuning/scheduler.py index 271c316..451cc54 100644 --- a/hwbench/tuning/scheduler.py +++ b/hwbench/tuning/scheduler.py @@ -1,7 +1,7 @@ import os import pathlib -from ..utils.hwlogging import tunninglog +from hwbench.utils.hwlogging import tunninglog class IOScheduler: diff --git a/hwbench/tuning/setup.py b/hwbench/tuning/setup.py index ed1a828..ff1364b 100644 --- a/hwbench/tuning/setup.py +++ b/hwbench/tuning/setup.py @@ -1,5 +1,6 @@ -from ..utils.external import External_Simple -from ..utils.hwlogging import tunninglog +from hwbench.utils.external import External_Simple +from hwbench.utils.hwlogging import tunninglog + from .drop_caches import SysctlDropCaches from .power_profile import PerformancePowerProfile from .scheduler import MQDeadlineIOScheduler diff --git a/hwbench/tuning/turbo_boost.py b/hwbench/tuning/turbo_boost.py index c16ea2f..9a32557 100644 --- a/hwbench/tuning/turbo_boost.py +++ b/hwbench/tuning/turbo_boost.py @@ -1,7 +1,7 @@ import errno from pathlib import Path -from ..utils.hwlogging import tunninglog +from hwbench.utils.hwlogging import tunninglog class TurboBoost: diff --git a/hwbench/utils/archive.py b/hwbench/utils/archive.py index 98da6b8..6c73837 100644 --- 
a/hwbench/utils/archive.py +++ b/hwbench/utils/archive.py @@ -11,20 +11,19 @@ def create_tar_from_directory(dir: str, tarfilename: pathlib.Path) -> None: """create a tar archive from a directory and its subdirectories without following the symlinks.""" # may raise tarfile.ReadError if tarfilename is not a tar file - tarfd = tarfile.open(tarfilename, "x") - for rootpath, _dirnames, filenames in os.walk(dir): - for filename in filenames: - file = pathlib.Path(rootpath) / filename - try: - content = file.read_bytes() - except OSError as e: # ignore files that might not work at the kernel level - if e.errno not in [errno.EIO, errno.EINVAL, errno.EACCES]: - print(f"{file} is unreadable {e}") - continue - tf = tarfile.TarInfo(str(file)) - tf.size = len(content) - tarfd.addfile(tf, io.BytesIO(content)) - tarfd.close() + with tarfile.open(tarfilename, "x") as tarfd: + for rootpath, _dirnames, filenames in os.walk(dir): + for filename in filenames: + file = pathlib.Path(rootpath) / filename + try: + content = file.read_bytes() + except OSError as e: # ignore files that might not work at the kernel level + if e.errno not in [errno.EIO, errno.EINVAL, errno.EACCES]: + print(f"{file} is unreadable {e}") + continue + tf = tarfile.TarInfo(str(file)) + tf.size = len(content) + tarfd.addfile(tf, io.BytesIO(content)) return None @@ -32,18 +31,13 @@ def extract_file_from_tar(tarfilename: str, filename: str) -> bytes | None: """return a specific file in a tar archive as bytes if the file exists.""" # may raise tarfile.ReadError if tarfilename is not a tar file - tarfd = tarfile.open(tarfilename, "r") - try: + with tarfile.open(tarfilename, "r") as tarfd: file = tarfd.extractfile(filename) if not file: tarfd.close() return None ret = file.read(-1) - tarfd.close() return ret - except KeyError: - tarfd.close() - return None def copy_file(filename: str, destination_dir: str) -> None: diff --git a/pyproject.toml b/pyproject.toml index 94d8e70..11c12a4 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -55,5 +55,16 @@ select = [ "F", # pyflakes "I", # isort "UP", # pyupgrade + "SIM", # flake8-simplify + "YTT", # flake8-2020 + "FA", # flake8-future-annotations + "ISC", # flake8-implicit-str-concat + "PYI", # flake8-pyi + "PT", # flake8-pytest-style + "TID", # flake8-tidy-imports + "ERA", # eradicate +] +ignore = [ + "E501", + "ISC001", ] -ignore = ["E501"]