[ruff] Enable extra plugins
Bastien Vallet committed Jan 8, 2025
1 parent 2c941d5 · commit d68be36
Showing 60 changed files with 242 additions and 226 deletions.
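The hunks below are mechanical auto-fixes. The commit does not say which plugins were enabled, but the patterns are consistent with ruff rule families such as flake8-simplify (SIM), Perflint (PERF), flake8-implicit-str-concat (ISC), flake8-tidy-imports (TID), flake8-pytest-style (PT), and the Ruff-specific rules (RUF).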
csv/convert.py · 6 changes: 3 additions & 3 deletions
@@ -58,8 +58,8 @@ def create_csv_power(out_file: pathlib.Path, data):
        job_name = result.get("job_name", "")
        job_number = result.get("job_number", "")
        monitoring = result.get("monitoring", {})
-        for category in monitoring.keys():
-            for typ in monitoring[category].keys():
+        for category in monitoring:
+            for typ in monitoring[category]:
                measures = monitoring[category][typ]
                events = measures.get("events", [])
                unit = measures.get("unit")
@@ -108,7 +108,7 @@ def print_memrates(out, results):
        job_name = result.get("job_name", "")
        job_number = result.get("job_number", "")
        workers = result.get("workers")
-        for key in result.keys():
+        for key in result:
            if isinstance(result[key], dict) and "sum_speed" in result[key]:
                result_list.append(
                    {
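The `.keys()` removals above match ruff's SIM118 (flake8-simplify): iterating a dict, or testing membership, already operates on its keys, so `.keys()` is redundant. A minimal sketch of the equivalence, with invented data:

    monitoring = {"power": {"psu1": [200, 210]}, "thermal": {"cpu": [55, 57]}}

    # Before: explicit .keys() calls
    for category in monitoring.keys():
        for typ in monitoring[category].keys():
            print(category, typ, monitoring[category][typ])

    # After: iterating a dict yields its keys directly; same output, less noise
    for category in monitoring:
        for typ in monitoring[category]:
            print(category, typ, monitoring[category][typ])

    # Membership tests behave identically
    assert ("power" in monitoring) == ("power" in monitoring.keys())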
graph/chassis.py · 4 changes: 2 additions & 2 deletions
@@ -107,7 +107,7 @@ def get_marker(category: PowerCategories) -> str:
        "Time [seconds]",
        y_label,
        outdir,
-        f"time_watt_{base_outfile}_by_{str(graph_type)}",
+        f"time_watt_{base_outfile}_by_{graph_type!s}",
    )

    if graph_type == PowerCategories.SERVERINCHASSIS:
@@ -126,7 +126,7 @@ def get_marker(category: PowerCategories) -> str:
        y_serie = np.array(sum_serie_to_plot[str(component)])[order]
        curve_label = str(component)
        if component in [PowerCategories.SERVER, PowerCategories.SERVERINCHASSIS]:
-            curve_label = f"sum of {str(component)}"
+            curve_label = f"sum of {component!s}"
        graph.get_ax().plot(x_serie, y_serie, "", label=curve_label, marker=get_marker(component))

    for trace in args.traces:
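The `{str(graph_type)}` → `{graph_type!s}` rewrites look like ruff's RUF010 (explicit-f-string-type-conversion): f-strings have built-in conversion flags, so wrapping a value in `str()` inside the braces is redundant. A small sketch with a stand-in enum (names are illustrative, not the project's):

    from enum import Enum

    class PowerCategories(Enum):  # stand-in for the project's enum
        SERVER = "server"

        def __str__(self) -> str:
            return self.value

    component = PowerCategories.SERVER
    # !s applies str() during formatting, so both spellings render the same
    assert f"sum of {component!s}" == f"sum of {str(component)}" == "sum of server"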
graph/graph.py · 14 changes: 7 additions & 7 deletions
@@ -215,22 +215,22 @@ def generic_graph(

    components = bench.get_all_metrics(component_type, filter)
    if not len(components):
-        title = f"{item_title}: no {str(component_type)} metric found"
+        title = f"{item_title}: no {component_type!s} metric found"
        if filter:
            title += f" with filter = '{filter}'"
        return 0

    samples_count = bench.get_samples_count()
    unit = bench.get_metric_unit(component_type)
-    title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n' f"{args.title}\n" f"\n Stressor: "
+    title = f'{item_title} during "{bench.get_bench_name()}" benchmark job\n{args.title}\n\n Stressor: '
    title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds"
    title += f"\n{bench.get_system_title()}"
    graph = Graph(
        args,
        title,
        "Time [seconds]",
        unit,
-        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"),
+        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"),
        outfile,
        show_source_file=trace,
    )
@@ -245,7 +245,7 @@ def generic_graph(

    if args.verbose:
        print(
-            f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {str(component_type)} to graph with {samples_count} samples"
+            f"{trace.get_name()}/{bench.get_bench_name()}: {len(components)} {component_type!s} to graph with {samples_count} samples"
        )

    time_serie = []
@@ -280,7 +280,7 @@ def generic_graph(
            data_serie[component.get_full_name()].append(component.get_mean()[sample])

    if second_axis:
-        for _, entry in bench.get_monitoring_metric(second_axis).items():
+        for entry in bench.get_monitoring_metric(second_axis).values():
            for sensor, measure in entry.items():
                # We don't plot the Cores here
                # We don't plot sensor on y2 if already plot on y1
@@ -366,7 +366,7 @@ def yerr_graph(
        )
        data_serie[MEAN].append(mean_value)

-    title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n' f"\n Stressor: "
+    title = f'{prefix}{component.get_name()} during "{bench.get_bench_name()}" benchmark job\n\n Stressor: '
    title += f"{bench.workers()} x {bench.get_title_engine_name()} for {bench.duration()} seconds"
    title += f"\n{bench.get_system_title()}"

@@ -375,7 +375,7 @@ def yerr_graph(
        title,
        "Time [seconds]",
        unit,
-        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{str(component_type)}"),
+        output_dir.joinpath(f"{trace.get_name()}/{bench.get_bench_name()}/{component_type!s}"),
        f"{prefix}{component.get_name()}",
        show_source_file=trace,
    )
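Besides more `!s` conversions, graph.py picks up two other fixes: adjacent f-string literals merged into one (implicit string concatenation, see the scaling.py note below) and `.items()` replaced by `.values()` where the key is discarded, which matches Perflint's PERF102 (incorrect-dict-iterator). A sketch of the iterator fix, with invented data:

    metrics = {"fan1": {"speed": 1200}, "fan2": {"speed": 900}}

    # Before: .items() builds (key, value) pairs only to throw the key away
    for _, entry in metrics.items():
        print(entry["speed"])

    # After: .values() states the intent and skips the unused key
    for entry in metrics.values():
        print(entry["speed"])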
graph/individual.py · 11 changes: 4 additions & 7 deletions
@@ -16,7 +16,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
    benches = args.traces[0].get_benches_by_job_per_emp(job)
    # For all subjobs sharing the same engine module parameter
    # i.e int128
-    for emp in benches.keys():
+    for emp in benches:
        aggregated_perfs = {}  # type: dict[str, dict[str, Any]]
        aggregated_perfs_watt = {}  # type: dict[str, dict[str, Any]]
        aggregated_watt = {}  # type: dict[str, dict[str, Any]]
@@ -27,7 +27,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
        perf_list, unit = benches[emp]["metrics"]
        # For each metric we need to plot
        for perf in perf_list:
-            if perf not in aggregated_perfs.keys():
+            if perf not in aggregated_perfs:
                aggregated_perfs[perf] = {}
                aggregated_perfs_watt[perf] = {}
                aggregated_watt[perf] = {}
@@ -52,7 +52,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:
            for trace in args.traces:
                # Let's iterate on each Bench from this trace file matching this em
                for bench in trace.get_benches_by_job_per_emp(job)[emp]["bench"]:
-                    if bench.workers() not in aggregated_perfs[perf].keys():
+                    if bench.workers() not in aggregated_perfs[perf]:
                        # If the worker count is not known yet, let's init all structures with as much zeros as the number of traces
                        # This will be the default value in case of the host doesn't have performance results
                        aggregated_perfs[perf][bench.workers()] = [0] * len(traces_name)
@@ -90,10 +90,7 @@ def individual_graph(args, output_dir, job: str, traces_name: list) -> int:

            # Let's define the tree architecture based on the benchmark profile
            # If the benchmark has multiple performance results, let's put them in a specific directory
-            if len(perf_list) > 1:
-                outdir = outdir.joinpath(emp, perf)
-            else:
-                outdir = outdir.joinpath(emp)
+            outdir = outdir.joinpath(emp, perf) if len(perf_list) > 1 else outdir.joinpath(emp)

            # Select the proper datasource and titles/labels regarding the graph type
            if graph_type == "perf_watt":
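Collapsing the four-line if/else on `outdir` into a conditional expression matches ruff's SIM108 (if-else-block-instead-of-if-exp). A minimal sketch with stand-in values:

    import pathlib

    base = pathlib.Path("graphs")
    emp, perf, perf_list = "int128", "bogo_ops", ["bogo_ops", "latency"]

    # Before: four lines to pick one of two paths
    if len(perf_list) > 1:
        outdir = base.joinpath(emp, perf)
    else:
        outdir = base.joinpath(emp)

    # After: one assignment with the same branches
    assert outdir == (base.joinpath(emp, perf) if len(perf_list) > 1 else base.joinpath(emp))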
graph/scaling.py · 8 changes: 4 additions & 4 deletions
@@ -20,7 +20,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:
    print(f"Scaling: working on job '{job}' : {len(benches.keys())} engine_module_parameter to render")
    # For all subjobs sharing the same engine module parameter
    # i.e int128
-    for emp in benches.keys():
+    for emp in benches:
        aggregated_perfs = {}  # type: dict[str, dict[str, Any]]
        aggregated_perfs_watt = {}  # type: dict[str, dict[str, Any]]
        aggregated_watt = {}  # type: dict[str, dict[str, Any]]
@@ -38,7 +38,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:

        # For each metric we need to plot
        for perf in perf_list:
-            if perf not in aggregated_perfs.keys():
+            if perf not in aggregated_perfs:
                aggregated_perfs[perf] = {}
                aggregated_perfs_watt[perf] = {}
                aggregated_watt[perf] = {}
@@ -62,7 +62,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:

                # for each performance metric we have to plot,
                # let's prepare the data set to plot
-                if trace.get_name() not in aggregated_perfs[perf].keys():
+                if trace.get_name() not in aggregated_perfs[perf]:
                    aggregated_perfs[perf][trace.get_name()] = []
                    aggregated_perfs_watt[perf][trace.get_name()] = []
                    aggregated_watt[perf][trace.get_name()] = []
@@ -112,7 +112,7 @@ def scaling_graph(args, output_dir, job: str, traces_name: list) -> int:
            outfile = f"scaling_{clean_perf}_{bench.get_title_engine_name().replace(' ','_')}"
            y_source = aggregated_perfs

-        title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n' f"\n Stressor: "
+        title = f'{args.title}\n\n{graph_type_title} via "{job}" benchmark job\n\n Stressor: '
        title += f"{bench.get_title_engine_name()} for {bench.duration()} seconds"
        xlabel = "Workers"
        # If we have a constent ratio between cores & workers, let's report them under the Xaxis
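The title rewrite above (and the similar ones in graph.py and trace.py) is the implicit-string-concatenation cleanup: two adjacent f-string literals are glued into one at parse time anyway, so ruff's ISC rules fold them into a single literal. A minimal demonstration:

    job = "memrate"
    # Adjacent string literals are concatenated at parse time...
    split = f'via "{job}" benchmark job\n' f"\n Stressor: "
    # ...so one merged literal is strictly equivalent and easier to read
    merged = f'via "{job}" benchmark job\n\n Stressor: '
    assert split == merged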
graph/trace.py · 35 changes: 18 additions & 17 deletions
@@ -1,7 +1,7 @@
import json
import pathlib
from statistics import mean
-from typing import Any  # noqa: F401
+from typing import Any

from graph.common import fatal
from hwbench.bench.monitoring_structs import (
@@ -19,6 +19,13 @@
MEAN = "mean"


+METRIC_AXIs = {
+    "Percent": (100, 10, 5),
+    "RPM": (21000, 1000, 250),
+    "Celsius": (110, 10, 5),
+}
+
+
class Bench:
    def __init__(self, trace, bench_name: str):
        self.trace = trace
@@ -68,12 +75,12 @@ def load_monitoring(self):
        self.metrics = {}
        m = self.get_monitoring()
        if m:
-            for metric in m.keys():
+            for metric in m:
                if metric in MonitoringMetadata.list_str():
                    self.metrics[metric] = m[metric]
                elif metric in Metrics.list_str():
                    self.metrics[metric] = {}
-                    for component_family in m[metric].keys():
+                    for component_family in m[metric]:
                        self.metrics[metric][component_family] = {}
                        for measure in m[metric][component_family]:
                            original_measure = m[metric][component_family][measure]
@@ -100,14 +107,7 @@ def get_monitoring_metric_by_name(self, metric: Metrics, metric_name: str) -> Mo

    def get_monitoring_metric_axis(self, unit: str) -> tuple[Any, Any, Any]:
        """Return adjusted metric axis values"""
-        # return y_max, y_major_tick, y_minor_tick
-        if unit == "Percent":
-            return 100, 10, 5
-        elif unit == "RPM":
-            return 21000, 1000, 250
-        elif unit == "Celsius":
-            return 110, 10, 5
-        return None, None, None
+        return METRIC_AXIs.get(unit, (None, None, None))

    def get_component(self, metric_type: Metrics, component: Any) -> dict[str, MonitorMetric]:
        return self.get_monitoring_metric(metric_type)[str(component)]
@@ -153,8 +153,8 @@ def get_system_title(self):
        d = self.get_trace().get_dmi()
        c = self.get_trace().get_cpu()
        k = self.get_trace().get_kernel()
-        title = f"System: {d['serial']} {d['product']} Bios " f"v{d['bios']['version']} Linux Kernel {k['release']}"
-        title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores " f"and {c['numa_domains']} NUMA domains"
+        title = f"System: {d['serial']} {d['product']} Bios v{d['bios']['version']} Linux Kernel {k['release']}"
+        title += f"\nProcessor: {c['model']} with {c['physical_cores']} cores and {c['numa_domains']} NUMA domains"
        return title

    def job_name(self) -> str:
Expand Down Expand Up @@ -449,10 +449,11 @@ def validate(self) -> None:
def _list_power_metrics(self) -> list[str]:
first_bench = self.first_bench()
first_bench.load_monitoring()
power_metrics = []
for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items():
for v in value:
power_metrics.append(f"{name}.{v}")
power_metrics = [
f"{name}.{v}"
for name, value in first_bench.get_monitoring_metric(Metrics.POWER_CONSUMPTION).items()
for v in value
]
return power_metrics

def list_power_metrics(self):
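Two rewrites stand out in trace.py: the unit if/elif chain becomes a module-level lookup table queried with `dict.get` (the shape ruff's SIM116, if-else-block-instead-of-dict-lookup, pushes toward), and the append loop in `_list_power_metrics` becomes a list comprehension (Perflint's PERF401). A sketch of both, with invented data (the commit's actual table is named `METRIC_AXIs`):

    METRIC_AXIS = {
        "Percent": (100, 10, 5),
        "RPM": (21000, 1000, 250),
        "Celsius": (110, 10, 5),
    }

    def get_monitoring_metric_axis(unit: str):
        # One .get with a default replaces three elif branches plus a fallback
        return METRIC_AXIS.get(unit, (None, None, None))

    assert get_monitoring_metric_axis("RPM") == (21000, 1000, 250)
    assert get_monitoring_metric_axis("Watt") == (None, None, None)

    # PERF401: a nested append loop reads better as a comprehension
    sensors = {"psu1": ["power"], "psu2": ["power", "voltage"]}
    power_metrics = [f"{name}.{v}" for name, value in sensors.items() for v in value]
    assert power_metrics == ["psu1.power", "psu2.power", "psu2.voltage"]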
hwbench/bench/benchmark.py · 9 changes: 5 additions & 4 deletions
@@ -1,8 +1,9 @@
import time
from typing import Any

-from ..utils import helpers as h
-from ..utils.external import External
+from hwbench.utils import helpers as h
+from hwbench.utils.external import External
+
from .engine import EngineModuleBase
from .parameters import BenchmarkParameters

@@ -50,7 +51,7 @@ def validate_parameters(self):
        p = self.get_parameters()
        error = e.validate_module_parameters(p)
        if error:
-            h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/" f"{e.get_name()}: {error}")
+            h.fatal(f"Unsupported parameter for {e.get_engine().get_name()}/{e.get_name()}: {error}")

    def run(self):
        e = self.get_enginemodule()
@@ -93,7 +94,7 @@ def fully_skipped_job(self) -> bool:
        if not self.skip:
            return False

-        if self.parameters.get_skip_method() == "wait":
+        if self.parameters.get_skip_method() == "wait":  # noqa: SIM103
            # The job is skipped but we were asked to make a no-op run
            return False

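The new `# noqa: SIM103` is the one place a rule is deliberately suppressed: SIM103 (needless-bool) wants `return <condition>` instead of a branch that returns a constant, but here the early-return shape keeps its explanatory comment, so the fix is opted out on that line. What the rule normally does, as a sketch:

    # What SIM103 flags:
    def is_even_verbose(n: int) -> bool:
        if n % 2 == 0:
            return True
        return False

    # What it auto-fixes to:
    def is_even(n: int) -> bool:
        return n % 2 == 0

    assert is_even_verbose(4) and is_even(4)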
hwbench/bench/benchmarks.py · 5 changes: 3 additions & 2 deletions
@@ -4,8 +4,9 @@
import time
from datetime import timedelta

-from ..environment.hardware import BaseHardware
-from ..utils import helpers as h
+from hwbench.environment.hardware import BaseHardware
+from hwbench.utils import helpers as h
+
from .benchmark import Benchmark
from .monitoring import Monitoring
from .parameters import BenchmarkParameters
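This file and its siblings (benchmark.py, engine.py, monitoring.py, parameters.py) get the same import treatment: parent-relative `from ..utils import ...` becomes absolute `from hwbench.utils import ...`, the pattern enforced by flake8-tidy-imports' TID252 when relative imports are banned, and isort-style grouping then separates the first-party absolute block from the remaining same-package `.` imports with a blank line.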
hwbench/bench/engine.py · 9 changes: 5 additions & 4 deletions
@@ -3,8 +3,9 @@
import abc
import pathlib

-from ..utils.external import External
-from ..utils.helpers import fatal
+from hwbench.utils.external import External
+from hwbench.utils.helpers import fatal
+
from .parameters import BenchmarkParameters


@@ -51,7 +52,7 @@ def __init__(self, name: str, binary: str, modules: dict[str, EngineModuleBase]
        self.modules = modules
        # FIXME: If the import is done at the file level, the mocking is lost here
        # So I'm importing is_binary_available just before the call :/
-        from ..utils.helpers import is_binary_available
+        from hwbench.utils.helpers import is_binary_available

        if not is_binary_available(self.binary):
            fatal(f"Engine {name} requires '{binary}' binary, please install it.")
@@ -76,4 +77,4 @@ def get_module(self, module_name: str) -> EngineModuleBase | None:
        return self.modules.get(module_name)

    def module_exists(self, module_name) -> bool:
-        return module_name in self.modules.keys()
+        return module_name in self.modules
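The FIXME kept in engine.py explains why `is_binary_available` stays a function-local import even after the absolute-import cleanup: a name bound at module import time keeps pointing at the original function, so `unittest.mock.patch` on the source module never reaches it, while a call-time import re-resolves the (possibly patched) attribute. A self-contained illustration using os.path instead of hwbench's helpers:

    from os.path import exists as exists_bound_early  # bound once, at import time
    from unittest.mock import patch

    def check_bound_early(path: str) -> bool:
        return exists_bound_early(path)

    def check_bound_late(path: str) -> bool:
        from os.path import exists  # re-resolved on every call
        return exists(path)

    with patch("os.path.exists", return_value=True):
        assert check_bound_late("/definitely/not/there") is True    # sees the mock
        assert check_bound_early("/definitely/not/there") is False  # misses it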
hwbench/bench/monitoring.py · 11 changes: 6 additions & 5 deletions
@@ -2,9 +2,10 @@
from threading import Thread
from typing import Any

-from ..environment.hardware import BaseHardware
-from ..environment.turbostat import Turbostat
-from ..utils import helpers as h
+from hwbench.environment.hardware import BaseHardware
+from hwbench.environment.turbostat import Turbostat
+from hwbench.utils import helpers as h
+
from .monitoring_structs import Metrics, MonitoringMetadata, MonitorMetric


@@ -63,10 +64,10 @@ def prepare(self):
        def check_monitoring(source: str, metric: Metrics):
            data = self.get_metric(metric)
            if not len(data):
-                h.fatal(f"Cannot detect {str(metric)} metrics")
+                h.fatal(f"Cannot detect {metric!s} metrics")

            print(
-                f"Monitoring/{source}: {str(metric)} metrics:"
+                f"Monitoring/{source}: {metric!s} metrics:"
                + ", ".join([f"{len(data[pc])}x{pc}" for pc in data if len(data[pc]) > 0])
            )

hwbench/bench/parameters.py · 3 changes: 2 additions & 1 deletion
@@ -1,6 +1,7 @@
import pathlib

-from ..environment.hardware import BaseHardware
+from hwbench.environment.hardware import BaseHardware
+
from .monitoring import Monitoring


hwbench/bench/test_benchmarks.py · 11 changes: 5 additions & 6 deletions
@@ -1,6 +1,8 @@
import pathlib
from unittest.mock import patch

+import pytest
+
from . import test_benchmarks_common as tbc
from .monitoring_structs import Metrics

@@ -45,16 +47,13 @@ def test_parsing(self):
        for job in range(196, 199):
            self.assert_job(job, "check_physical_core_int8_perf", "cpu", "int8")
            # Ensure the auto syntax updated the number of engine instances
-            if job == 198:
-                instances = 4
-            else:
-                instances = 2
+            instances = 4 if job == 198 else 2
            assert self.get_bench_parameters(job).get_engine_instances_count() == instances

        group_count = 0
        for job in range(199, 203):
            group_count += 2
-            self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8")  # noqa: E501
+            self.assert_job(job, "check_physical_core_scale_plus_1_int8_perf", "cpu", "int8")
            assert self.get_bench_parameters(job).get_engine_instances_count() == group_count
            assert len(self.get_bench_parameters(job).get_pinned_cpu()) == group_count

@@ -89,7 +88,7 @@ def test_stream_short(self):
        self.load_benches("./hwbench/config/stream.ini")
        assert self.get_jobs_config().get_config().getint("global", "runtime") == 5
        self.get_jobs_config().get_config().set("global", "runtime", "2")
-        with self.assertRaises(SystemExit):
+        with pytest.raises(SystemExit):
            self.parse_jobs_config()
        # This jobs_config file doesn't need monitoring
        assert self.benches.need_monitoring() is False
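Swapping `self.assertRaises` for `pytest.raises` (hence the new `import pytest` at the top of the file) matches flake8-pytest-style's PT027 (pytest-unittest-raises-assertion). Both context managers catch the exception the same way; the pytest one also exposes it for further checks. A minimal sketch with a stand-in function:

    import pytest

    def parse_jobs_config():
        raise SystemExit(1)  # stand-in for a jobs config that fails validation

    with pytest.raises(SystemExit) as excinfo:
        parse_jobs_config()
    assert excinfo.value.code == 1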