Skip to content

Commit

Permalink
Merge pull request #204 from feelpp/160-separate-plots-config-from-be…
Browse files Browse the repository at this point in the history
…nchmark

resolves: #160 — separate plots config from benchmark
  • Loading branch information
JavierCladellas authored Dec 20, 2024
2 parents 94f9f46 + 160673e commit 0887ee2
Show file tree
Hide file tree
Showing 9 changed files with 152 additions and 42 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ jobs:
- name: Dry-run reports
run: |
source .venv/bin/activate
render-benchmarks --config_file reports/website_config.json
render-benchmarks --config-file reports/website_config.json
- name: Check files
run: | #TODO: check if not empty (maybe)
nb_rfm_report_files=$(ls -1q reports/parallelSum/parallel_sum/gaya|wc -l)
Expand Down Expand Up @@ -109,7 +109,7 @@ jobs:
- name: Render reports
run: |
source .venv/bin/activate
render-benchmarks --config_file=./tmp/website_config.json
render-benchmarks --config-file=./tmp/website_config.json
env:
GIRDER_API_KEY: ${{ secrets.GIRDER }}
- name: Build Antora Site
Expand Down
6 changes: 3 additions & 3 deletions docs/modules/tutorial/pages/gettingstarted.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,6 @@ Once this file is located, users can run the `render-benchmarks` command to rend

The script takes the following arguments:

- `config_file` : The path of the website configuration file.
- `json_output_path`: [Optional] Path of the directory to download the reports to. Only relevant if the configuration file contains remote locations (only Girder is supported at the moment).
- `modules_path`: [Optional] Path to the Antora module to render the reports to. It defaults to _docs/modules/ROOT/pages_. Multiple directories will be recursively created under the provided path.
- `config-file` : The path of the website configuration file.
- `remote-download-dir`: [Optional] Path of the directory to download the reports to. Only relevant if the configuration file contains remote locations (only Girder is supported at the moment).
- `modules-path`: [Optional] Path to the Antora module to render the reports to. It defaults to _docs/modules/ROOT/pages_. Multiple directories will be recursively created under the provided path.
2 changes: 1 addition & 1 deletion netlify-build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -17,5 +17,5 @@ else
echo "Downloading Production benchmarks"
girder-download -gid $production_website_config_id -o ./tmp/ -fn website_config.json
fi
render-benchmarks --config_file=./tmp/website_config.json
render-benchmarks --config-file=./tmp/website_config.json
npx antora --stacktrace generate --cache-dir cache --clean --html-url-extension-style=indexify site.yml
2 changes: 1 addition & 1 deletion src/feelpp/benchmarking/reframe/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def main_cli():
#======================================================#

if parser.args.website:
subprocess.run(["render-benchmarks","--config_file", website_config.config_filepath])
subprocess.run(["render-benchmarks","--config-file", website_config.config_filepath])
subprocess.run(["npm","run","antora"])
subprocess.run(["npm","run","start"])

Expand Down
53 changes: 20 additions & 33 deletions src/feelpp/benchmarking/report/__main__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import argparse, os, json
import os, json, subprocess

from feelpp.benchmarking.report.config.handlers import ConfigHandler, GirderHandler
from feelpp.benchmarking.report.atomicReports.repository import AtomicReportRepository
Expand All @@ -7,21 +7,15 @@
from feelpp.benchmarking.report.useCases.repository import UseCaseRepository

from feelpp.benchmarking.report.renderer import RendererFactory

from feelpp.benchmarking.report.parser import ReportArgParser


def main_cli():
parser = argparse.ArgumentParser(description="Render all benchmarking reports")
parser.add_argument("--config_file", type=str, help="Path to the JSON config file", default="./src/feelpp/benchmarking/report/config/config.json")
parser.add_argument("--json_output_path", type=str, help="Path to the output directory", default="reports")
parser.add_argument("--modules_path", type=str, help="Path to the modules directory", default="./docs/modules/ROOT/pages")
args = parser.parse_args()

# Arguments treatment
json_output_path = args.json_output_path[:-1] if args.json_output_path[-1] == "/" else args.json_output_path
parser = ReportArgParser()
parser.printArgs()

config_handler = ConfigHandler(args.config_file)
girder_handler = GirderHandler(json_output_path)
config_handler = ConfigHandler(parser.args.config_file)
girder_handler = GirderHandler(parser.args.remote_download_dir)

applications = ApplicationRepository(config_handler.applications)
use_cases = UseCaseRepository(config_handler.use_cases)
Expand All @@ -36,31 +30,24 @@ def main_cli():
index_renderer = RendererFactory.create("index")
overview_renderer = RendererFactory.create("atomic_overview")

with open("./src/feelpp/benchmarking/report/config/overviewConfig.json","r") as f:
with open(parser.args.overview_config,"r") as f:
overview_config = json.load(f)

print("----- APPLICATIONS VIEW -------")
applications.printHierarchy()
applications.initModules(args.modules_path, index_renderer, parent_id="catalog-index")
applications.initOverviewModels(overview_config)
applications.createOverviews(args.modules_path,overview_renderer)
print("-------------------------------")
if parser.args.plot_configs:
atomic_reports.patchPlotConfigs(parser.args.plot_configs, parser.args.patch_reports, parser.args.save_patches)

print("----- MACHINES VIEW -------")
machines.printHierarchy()
machines.initModules(args.modules_path, index_renderer, parent_id="catalog-index")
machines.initOverviewModels(overview_config)
machines.createOverviews(args.modules_path,overview_renderer)
print("-------------------------------")
for repository in [applications,machines,use_cases]:
repository.printHierarchy()
repository.initModules(parser.args.modules_path, index_renderer, parent_id="catalog-index")
repository.initOverviewModels(overview_config)
repository.createOverviews(parser.args.modules_path,overview_renderer)

print("----- USE CASES VIEW -------")
use_cases.printHierarchy()
use_cases.initModules(args.modules_path, index_renderer, parent_id="catalog-index")
use_cases.initOverviewModels(overview_config)
use_cases.createOverviews(args.modules_path,overview_renderer)
print("-------------------------------")

report_renderer = RendererFactory.create("benchmark")

atomic_reports.movePartials(os.path.join(args.modules_path,"descriptions"))
atomic_reports.createReports(os.path.join(args.modules_path,"reports"),report_renderer)
atomic_reports.movePartials(os.path.join(parser.args.modules_path,"descriptions"))
atomic_reports.createReports(os.path.join(parser.args.modules_path,"reports"),report_renderer)

if parser.args.website:
subprocess.run(["npm","run","antora"])
subprocess.run(["npm","run","start"])
8 changes: 8 additions & 0 deletions src/feelpp/benchmarking/report/atomicReports/atomicReport.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ def __init__(self, application_id, machine_id, use_case_id, reframe_report_json,
partials_dir (str): The directory path where parametric descriptions of the use case can be found (usually comes with the reframe report). Pass None if non-existent
"""
data = self.parseJson(reframe_report_json)
self.plots_config_path = plot_config_json
self.plots_config = self.parseJson(plot_config_json)
self.partials_dir = partials_dir

Expand All @@ -41,6 +42,13 @@ def __init__(self, application_id, machine_id, use_case_id, reframe_report_json,

self.model = AtomicReportModel(self.runs)

def replacePlotsConfig(self,plot_config_json,save=False):
    """ Replace this report's plots configuration with the one read from a new JSON file.

    Args:
        plot_config_json (str): Path to a JSON file containing the new plot configuration
            (expected to contain a top-level "plots" key, per the subscript below).
        save (bool): If True, overwrite the report's original plots configuration file
            (self.plots_config_path) with the newly loaded configuration.
    """
    # Trace which report is being patched and with which configuration file.
    print(f"Patching plots for {self.machine_id}-{self.application_id}-{self.use_case_id}-{self.date} with {plot_config_json}")
    # NOTE(review): __init__ stores the full parsed JSON in self.plots_config, while this
    # method stores only the "plots" list -- confirm that consumers handle both shapes.
    self.plots_config = self.parseJson(plot_config_json)["plots"]
    if save:
        # NOTE(review): this writes only the "plots" list back to the original file,
        # dropping the top-level "plots" key and changing the file schema -- verify intended.
        with open(self.plots_config_path, "w") as old_f:
            json.dump(self.plots_config,old_f)

def setIndexes(self, application, machine, use_case):
""" Set the indexes for the atomic report.
Along with the date, they should form a unique identifier for the report.
Expand Down
49 changes: 48 additions & 1 deletion src/feelpp/benchmarking/report/atomicReports/repository.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from feelpp.benchmarking.report.atomicReports.atomicReport import AtomicReport
from feelpp.benchmarking.report.base.repository import Repository
import os
from datetime import datetime

class AtomicReportRepository(Repository):
""" Repository for atomic reports """
Expand Down Expand Up @@ -144,4 +145,50 @@ def movePartials(self,base_dir):
os.mkdir(base_dir)

for atomic_report in self.data:
atomic_report.movePartials(base_dir)
atomic_report.movePartials(base_dir)


def patchPlotConfigs(self,plot_configs, patch_reports_ids, save = False):
    """ Replace the plot configuration of selected reports with new ones.

    Patching modes:
        - No ``patch_reports_ids``: ``plot_configs`` must contain exactly one file,
          which is applied to the most recent report.
        - One plot config, many report ids: the single config is applied to every
          matched report.
        - As many plot configs as report ids: configs are paired with ids positionally.

    Args:
        plot_configs (list[str]): List of filepaths containing the new plot configurations.
        patch_reports_ids (list[str]): List of report ids to filter patching, following the
            syntax machine-application-usecase-date. The date component accepts the
            'latest' keyword, and the application, use case and date components accept
            the 'all' keyword. If the list is empty, the latest report will be patched.
        save (bool): If True, replace the file contents of the old plots configuration.

    Raises:
        ValueError: If the number of plot configs is incompatible with the number of patches.
    """
    if not plot_configs:
        return

    if not patch_reports_ids:
        # One plot config, no reports specified: patch the most recent report.
        if len(plot_configs) > 1:
            raise ValueError("When no patch reports are provided, plot configuration should be of length one")
        latest_report = max(self.data, key=lambda report: datetime.strptime(report.date, "%Y-%m-%dT%H:%M:%S%z"))
        latest_report.replacePlotsConfig(plot_configs[0], save)
        return

    # Fail fast: configs must map to patches either one-to-many or one-to-one.
    # (Previously this was only checked inside the per-report loop, so a patch id
    # matching no report silently hid the configuration error.)
    if len(plot_configs) != 1 and len(plot_configs) != len(patch_reports_ids):
        raise ValueError("Plots configuration must be either of length 1 or exactly the same length as patches")

    for i, patch_report in enumerate(patch_reports_ids):
        # Filter reports based on the id components.
        patch_machine, patch_application, patch_usecase, patch_date = patch_report
        candidates = [r for r in self.data if r.machine_id == patch_machine]

        if patch_application != "all":
            candidates = [r for r in candidates if r.application_id == patch_application]

        if patch_usecase != "all":
            candidates = [r for r in candidates if r.use_case_id == patch_usecase]

        if patch_date == "all":
            reports_to_patch = candidates
        elif patch_date == "latest":
            reports_to_patch = [max(candidates, key=lambda report: datetime.strptime(report.date, "%Y-%m-%dT%H:%M:%S%z"))]
        else:
            # Dates in report ids use underscores (e.g. 2024_11_05T01_05_32), so the
            # report's ISO date is reformatted before comparing.
            reports_to_patch = [
                r for r in candidates
                if datetime.strptime(r.date, "%Y-%m-%dT%H:%M:%S%z").strftime("%Y_%m_%dT%H_%M_%S") == patch_date
            ]

        # Either positional pairing or the single shared config.
        plot_config = plot_configs[i] if len(patch_reports_ids) == len(plot_configs) else plot_configs[0]
        for report_to_patch in reports_to_patch:
            report_to_patch.replacePlotsConfig(plot_config, save)
68 changes: 68 additions & 0 deletions src/feelpp/benchmarking/report/parser.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
from argparse import ArgumentParser, RawTextHelpFormatter
from feelpp.benchmarking.reframe.parser import CustomHelpFormatter
import os, shutil


#TODO: Factorize with feelpp.reframe parser
class ReportArgParser():
    """ Class for parsing and validating command-line arguments for the report module"""
    def __init__(self):
        # NOTE(review): add_help=False disables argparse's built-in -h/--help;
        # presumably help is handled as in the feelpp.reframe parser -- confirm.
        self.parser = ArgumentParser(formatter_class=CustomHelpFormatter, add_help=False, description="Render benchmarking reports")
        self.addArgs()
        self.args = self.parser.parse_args()
        self.validate()
        self.normalizePaths()

    def addArgs(self):
        """ Add arguments to the parser """
        self.parser.add_argument("--config-file", "-c", type=str, help="Path to the JSON config file", default="./reports/website_config.json")
        self.parser.add_argument("--remote-download-dir", "-do", type=str, help="Path to the output directory where remote reports will be downloaded", default="reports")
        self.parser.add_argument("--modules-path", "-m", type=str, help="Path to the modules directory where reports will be rendered", default="./docs/modules/ROOT/pages")
        # Fixed: a stray trailing comma here used to turn this statement into a throwaway tuple.
        self.parser.add_argument("--overview-config", "-oc", type=str, help="Path to the overview configuration file", default="./src/feelpp/benchmarking/report/config/overviewConfig.json")
        self.parser.add_argument("--plot-configs", "-pc", type=str, nargs='+', default=[], action='extend', help="Path to a plot configuration to use for a given benchmark. To be used along with --patch-reports")
        self.parser.add_argument("--patch-reports", "-pr", type=str, nargs='+', default=[], action='extend', help="Id of the reports to patch, the syntax of the id is machine:application:usecase:date e.g. gaya:feelpp_app:my_use_case:2024_11_05T01_05_32. It is possible to affect all reports in a component by replacing the machine, application, use_case or date by 'all'. Also, one can indicate to patch the latest report by replacing the date by 'latest'. If this option is not provided but plot-configs is, then the latest report will be patched (most recent report date)")
        self.parser.add_argument("--save-patches", "-sp", action='store_true', help="If this flag is active, existing plot configurations will be replaced with the ones provided in patch-reports.")
        self.parser.add_argument("--website", "-w", action='store_true', help="Compile documentation and start HTTP server with benchmark reports")

    def validate(self):
        """ Validate specific options.

        Raises:
            ValueError: If a report id does not have exactly 4 ':'-separated components,
                if 'latest' is used outside the date component, or if 'all' is used for
                the machine component.
            FileNotFoundError: If a path passed as argument does not exist.
        """
        self.checkDirectoriesExist()

        if self.args.patch_reports:
            for patch_report in self.args.patch_reports:
                splitted_patch = patch_report.split(":")
                if len(splitted_patch) != 4:
                    raise ValueError(f"The ID syntax is incorrect ({patch_report})")
                machine, app, use_case, date = splitted_patch
                if "latest" in [machine, app, use_case]:
                    raise ValueError("The 'latest' keyword is only accepted for the date component")
                if machine == "all":
                    raise ValueError("The machine component patch does not support the 'all' keyword")

            # Pre-split ids so downstream code receives [machine, application, usecase, date] lists.
            self.args.patch_reports = [patch_report.split(":") for patch_report in self.args.patch_reports]

    def checkDirectoriesExist(self):
        """ Check that the paths (files and directories) passed as arguments exist in the filesystem.

        Raises:
            FileNotFoundError: If the config file, overview config, modules path
                or any plot configuration file is missing.
        """
        for filepath in [self.args.config_file, self.args.overview_config, self.args.modules_path]:
            if not os.path.exists(filepath):
                raise FileNotFoundError(f"File not found ({filepath})")

        for file in self.args.plot_configs:
            if not os.path.exists(file):
                raise FileNotFoundError(f"File not found ({file})")

    def normalizePaths(self):
        """Normalize paths passed as arguments"""
        self.args.config_file = os.path.normpath(self.args.config_file)
        self.args.remote_download_dir = os.path.normpath(self.args.remote_download_dir)
        self.args.modules_path = os.path.normpath(self.args.modules_path)
        self.args.overview_config = os.path.normpath(self.args.overview_config)
        self.args.plot_configs = [os.path.normpath(plot_config) for plot_config in self.args.plot_configs]

    def printArgs(self):
        """ Prints arguments on the standard output"""
        print("\n[Loaded command-line options]")
        for arg in vars(self.args):
            print(f"\t > {arg + ':' :<{20}} {getattr(self.args, arg)}")
        print("\n" + '=' * shutil.get_terminal_size().columns)
2 changes: 1 addition & 1 deletion src/feelpp/benchmarking/report/templates/benchmark.adoc.j2
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
:page-jupyter: true
:page-tags: toolbox, catalog
:parent-catalogs: {{parent_catalogs}}
:description: Performance report for {{ machine_display_name }} on {{ session_info.time_end }}
:description: Performance report for {{ machine_display_name }} on {{ session_info.time_start }}
:page-illustration: {{ machine_id }}.jpg
:author: Your Name
:revdate: {{ session_info.time_end }}
Expand Down

0 comments on commit 0887ee2

Please sign in to comment.