Commit
add plot benchmarks script
AmintorDusko committed Nov 9, 2023
1 parent 2bc4445 commit 70c185e
Showing 3 changed files with 99 additions and 11 deletions.
18 changes: 8 additions & 10 deletions .github/workflows/unit-test-benchmarks.yml
@@ -172,32 +172,30 @@ jobs:
# If this is a reference benchmark and we don't hit the cache, move file to the cached directory.
- name: Move reference pytest benchmark file to cache
if: ${{ steps.continue.outputs.reference_benchmarks == 'true' && hashFiles('benchmarks.json') != ''}}
run: mkdir -p ${{ github.workspace}}/benchmark_reference && cp benchmarks.json "$_"
run: mkdir -p ${{ github.workspace }}/benchmark_reference && cp benchmarks.json "$_"

- name: Convert pytest benchmark JSON files to XUBM-JSON
if: ${{ steps.continue.outputs.local_benchmarks == 'true' }}
run: |
python ${{ github.workspace}}/scripts/benchmarks/convert_pytest_JSON_to_XUBM.py --author ${{ github.event.pull_request.user.login }}
python ${{ github.workspace }}/scripts/benchmarks/convert_pytest_JSON_to_XUBM.py --author ${{ github.event.pull_request.user.login }}
cd benchmark_reference
python ${{ github.workspace}}/scripts/benchmarks/convert_pytest_JSON_to_XUBM.py --author ${{ github.event.pull_request.user.login }}
python ${{ github.workspace }}/scripts/benchmarks/convert_pytest_JSON_to_XUBM.py --author ${{ github.event.pull_request.user.login }}
- name: checking 1
- name: Plotting benchmark graphs
if: ${{ steps.continue.outputs.local_benchmarks == 'true' }}
run: |
ls
python ${{ github.workspace }}/scripts/benchmarks/plot_benchmarks.py --graph_name ${{ inputs.benchmarks_name }}
- name: checking 2
- name: Create directory and move data to be uploaded as an artifact
if: ${{ steps.continue.outputs.local_benchmarks == 'true' }}
run: |
cd benchmark_reference
ls
run: mkdir -p ${{ github.workspace }}/benchmark_results && cp benchmarks_xubm.json ${{ inputs.benchmarks_name }}.png "$_"

# If this is a PR benchmark, upload the data as an artifact.
- name: Upload PR pytest benchmark file
if: ${{ steps.continue.outputs.local_benchmarks == 'true' && hashFiles('benchmarks_xubm.json') != ''}}
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.benchmarks_name }}
path: benchmarks_xubm.json
path: ${{ github.workspace }}/benchmark_results
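After these steps, the uploaded artifact is expected to contain the converted benchmark data and the rendered graph; a sketch of the assumed layout (the PNG name comes from the benchmarks_name input):

benchmark_results/
    benchmarks_xubm.json
    <benchmarks_name>.png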


1 change: 0 additions & 1 deletion scripts/benchmarks/convert_pytest_JSON_to_XUBM.py
@@ -63,7 +63,6 @@ def create_benchmark_XUBM(data, args):
benchmark_xubm["gitID"] = data["commit_info"]["id"]
benchmark_xubm["runs"] = benchmark["stats"]["iterations"]
benchmark_xubm["params"] = benchmark["params"]
# benchmark_xubm["runtime"] = benchmark["stats"]["mean"] * (1 + random.uniform(-0.25, 0.25))
benchmark_xubm["runtime"] = benchmark["stats"]["mean"]
# Results are always in seconds:
benchmark_xubm["timeUnit"] = "seconds"
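For reference, the XUBM files written by this converter and read by the plotting script below appear to be JSON documents with a top-level "xubm" list of per-benchmark records; a minimal sketch of the assumed shape, with made-up values:

# Illustrative sketch only; field names follow the converter and plot script, values are invented.
xubm_example = {
    "xubm": [
        {
            "name": "apply_hadamard",     # benchmark name, used to pair branch and reference runs
            "gitID": "70c185e",           # commit the benchmark ran against
            "runs": 5,                    # pytest-benchmark iterations
            "params": {"num_qubits": 20}, # hypothetical parametrization
            "runtime": 0.0123,            # mean runtime
            "timeUnit": "seconds",
        }
    ]
}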
91 changes: 91 additions & 0 deletions scripts/benchmarks/plot_benchmarks.py
@@ -0,0 +1,91 @@
"""
This module includes functionality to plot the branch benchmarks normalized by the reference.
"""
import argparse, json
import numpy as np

########################################################################
# Parsing arguments
########################################################################
def parse_args():
"""Parse external arguments provided to the script."""
parser = argparse.ArgumentParser()

parser.add_argument(
"--graph_name",
type=str,
default="Benchmark set",
nargs="?",
help="Name of the set of benchmarks.",
)

parser.add_argument(
"--filename_XUBM_ref",
type=str,
default="benchmark_reference/benchmarks_xubm.json",
nargs="?",
help="Name of the JSON-XUBM file with reference benchmarks.",
)

parser.add_argument(
"--filename_XUBM",
type=str,
default="benchmarks_xubm.json",
nargs="?",
help="Name of the JSON-XUBM file with most recent benchmarks.",
)

return parser.parse_args()

def format_plot_data(ref_data, data):

"""Here we format the data coming from JSON files in two arrays with graph data.
Args:
ref_data (JSON-XUBM): reference benchmarks data
data (JSON-XUBM): local (or branch) benchmarks data
Returns:
tuple: data for x and y axis
"""

benchmark_ratios = []

benchmark_names = []

for ref_benchmark, benchmark in zip(ref_data["xubm"], data["xubm"]):
if ref_benchmark["name"] == benchmark["name"]:
benchmark_names.append(benchmark["name"])
benchmark_ratios.append(benchmark["runtime"] / ref_benchmark["runtime"])

return benchmark_names, benchmark_ratios
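# Illustrative example (not part of the committed script): with
#   ref_data = {"xubm": [{"name": "apply_cnot", "runtime": 2.0}]}
#   data     = {"xubm": [{"name": "apply_cnot", "runtime": 1.5}]}
# the function returns (["apply_cnot"], [0.75]); ratios below 1.0 mean the
# branch is faster than the reference, ratios above 1.0 mean a regression.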

if __name__ == "__main__":
args = parse_args()

with open(args.filename_XUBM_ref, 'r', encoding="utf-8") as file:
ref_data = json.load(file)

with open(args.filename_XUBM, 'r', encoding="utf-8") as file:
data = json.load(file)

benchmark_names, benchmark_ratios = format_plot_data(ref_data, data)

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

fig, ax = plt.subplots()

colormat = np.where(np.array(benchmark_ratios) > 1.0, 'r', 'b')

ax.barh(benchmark_names, benchmark_ratios, color=colormat)

ax.axvline(x=1.0, color='k', linestyle='--', zorder=0)

ax.set_xlabel('runtime / reference runtime')
ax.set_title(args.graph_name)

regr_patch = mpatches.Patch(color='red', label='Regression')
prog_patch = mpatches.Patch(color='blue', label='Improvement')

plt.legend(title='Performance', handles=[regr_patch, prog_patch])

plt.savefig(args.graph_name+'.png', bbox_inches='tight')
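Outside of CI, the plotting logic can be exercised with synthetic data; a minimal sketch, assuming the script defaults above and made-up benchmark names:

import json
import pathlib
import subprocess

# Write a synthetic reference file and a synthetic "branch" file in the
# locations the script reads by default.
pathlib.Path("benchmark_reference").mkdir(exist_ok=True)
ref = {"xubm": [{"name": "bench_a", "runtime": 1.0},
                {"name": "bench_b", "runtime": 2.0}]}
new = {"xubm": [{"name": "bench_a", "runtime": 0.8},   # faster -> blue bar
                {"name": "bench_b", "runtime": 2.6}]}  # slower -> red bar
with open("benchmark_reference/benchmarks_xubm.json", "w", encoding="utf-8") as f:
    json.dump(ref, f)
with open("benchmarks_xubm.json", "w", encoding="utf-8") as f:
    json.dump(new, f)

# Produces "local check.png" with one bar below the dashed line and one above.
subprocess.run(
    ["python", "scripts/benchmarks/plot_benchmarks.py", "--graph_name", "local check"],
    check=True,
)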
