diff --git a/caffeination/microbenchmarks/bench_background_jobs.py b/caffeination/microbenchmarks/bench_background_jobs.py
new file mode 100644
index 0000000..1ce7ff0
--- /dev/null
+++ b/caffeination/microbenchmarks/bench_background_jobs.py
@@ -0,0 +1,9 @@
+import frappe
+from frappe.utils import execute_in_shell
+
+
+def bench_bg_job_overheads():
+	for _ in range(100):
+		frappe.enqueue(frappe.ping)
+	_, stderr = execute_in_shell("bench worker --burst", check_exit_code=True)
+	assert b"Job OK" in stderr
diff --git a/caffeination/microbenchmarks/bench_redis.py b/caffeination/microbenchmarks/bench_redis.py
index 63d8229..692f622 100644
--- a/caffeination/microbenchmarks/bench_redis.py
+++ b/caffeination/microbenchmarks/bench_redis.py
@@ -10,13 +10,14 @@ def bench_make_key():
 	return keys
 
 
-def bench_set_value():
+def bench_redis_get_set_delete_cycle():
 	for dt in get_all_doctypes():
 		key = f"_test_set_value:{dt}"
 		frappe.cache.set_value(key, cached_get_doc(dt), expires_in_sec=30)
 		assert frappe.cache.exists(key)
 		assert frappe.cache.get_value(key).name == dt
-		frappe.local.cache.clear()
+		frappe.cache.delete_value(key)
+		assert not frappe.cache.exists(key)
 
 
 @lru_cache
diff --git a/caffeination/microbenchmarks/bench_web_requests.py b/caffeination/microbenchmarks/bench_web_requests.py
new file mode 100644
index 0000000..c2d9908
--- /dev/null
+++ b/caffeination/microbenchmarks/bench_web_requests.py
@@ -0,0 +1,17 @@
+from functools import lru_cache
+
+import frappe
+from frappe.app import application as _trigger_imports
+from frappe.utils import get_test_client
+
+
+def bench_request_overheads():
+	client = get_test_client()
+	for _ in range(100):
+		resp = client.get("/api/method/ping", headers={"X-Frappe-Site-Name": get_site()})
+		assert resp.status_code == 200
+
+
+@lru_cache
+def get_site():
+	return frappe.local.site
diff --git a/caffeination/microbenchmarks/run_benchmarks.py b/caffeination/microbenchmarks/run_benchmarks.py
index 9701bd1..1dda00e 100755
--- a/caffeination/microbenchmarks/run_benchmarks.py
+++ b/caffeination/microbenchmarks/run_benchmarks.py
@@ -1,45 +1,69 @@
 #!/bin/env python3
 
 import inspect
-import os
 from types import FunctionType
 
 import frappe
 import pyperf
+from frappe.utils import cstr
 
-from caffeination.microbenchmarks import bench_database, bench_orm, bench_redis
+from caffeination.microbenchmarks import (
+	bench_background_jobs,
+	bench_database,
+	bench_orm,
+	bench_redis,
+	bench_web_requests,
+)
 
 BENCHMARK_PREFIX = "bench_"
-BENCHMARK_SITE = os.environ.get("FRAPPE_BENCHMARK_SITE") or "bench.localhost"
 
 
 def run_microbenchmarks():
-	benchmarks = discover_benchmarks()
+	def update_cmd_line(cmd, args):
+		# Pass our added arguments to workers
+		cmd.extend(["--site", args.site])
+		cmd.extend(["--filter", args.benchmark_filter])
 
-	frappe.init(BENCHMARK_SITE)
-	frappe.connect()
+	runner = pyperf.Runner(add_cmdline_args=update_cmd_line)
+
+	runner.argparser.add_argument(
+		"--filter",
+		dest="benchmark_filter",
+		help="Apply a filter to selectively run benchmarks. This is a substring filter.",
+	)
+	runner.argparser.add_argument(
+		"--site", dest="site", help="Frappe site to use for benchmark", required=True
+	)
 
-	runner = pyperf.Runner()
+	args = runner.argparser.parse_args()
+	benchmarks = discover_benchmarks(args.benchmark_filter)
 
+	setup(args.site)
 	for name, func in benchmarks:
 		runner.bench_func(name, func)
+	teardown(args.site)
+
+
+def setup(site):
+	frappe.init(site)
+	frappe.connect()
+
 
+def teardown(site):
 	frappe.destroy()
 
 
-def discover_benchmarks():
-	benchmark_modules = [
-		bench_orm,
-		bench_database,
-		bench_redis,
-	]
+def discover_benchmarks(benchmark_filter=None):
+	benchmark_filter = cstr(benchmark_filter)
+	benchmark_modules = [bench_orm, bench_database, bench_redis, bench_background_jobs, bench_web_requests]
 
 	benchmarks = []
 	for module in benchmark_modules:
 		module_name = module.__name__.split(".")[-1]
 		for fn_name, fn in inspect.getmembers(module, predicate=lambda x: isinstance(x, FunctionType)):
 			if fn_name.startswith(BENCHMARK_PREFIX):
-				unique_name = f"{module_name}_{fn.__name__.removeprefix(BENCHMARK_PREFIX)}"
-				benchmarks.append((unique_name, fn))
+				unique_name = f"{module_name}_{fn.__name__}"
+				if benchmark_filter in unique_name:
+					benchmarks.append((unique_name, fn))
 
 	return sorted(benchmarks, key=lambda x: x[0])
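Usage sketch for the new --filter and --site options (not taken from the diff itself; the site name is illustrative, borrowed from the removed BENCHMARK_SITE default, and it assumes the caffeination package and a working Frappe bench are installed):

    # CLI: run only the Redis microbenchmarks against an existing site.
    #   python caffeination/microbenchmarks/run_benchmarks.py --site bench.localhost --filter bench_redis
    #
    # The same substring match can be exercised directly via discover_benchmarks():
    from caffeination.microbenchmarks.run_benchmarks import discover_benchmarks

    for name, fn in discover_benchmarks("bench_redis"):
        print(name)  # e.g. "bench_redis_bench_make_key"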