Memory profiling #658

Merged · 5 commits · Oct 29, 2024
5 changes: 3 additions & 2 deletions .github/workflows/bench.yml
@@ -27,10 +27,11 @@ jobs:
        run: |
          docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench

      - name: Upload runtime_vs_latency.html
      - name: Upload graphs
        uses: actions/upload-artifact@v4
        with:
          name: benchmark_graphs
          path: |
            ./crates/benches/binary/runtime_vs_latency.html
            ./crates/benches/binary/runtime_vs_bandwidth.html
            ./crates/benches/binary/runtime_vs_bandwidth.html
            ./crates/benches/binary/download_size_vs_memory.html
27 changes: 26 additions & 1 deletion crates/benches/binary/Cargo.toml
@@ -6,13 +6,20 @@ version = "0.0.0"

[features]
default = []
# Enables benchmarks in the browser.
browser-bench = ["tlsn-benches-browser-native"]

[dependencies]
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true, features = ["ideal"] }
tlsn-benches-library = { workspace = true }
tlsn-benches-browser-native = { workspace = true, optional = true}
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-hmac-sha256 = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
@@ -22,8 +29,18 @@ anyhow = { workspace = true }
async-trait = { workspace = true }
charming = {version = "0.3.1", features = ["ssr"]}
csv = "1.3.0"
dhat = { version = "0.3.3" }
env_logger = { version = "0.6.0", default-features = false }
futures = { workspace = true }
serde = { workspace = true }
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros", "net", "io-std"]}
tokio = { workspace = true, features = [
    "rt",
    "rt-multi-thread",
    "macros",
    "net",
    "io-std",
    "fs",
] }
tokio-util = { workspace = true }
toml = "0.8.11"
tracing-subscriber = {workspace = true, features = ["env-filter"]}
@@ -36,10 +53,18 @@ path = "bin/bench.rs"
name = "prover"
path = "bin/prover.rs"

[[bin]]
name = "prover-memory"
path = "bin/prover_memory.rs"

[[bin]]
name = "verifier"
path = "bin/verifier.rs"

[[bin]]
name = "verifier-memory"
path = "bin/verifier_memory.rs"

[[bin]]
name = "plot"
path = "bin/plot.rs"
9 changes: 6 additions & 3 deletions crates/benches/binary/bench.sh
@@ -1,13 +1,16 @@
#! /bin/bash

# Check if we are running as root
# Check if we are running as root.
if [ "$EUID" -ne 0 ]; then
echo "This script must be run as root"
exit
fi

# Run the benchmark binary
# Run the benchmark binary.
../../../target/release/bench

# Plot the results
# Run the benchmark binary in memory profiling mode.
../../../target/release/bench --memory-profiling

# Plot the results.
../../../target/release/plot metrics.csv
4 changes: 4 additions & 0 deletions crates/benches/binary/bench.toml
@@ -7,6 +7,7 @@ download-delay = [10, 25, 50]
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false

[[benches]]
name = "download_bandwidth"
@@ -17,6 +18,7 @@ download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false

[[benches]]
name = "upload_bandwidth"
@@ -27,6 +29,7 @@ download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = [false, true]
memory-profile = false

[[benches]]
name = "download_volume"
@@ -39,3 +42,4 @@ upload-size = 1024
# error in the browser.
download-size = [1024, 4096, 16384, 45000]
defer-decryption = true
memory-profile = true
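Each [[benches]] entry now carries a memory-profile flag. A plausible way this maps onto the crate's config types, via the serde and toml dependencies already in Cargo.toml (struct and field names here are illustrative, not necessarily the crate's real ones):

```rust
use serde::Deserialize;

// Illustrative shape; the crate's real benchmark config struct may differ.
#[derive(Debug, Deserialize)]
struct BenchEntry {
    name: String,
    #[serde(rename = "memory-profile", default)]
    memory_profile: bool,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let entry: BenchEntry =
        toml::from_str("name = \"download_volume\"\nmemory-profile = true")?;
    assert!(entry.memory_profile);
    Ok(())
}
```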
13 changes: 11 additions & 2 deletions crates/benches/binary/benches.Dockerfile
@@ -36,11 +36,20 @@ RUN \

RUN apt clean && rm -rf /var/lib/apt/lists/*

COPY --from=builder ["/usr/src/tlsn/target/release/bench", "/usr/src/tlsn/target/release/prover", "/usr/src/tlsn/target/release/verifier", "/usr/src/tlsn/target/release/plot", "/usr/local/bin/"]
COPY --from=builder \
["/usr/src/tlsn/target/release/bench", \
"/usr/src/tlsn/target/release/prover", \
"/usr/src/tlsn/target/release/prover-memory", \
"/usr/src/tlsn/target/release/verifier", \
"/usr/src/tlsn/target/release/verifier-memory", \
"/usr/src/tlsn/target/release/plot", \
"/usr/local/bin/"]

ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
ENV PROVER_MEMORY_PATH="/usr/local/bin/prover-memory"
ENV VERIFIER_MEMORY_PATH="/usr/local/bin/verifier-memory"

VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && plot /benches/metrics.csv && cat /benches/metrics.csv"]
CMD ["/bin/bash", "-c", "bench && bench --memory-profiling && plot /benches/metrics.csv && cat /benches/metrics.csv"]
24 changes: 19 additions & 5 deletions crates/benches/binary/bin/bench.rs
@@ -1,12 +1,26 @@
use std::{process::Command, thread, time::Duration};
use std::{env, process::Command, thread, time::Duration};

use tlsn_benches::{clean_up, set_up};

fn main() {
    let prover_path = std::env::var("PROVER_PATH")
        .unwrap_or_else(|_| "../../../target/release/prover".to_string());
    let verifier_path = std::env::var("VERIFIER_PATH")
        .unwrap_or_else(|_| "../../../target/release/verifier".to_string());
    let args: Vec<String> = env::args().collect();
    let is_memory_profiling = args.contains(&"--memory-profiling".to_string());

    let (prover_path, verifier_path) = if is_memory_profiling {
        (
            std::env::var("PROVER_MEMORY_PATH")
                .unwrap_or_else(|_| "../../../target/release/prover-memory".to_string()),
            std::env::var("VERIFIER_MEMORY_PATH")
                .unwrap_or_else(|_| "../../../target/release/verifier-memory".to_string()),
        )
    } else {
        (
            std::env::var("PROVER_PATH")
                .unwrap_or_else(|_| "../../../target/release/prover".to_string()),
            std::env::var("VERIFIER_PATH")
                .unwrap_or_else(|_| "../../../target/release/verifier".to_string()),
        )
    };

    if let Err(e) = set_up() {
        println!("Error setting up: {}", e);
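With this change, `bench --memory-profiling` re-runs the suite against the dhat-instrumented binaries selected above. For context, the selected paths would feed into the runner roughly like this (a hypothetical illustration; the crate's actual runner logic, including the network setup done by set_up/clean_up, is not shown in this diff):

```rust
use std::process::Command;

// Hypothetical illustration of consuming the selected binary paths.
fn run_pair(prover_path: &str, verifier_path: &str) -> std::io::Result<()> {
    // Launch the verifier in the background, then run the prover against it.
    let mut verifier = Command::new(verifier_path).spawn()?;
    let prover_status = Command::new(prover_path).status()?;
    verifier.wait()?;
    assert!(prover_status.success());
    Ok(())
}
```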
72 changes: 72 additions & 0 deletions crates/benches/binary/bin/plot.rs
@@ -28,9 +28,81 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
    let _chart = runtime_vs_latency(&all_data)?;
    let _chart = runtime_vs_bandwidth(&all_data)?;

    // Memory profiling is not compatible with browser benches.
    if cfg!(not(feature = "browser-bench")) {
        let _chart = download_size_vs_memory(&all_data)?;
    }

    Ok(())
}

fn download_size_vs_memory(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
    const TITLE: &str = "Download Size vs Memory";

    let prover_kind: String = all_data
        .first()
        .map(|s| s.kind.clone().into())
        .unwrap_or_default();

    let data: Vec<Vec<f32>> = all_data
        .iter()
        .filter(|record| record.name == "download_volume" && record.heap_max_bytes.is_some())
        .map(|record| {
            vec![
                record.download_size as f32,
                record.heap_max_bytes.unwrap() as f32 / 1024.0 / 1024.0,
            ]
        })
        .collect();

    // https://github.com/yuankunzhang/charming
    let chart = Chart::new()
        .title(
            Title::new()
                .text(TITLE)
                .subtext(format!("{} Prover", prover_kind)),
        )
        .tooltip(Tooltip::new().trigger(Trigger::Axis))
        .legend(Legend::new().orient(Orient::Vertical))
        .toolbox(
            Toolbox::new().show(true).feature(
                Feature::new()
                    .save_as_image(SaveAsImage::new())
                    .restore(Restore::new())
                    .data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
                    .data_view(DataView::new().read_only(false)),
            ),
        )
        .x_axis(
            Axis::new()
                .scale(true)
                .name("Download Size (bytes)")
                .name_gap(30)
                .name_location(NameLocation::Center),
        )
        .y_axis(
            Axis::new()
                .scale(true)
                .name("Heap Memory (Mbytes)")
                .name_gap(40)
                .name_location(NameLocation::Middle),
        )
        .series(
            Scatter::new()
                .name("Allocated Heap Memory")
                .symbol_size(10)
                .data(data),
        );

    // Save the chart as HTML file.
    HtmlRenderer::new(TITLE, 1000, 800)
        .theme(THEME)
        .save(&chart, "download_size_vs_memory.html")
        .unwrap();

    Ok(chart)
}

fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
    const TITLE: &str = "Runtime vs Latency";

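The new chart reads heap_max_bytes off each Metrics record. How that value is produced is outside this diff; with dhat in the dependency tree, one plausible source is dhat's end-of-run heap statistics, sketched here under that assumption:

```rust
// Sketch under the assumption that the memory binaries use dhat (see the
// Cargo.toml diff); the crate's real metrics plumbing is not shown here.
fn peak_heap_bytes() -> u64 {
    // HeapStats::get() panics if no dhat::Profiler is active, so this would
    // only run inside the prover-memory/verifier-memory binaries.
    let stats = dhat::HeapStats::get();
    stats.max_bytes as u64
}
```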