diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml
index 392eb876c0..47646017ac 100644
--- a/.github/workflows/arbitrator-ci.yml
+++ b/.github/workflows/arbitrator-ci.yml
@@ -50,15 +50,13 @@ jobs:
- name: Install go
uses: actions/setup-go@v4
with:
- go-version: 1.21.x
+ go-version: 1.23.x
- name: Install custom go-ethereum
run: |
cd /tmp
- git clone --branch v1.13.8 --depth 1 https://github.com/ethereum/go-ethereum.git
+ git clone --branch v1.14.11 --depth 1 https://github.com/ethereum/go-ethereum.git
cd go-ethereum
- # Enable KZG point evaluation precompile early
- sed -i 's#var PrecompiledContractsBerlin = map\[common.Address\]PrecompiledContract{#\0 common.BytesToAddress([]byte{0x0a}): \&kzgPointEvaluation{},#g' core/vm/contracts.go
go build -o /usr/local/bin/geth ./cmd/geth
- name: Setup nodejs
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index acd6295b7c..a944f08f40 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -46,7 +46,7 @@ jobs:
- name: Install go
uses: actions/setup-go@v4
with:
- go-version: 1.21.x
+ go-version: 1.23.x
- name: Install wasm-ld
run: |
@@ -87,12 +87,12 @@ jobs:
uses: actions/cache@v3
with:
path: |
- ~/.cargo/registry/
- ~/.cargo/git/
+ ~/.cargo/
arbitrator/target/
arbitrator/wasm-libraries/target/
- arbitrator/wasm-libraries/soft-float/SoftFloat/build
+ arbitrator/wasm-libraries/soft-float/
target/etc/initial-machine-cache/
+ /home/runner/.rustup/toolchains/
key: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}-min-${{ hashFiles('arbitrator/Cargo.lock') }}-${{ matrix.test-mode }}
restore-keys: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}-
@@ -145,89 +145,42 @@ jobs:
env:
TEST_STATE_SCHEME: path
run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m -tags=cionly > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then
- exit 1
- fi
- done
+ echo "Running tests with Path Scheme" >> full.log
+ ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 20m --cover
- name: run tests without race detection and hash state scheme
if: matrix.test-mode == 'defaults'
env:
TEST_STATE_SCHEME: hash
run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 20m -tags=cionly; then
- exit 1
- fi
- done
-
- - name: run tests with race detection and path state scheme
- if: matrix.test-mode == 'race'
- env:
- TEST_STATE_SCHEME: path
- run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -race -timeout 30m > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then
- exit 1
- fi
- done
+ echo "Running tests with Hash Scheme" >> full.log
+ ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags cionly --timeout 20m
- name: run tests with race detection and hash state scheme
if: matrix.test-mode == 'race'
env:
TEST_STATE_SCHEME: hash
run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -race -timeout 30m; then
- exit 1
- fi
- done
+ echo "Running tests with Hash Scheme" >> full.log
+ ${{ github.workspace }}/.github/workflows/gotestsum.sh --race --timeout 30m
- name: run redis tests
if: matrix.test-mode == 'defaults'
- run: TEST_REDIS=redis://localhost:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./...
+ run: |
+ echo "Running redis tests" >> full.log
+ TEST_REDIS=redis://localhost:6379/0 gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/... -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./...
- name: run challenge tests
if: matrix.test-mode == 'challenge'
- run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=challengetest -run=TestChallenge > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then
- exit 1
- fi
- done
+ run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest --run TestChallenge --cover
- name: run stylus tests
if: matrix.test-mode == 'stylus'
- run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=stylustest -run="TestProgramArbitrator" > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then
- exit 1
- fi
- done
+ run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest --run TestProgramArbitrator --timeout 60m --cover
- name: run long stylus tests
if: matrix.test-mode == 'long'
- run: |
- packages=`go list ./...`
- for package in $packages; do
- echo running tests for $package
- if ! stdbuf -oL gotestsum --format short-verbose --packages="$package" --rerun-fails=2 --no-color=false -- -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee -a full.log | grep -vE "INFO|seal"); then
- exit 1
- fi
- done
+ run: ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest --run TestProgramLong --timeout 60m --cover
- name: Archive detailed run log
uses: actions/upload-artifact@v3
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 1cde8f06b9..26447947d4 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -73,7 +73,7 @@ jobs:
- name: Install go
uses: actions/setup-go@v4
with:
- go-version: 1.21.x
+ go-version: 1.23.x
- name: Install rust stable
uses: dtolnay/rust-toolchain@stable
diff --git a/.github/workflows/gotestsum.sh b/.github/workflows/gotestsum.sh
new file mode 100755
index 0000000000..ed631847b7
--- /dev/null
+++ b/.github/workflows/gotestsum.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+check_missing_value() {
+ if [[ $1 -eq 0 || $2 == -* ]]; then
+ echo "missing $3 argument value"
+ exit 1
+ fi
+}
+
+timeout=""
+tags=""
+run=""
+race=false
+cover=false
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --timeout)
+ shift
+ check_missing_value $# "$1" "--timeout"
+ timeout=$1
+ shift
+ ;;
+ --tags)
+ shift
+ check_missing_value $# "$1" "--tags"
+ tags=$1
+ shift
+ ;;
+ --run)
+ shift
+ check_missing_value $# "$1" "--run"
+ run=$1
+ shift
+ ;;
+ --race)
+ race=true
+ shift
+ ;;
+ --cover)
+ cover=true
+ shift
+ ;;
+ *)
+ echo "Invalid argument: $1"
+ exit 1
+ ;;
+ esac
+done
+
+packages=$(go list ./...)
+for package in $packages; do
+ cmd="stdbuf -oL gotestsum --format short-verbose --packages=\"$package\" --rerun-fails=2 --no-color=false --"
+
+ if [ "$timeout" != "" ]; then
+ cmd="$cmd -timeout $timeout"
+ fi
+
+ if [ "$tags" != "" ]; then
+ cmd="$cmd -tags=$tags"
+ fi
+
+ if [ "$run" != "" ]; then
+ cmd="$cmd -run=$run"
+ fi
+
+ if [ "$race" == true ]; then
+ cmd="$cmd -race"
+ fi
+
+ if [ "$cover" == true ]; then
+ cmd="$cmd -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/..."
+ fi
+
+ cmd="$cmd > >(stdbuf -oL tee -a full.log | grep -vE \"INFO|seal\")"
+
+ echo ""
+ echo running tests for "$package"
+ echo "$cmd"
+
+ if ! eval "$cmd"; then
+ exit 1
+ fi
+done
diff --git a/.github/workflows/shellcheck-ci.yml b/.github/workflows/shellcheck-ci.yml
new file mode 100644
index 0000000000..d1c7b58580
--- /dev/null
+++ b/.github/workflows/shellcheck-ci.yml
@@ -0,0 +1,30 @@
+name: ShellCheck CI
+run-name: ShellCheck CI triggered from @${{ github.actor }} of ${{ github.head_ref }}
+
+on:
+ workflow_dispatch:
+ merge_group:
+ pull_request:
+ push:
+ branches:
+ - master
+
+jobs:
+ shellcheck:
+ name: Run ShellCheck
+ runs-on: ubuntu-8
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Run ShellCheck
+ uses: ludeeus/action-shellcheck@master
+ with:
+ ignore_paths: >-
+ ./fastcache/**
+ ./contracts/**
+ ./safe-smart-account/**
+ ./go-ethereum/**
+ ./nitro-testnode/**
+ ./brotli/**
+ ./arbitrator/**
diff --git a/.github/workflows/submodule-pin-check.sh b/.github/workflows/submodule-pin-check.sh
deleted file mode 100755
index aecb287ce1..0000000000
--- a/.github/workflows/submodule-pin-check.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-declare -Ar exceptions=(
- [contracts]=origin/develop
- [nitro-testnode]=origin/master
-
- #TODO Rachel to check these are the intended branches.
- [arbitrator/langs/c]=origin/vm-storage-cache
- [arbitrator/tools/wasmer]=origin/adopt-v4.2.8
-)
-
-divergent=0
-for mod in `git submodule --quiet foreach 'echo $name'`; do
- branch=origin/HEAD
- if [[ -v exceptions[$mod] ]]; then
- branch=${exceptions[$mod]}
- fi
-
- if ! git -C $mod merge-base --is-ancestor HEAD $branch; then
- echo $mod diverges from $branch
- divergent=1
- fi
-done
-
-exit $divergent
-
diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml
index e459bad34d..60dd8ad827 100644
--- a/.github/workflows/submodule-pin-check.yml
+++ b/.github/workflows/submodule-pin-check.yml
@@ -1,21 +1,70 @@
-name: Submodule Pin Check
+name: Merge Checks
on:
- pull_request:
+ pull_request_target:
branches: [ master ]
types: [synchronize, opened, reopened]
+permissions:
+ statuses: write
+
jobs:
submodule-pin-check:
- name: Submodule Pin Check
+ name: Check Submodule Pin
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- submodules: recursive
+ submodules: true
+ persist-credentials: false
+ ref: "${{ github.event.pull_request.head.sha }}"
- name: Check all submodules are ancestors of origin/HEAD or configured branch
- run: ${{ github.workspace }}/.github/workflows/submodule-pin-check.sh
+ run: |
+ status_state="pending"
+ declare -Ar exceptions=(
+ [contracts]=origin/develop
+ [nitro-testnode]=origin/master
+
+ #TODO Rachel to check these are the intended branches.
+ [arbitrator/langs/c]=origin/vm-storage-cache
+ [arbitrator/tools/wasmer]=origin/adopt-v4.2.8
+ )
+ divergent=0
+ for mod in `git submodule --quiet foreach 'echo $name'`; do
+ branch=origin/HEAD
+ if [[ -v exceptions[$mod] ]]; then
+ branch=${exceptions[$mod]}
+ fi
+
+ if ! git -C $mod merge-base --is-ancestor HEAD $branch; then
+ echo $mod diverges from $branch
+ divergent=1
+ fi
+ done
+ if [ $divergent -eq 0 ]; then
+ status_state="success"
+ else
+ resp="$(curl -sSL --fail-with-body \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ "https://api.github.com/repos/$GITHUB_REPOSITORY/commits/${{ github.event.pull_request.head.sha }}/statuses")"
+ if ! jq -e '.[] | select(.context == "Submodule Pin Check")' > /dev/null <<< "$resp"; then
+          # Submodule pin check is failing and no status exists
+ # Keep it without a status to keep the green checkmark appearing
+ # Otherwise, the commit and PR's CI will appear to be indefinitely pending
+ # Merging will still be blocked until the required status appears
+ exit 0
+ fi
+ fi
+ curl -sSL --fail-with-body \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/${{ github.event.pull_request.head.sha }}" \
+ -d '{"context":"Submodule Pin Check","state":"'"$status_state"'"}'
diff --git a/Dockerfile b/Dockerfile
index c8b36f0785..aba5432254 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -66,7 +66,7 @@ COPY --from=wasm-libs-builder /workspace/ /
FROM wasm-base AS wasm-bin-builder
# pinned go version
-RUN curl -L https://golang.org/dl/go1.21.10.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf -
+RUN curl -L https://golang.org/dl/go1.23.1.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf -
COPY ./Makefile ./go.mod ./go.sum ./
COPY ./arbcompress ./arbcompress
COPY ./arbos ./arbos
@@ -218,8 +218,9 @@ COPY ./scripts/download-machine.sh .
#RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4
RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true
RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69
+RUN ./download-machine.sh consensus-v32 0x184884e1eb9fefdc158f6c8ac912bb183bf3cf83f0090317e0bc4ac5860baa39
-FROM golang:1.21.10-bookworm AS node-builder
+FROM golang:1.23.1-bookworm AS node-builder
WORKDIR /workspace
ARG version=""
ARG datetime=""
@@ -264,6 +265,8 @@ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/
COPY --from=node-builder /workspace/target/bin/prover /usr/local/bin/
+COPY --from=node-builder /workspace/target/bin/dbconv /usr/local/bin/
+COPY ./scripts/convert-databases.bash /usr/local/bin/
COPY --from=machine-versions /workspace/machines /home/user/target/machines
COPY ./scripts/validate-wasm-module-root.sh .
RUN ./validate-wasm-module-root.sh /home/user/target/machines /usr/local/bin/prover
diff --git a/LICENSE.md b/LICENSE.md
index ea9a53da75..25768b3010 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -22,7 +22,7 @@ Additional Use Grant: You may use the Licensed Work in a production environment
Expansion Program Term of Use](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf). For purposes of this
Additional Use Grant, the "Covered Arbitrum Chains" are
(a) Arbitrum One (chainid:42161), Arbitrum Nova (chainid:42170),
- rbitrum Rinkeby testnet/Rinkarby (chainid:421611),Arbitrum Nitro
+ Arbitrum Rinkeby testnet/Rinkarby (chainid:421611),Arbitrum Nitro
Goerli testnet (chainid:421613), and Arbitrum Sepolia Testnet
(chainid:421614); (b) any future blockchains authorized to be
designated as Covered Arbitrum Chains by the decentralized autonomous
diff --git a/Makefile b/Makefile
index dc8927dd22..88bbd8dabe 100644
--- a/Makefile
+++ b/Makefile
@@ -31,6 +31,14 @@ ifneq ($(origin GOLANG_LDFLAGS),undefined)
GOLANG_PARAMS = -ldflags="-extldflags '-ldl' $(GOLANG_LDFLAGS)"
endif
+UNAME_S := $(shell uname -s)
+
+# On macOS, many warnings are emitted if these environment variables aren't set.
+ifeq ($(UNAME_S), Darwin)
+ export MACOSX_DEPLOYMENT_TARGET := $(shell sw_vers -productVersion)
+ export CGO_LDFLAGS := -Wl,-no_warn_duplicate_libraries
+endif
+
precompile_names = AddressTable Aggregator BLS Debug FunctionTable GasInfo Info osTest Owner RetryableTx Statistics Sys
precompiles = $(patsubst %,./solgen/generated/%.go, $(precompile_names))
@@ -141,8 +149,10 @@ stylus_test_erc20_wasm = $(call get_stylus_test_wasm,erc20)
stylus_test_erc20_src = $(call get_stylus_test_rust,erc20)
stylus_test_read-return-data_wasm = $(call get_stylus_test_wasm,read-return-data)
stylus_test_read-return-data_src = $(call get_stylus_test_rust,read-return-data)
+stylus_test_hostio-test_wasm = $(call get_stylus_test_wasm,hostio-test)
+stylus_test_hostio-test_src = $(call get_stylus_test_rust,hostio-test)
-stylus_test_wasms = $(stylus_test_keccak_wasm) $(stylus_test_keccak-100_wasm) $(stylus_test_fallible_wasm) $(stylus_test_storage_wasm) $(stylus_test_multicall_wasm) $(stylus_test_log_wasm) $(stylus_test_create_wasm) $(stylus_test_math_wasm) $(stylus_test_sdk-storage_wasm) $(stylus_test_erc20_wasm) $(stylus_test_read-return-data_wasm) $(stylus_test_evm-data_wasm) $(stylus_test_bfs:.b=.wasm)
+stylus_test_wasms = $(stylus_test_keccak_wasm) $(stylus_test_keccak-100_wasm) $(stylus_test_fallible_wasm) $(stylus_test_storage_wasm) $(stylus_test_multicall_wasm) $(stylus_test_log_wasm) $(stylus_test_create_wasm) $(stylus_test_math_wasm) $(stylus_test_sdk-storage_wasm) $(stylus_test_erc20_wasm) $(stylus_test_read-return-data_wasm) $(stylus_test_evm-data_wasm) $(stylus_test_hostio-test_wasm) $(stylus_test_bfs:.b=.wasm)
stylus_benchmarks = $(wildcard $(stylus_dir)/*.toml $(stylus_dir)/src/*.rs) $(stylus_test_wasms)
# user targets
@@ -157,7 +167,7 @@ all: build build-replay-env test-gen-proofs
@touch .make/all
.PHONY: build
-build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager)
+build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager dbconv)
@printf $(done)
.PHONY: build-node-deps
@@ -275,6 +285,7 @@ clean:
rm -f arbitrator/wasm-libraries/soft-float/SoftFloat/build/Wasm-Clang/*.a
rm -f arbitrator/wasm-libraries/forward/*.wat
rm -rf arbitrator/stylus/tests/*/target/ arbitrator/stylus/tests/*/*.wasm
+ rm -rf brotli/buildfiles
@rm -rf contracts/build contracts/cache solgen/go/
@rm -f .make/*
@@ -310,6 +321,9 @@ $(output_root)/bin/nitro-val: $(DEP_PREDICATE) build-node-deps
$(output_root)/bin/seq-coordinator-manager: $(DEP_PREDICATE) build-node-deps
go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/seq-coordinator-manager"
+$(output_root)/bin/dbconv: $(DEP_PREDICATE) build-node-deps
+ go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/dbconv"
+
# recompile wasm, but don't change timestamp unless files differ
$(replay_wasm): $(DEP_PREDICATE) $(go_source) .make/solgen
mkdir -p `dirname $(replay_wasm)`
@@ -470,6 +484,10 @@ $(stylus_test_erc20_wasm): $(stylus_test_erc20_src)
$(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo)
@touch -c $@ # cargo might decide to not rebuild the binary
+$(stylus_test_hostio-test_wasm): $(stylus_test_hostio-test_src)
+ $(cargo_nightly) --manifest-path $< --release --config $(stylus_cargo)
+ @touch -c $@ # cargo might decide to not rebuild the binary
+
contracts/test/prover/proofs/float%.json: $(arbitrator_cases)/float%.wasm $(prover_bin) $(output_latest)/soft-float.wasm
$(prover_bin) $< -l $(output_latest)/soft-float.wasm -o $@ -b --allow-hostapi --require-success
diff --git a/README.md b/README.md
index a07772628b..1f0e4ac81c 100644
--- a/README.md
+++ b/README.md
@@ -17,26 +17,26 @@
Nitro is the latest iteration of the Arbitrum technology. It is a fully integrated, complete
-layer 2 optimistic rollup system, including fraud proofs, the sequencer, the token bridges,
+layer 2 optimistic rollup system, including fraud proofs, the sequencer, the token bridges,
advanced calldata compression, and more.
See the live docs-site [here](https://developer.arbitrum.io/) (or [here](https://github.com/OffchainLabs/arbitrum-docs) for markdown docs source.)
-See [here](./audits) for security audit reports.
+See [here](https://docs.arbitrum.io/audit-reports) for security audit reports.
-The Nitro stack is built on several innovations. At its core is a new prover, which can do Arbitrum’s classic
-interactive fraud proofs over WASM code. That means the L2 Arbitrum engine can be written and compiled using
+The Nitro stack is built on several innovations. At its core is a new prover, which can do Arbitrum’s classic
+interactive fraud proofs over WASM code. That means the L2 Arbitrum engine can be written and compiled using
standard languages and tools, replacing the custom-designed language and compiler used in previous Arbitrum
-versions. In normal execution,
-validators and nodes run the Nitro engine compiled to native code, switching to WASM if a fraud proof is needed.
-We compile the core of Geth, the EVM engine that practically defines the Ethereum standard, right into Arbitrum.
+versions. In normal execution,
+validators and nodes run the Nitro engine compiled to native code, switching to WASM if a fraud proof is needed.
+We compile the core of Geth, the EVM engine that practically defines the Ethereum standard, right into Arbitrum.
So the previous custom-built EVM emulator is replaced by Geth, the most popular and well-supported Ethereum client.
-The last piece of the stack is a slimmed-down version of our ArbOS component, rewritten in Go, which provides the
-rest of what’s needed to run an L2 chain: things like cross-chain communication, and a new and improved batching
+The last piece of the stack is a slimmed-down version of our ArbOS component, rewritten in Go, which provides the
+rest of what’s needed to run an L2 chain: things like cross-chain communication, and a new and improved batching
and compression system to minimize L1 costs.
-Essentially, Nitro runs Geth at layer 2 on top of Ethereum, and can prove fraud over the core engine of Geth
+Essentially, Nitro runs Geth at layer 2 on top of Ethereum, and can prove fraud over the core engine of Geth
compiled to WASM.
Arbitrum One successfully migrated from the Classic Arbitrum stack onto Nitro on 8/31/22. (See [state migration](https://developer.arbitrum.io/migration/state-migration) and [dapp migration](https://developer.arbitrum.io/migration/dapp_migration) for more info).
@@ -45,14 +45,12 @@ Arbitrum One successfully migrated from the Classic Arbitrum stack onto Nitro on
Nitro is currently licensed under a [Business Source License](./LICENSE.md), similar to our friends at Uniswap and Aave, with an "Additional Use Grant" to ensure that everyone can have full comfort using and running nodes on all public Arbitrum chains.
-The Additional Use Grant also permits the deployment of the Nitro software, in a permissionless fashion and without cost, as a new blockchain provided that the chain settles to either Arbitrum One or Arbitrum Nova.
+The Additional Use Grant also permits the deployment of the Nitro software, in a permissionless fashion and without cost, as a new blockchain provided that the chain settles to either Arbitrum One or Arbitrum Nova.
-For those that prefer to deploy the Nitro software either directly on Ethereum (i.e. an L2) or have it settle to another Layer-2 on top of Ethereum, the [Arbitrum Expansion Program (the "AEP")](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf) was recently established. The AEP allows for the permissionless deployment in the aforementioned fashion provided that 10% of net revenue (as more fully described in the AEP) is contributed back to the Arbitrum community in accordance with the requirements of the AEP.
+For those that prefer to deploy the Nitro software either directly on Ethereum (i.e. an L2) or have it settle to another Layer-2 on top of Ethereum, the [Arbitrum Expansion Program (the "AEP")](https://docs.arbitrum.foundation/assets/files/Arbitrum%20Expansion%20Program%20Jan182024-4f08b0c2cb476a55dc153380fa3e64b0.pdf) was recently established. The AEP allows for the permissionless deployment in the aforementioned fashion provided that 10% of net revenue (as more fully described in the AEP) is contributed back to the Arbitrum community in accordance with the requirements of the AEP.
## Contact
Discord - [Arbitrum](https://discord.com/invite/5KE54JwyTs)
Twitter: [Arbitrum](https://twitter.com/arbitrum)
-
-
diff --git a/arbcompress/compress_common.go b/arbcompress/compress_common.go
index a61dd9a171..997232e7cc 100644
--- a/arbcompress/compress_common.go
+++ b/arbcompress/compress_common.go
@@ -17,6 +17,8 @@ func compressedBufferSizeFor(length int) int {
return length + (length>>10)*8 + 64 // actual limit is: length + (length >> 14) * 4 + 6
}
-func CompressLevel(input []byte, level int) ([]byte, error) {
+func CompressLevel(input []byte, level uint64) ([]byte, error) {
+ // level is trusted and shouldn't be anything crazy
+ // #nosec G115
return Compress(input, uint32(level), EmptyDictionary)
}
diff --git a/arbcompress/native.go b/arbcompress/native.go
index 8244010979..f7b8f0b8e0 100644
--- a/arbcompress/native.go
+++ b/arbcompress/native.go
@@ -7,7 +7,7 @@
package arbcompress
/*
-#cgo CFLAGS: -g -Wall -I${SRCDIR}/../target/include/
+#cgo CFLAGS: -g -I${SRCDIR}/../target/include/
#cgo LDFLAGS: ${SRCDIR}/../target/lib/libstylus.a -lm
#include "arbitrator.h"
*/
diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock
index 79a9117a31..2b437968fa 100644
--- a/arbitrator/Cargo.lock
+++ b/arbitrator/Cargo.lock
@@ -215,7 +215,6 @@ dependencies = [
"prover",
"serde",
"serde_json",
- "serde_with 3.9.0",
]
[[package]]
@@ -496,6 +495,12 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+[[package]]
+name = "clru"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59"
+
[[package]]
name = "colorchoice"
version = "1.0.2"
@@ -705,38 +710,14 @@ dependencies = [
"typenum",
]
-[[package]]
-name = "darling"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
-dependencies = [
- "darling_core 0.13.4",
- "darling_macro 0.13.4",
-]
-
[[package]]
name = "darling"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
dependencies = [
- "darling_core 0.20.10",
- "darling_macro 0.20.10",
-]
-
-[[package]]
-name = "darling_core"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
-dependencies = [
- "fnv",
- "ident_case",
- "proc-macro2",
- "quote",
- "strsim 0.10.0",
- "syn 1.0.109",
+ "darling_core",
+ "darling_macro",
]
[[package]]
@@ -753,24 +734,13 @@ dependencies = [
"syn 2.0.72",
]
-[[package]]
-name = "darling_macro"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
-dependencies = [
- "darling_core 0.13.4",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "darling_macro"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
- "darling_core 0.20.10",
+ "darling_core",
"quote",
"syn 2.0.72",
]
@@ -928,7 +898,7 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242"
dependencies = [
- "darling 0.20.10",
+ "darling",
"proc-macro2",
"quote",
"syn 2.0.72",
@@ -1750,7 +1720,7 @@ dependencies = [
"rustc-demangle",
"serde",
"serde_json",
- "serde_with 1.14.0",
+ "serde_with",
"sha2 0.9.9",
"sha3 0.9.1",
"smallvec",
@@ -2073,16 +2043,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "serde_with"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
-dependencies = [
- "serde",
- "serde_with_macros 1.5.2",
-]
-
[[package]]
name = "serde_with"
version = "3.9.0"
@@ -2097,29 +2057,17 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
- "serde_with_macros 3.9.0",
+ "serde_with_macros",
"time",
]
-[[package]]
-name = "serde_with_macros"
-version = "1.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082"
-dependencies = [
- "darling 0.13.4",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "serde_with_macros"
version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350"
dependencies = [
- "darling 0.20.10",
+ "darling",
"proc-macro2",
"quote",
"syn 2.0.72",
@@ -2226,12 +2174,6 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-[[package]]
-name = "strsim"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
-
[[package]]
name = "strsim"
version = "0.11.1"
@@ -2270,13 +2212,13 @@ dependencies = [
"bincode",
"brotli",
"caller-env",
+ "clru",
"derivative",
"eyre",
"fnv",
"hex",
"lazy_static",
"libc",
- "lru",
"num-bigint",
"parking_lot",
"prover",
diff --git a/arbitrator/Cargo.toml b/arbitrator/Cargo.toml
index 94ca08b0b5..eaafb6e439 100644
--- a/arbitrator/Cargo.toml
+++ b/arbitrator/Cargo.toml
@@ -24,9 +24,7 @@ repository = "https://github.com/OffchainLabs/nitro.git"
rust-version = "1.67"
[workspace.dependencies]
-cfg-if = "1.0.0"
lazy_static = "1.4.0"
-lru = "0.12.3"
num_enum = { version = "0.7.2", default-features = false }
ruint2 = "1.9.0"
wasmparser = "0.121"
diff --git a/arbitrator/arbutil/src/evm/api.rs b/arbitrator/arbutil/src/evm/api.rs
index 093e7f2984..9d4c78c0de 100644
--- a/arbitrator/arbutil/src/evm/api.rs
+++ b/arbitrator/arbutil/src/evm/api.rs
@@ -77,7 +77,7 @@ pub trait EvmApi: Send + 'static {
/// Reads the 32-byte value in the EVM state trie at offset `key`.
/// Returns the value and the access cost in gas.
/// Analogous to `vm.SLOAD`.
- fn get_bytes32(&mut self, key: Bytes32) -> (Bytes32, u64);
+ fn get_bytes32(&mut self, key: Bytes32, evm_api_gas_to_use: u64) -> (Bytes32, u64);
/// Stores the given value at the given key in Stylus VM's cache of the EVM state trie.
/// Note that the actual values only get written after calls to `set_trie_slots`.
diff --git a/arbitrator/arbutil/src/evm/mod.rs b/arbitrator/arbutil/src/evm/mod.rs
index 1671e67072..36dadd906a 100644
--- a/arbitrator/arbutil/src/evm/mod.rs
+++ b/arbitrator/arbutil/src/evm/mod.rs
@@ -74,9 +74,12 @@ pub const GASPRICE_GAS: u64 = GAS_QUICK_STEP;
// vm.GasQuickStep (see jump_table.go)
pub const ORIGIN_GAS: u64 = GAS_QUICK_STEP;
+pub const ARBOS_VERSION_STYLUS_CHARGING_FIXES: u64 = 32;
+
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)]
pub struct EvmData {
+ pub arbos_version: u64,
pub block_basefee: Bytes32,
pub chainid: u64,
pub block_coinbase: Bytes20,
diff --git a/arbitrator/arbutil/src/evm/req.rs b/arbitrator/arbutil/src/evm/req.rs
index b1c8d99972..0304f2d378 100644
--- a/arbitrator/arbutil/src/evm/req.rs
+++ b/arbitrator/arbutil/src/evm/req.rs
@@ -7,8 +7,6 @@ use crate::{
storage::{StorageCache, StorageWord},
user::UserOutcomeKind,
},
- format::Utf8OrHex,
- pricing::EVM_API_INK,
Bytes20, Bytes32,
};
use eyre::{bail, eyre, Result};
@@ -100,13 +98,13 @@ impl> EvmApiRequestor {
}
impl> EvmApi for EvmApiRequestor {
- fn get_bytes32(&mut self, key: Bytes32) -> (Bytes32, u64) {
+ fn get_bytes32(&mut self, key: Bytes32, evm_api_gas_to_use: u64) -> (Bytes32, u64) {
let cache = &mut self.storage_cache;
let mut cost = cache.read_gas();
let value = cache.entry(key).or_insert_with(|| {
let (res, _, gas) = self.handler.request(EvmApiMethod::GetBytes32, key);
- cost = cost.saturating_add(gas).saturating_add(EVM_API_INK);
+ cost = cost.saturating_add(gas).saturating_add(evm_api_gas_to_use);
StorageWord::known(res.try_into().unwrap())
});
(value.value, cost)
@@ -140,8 +138,13 @@ impl> EvmApi for EvmApiRequestor {
}
let (res, _, cost) = self.request(EvmApiMethod::SetTrieSlots, data);
- if res[0] != EvmApiStatus::Success.into() {
- bail!("{}", String::from_utf8_or_hex(res));
+ let status = res
+ .first()
+ .copied()
+ .map(EvmApiStatus::from)
+ .unwrap_or(EvmApiStatus::Failure);
+ if status != EvmApiStatus::Success {
+ bail!("{:?}", status);
}
Ok(cost)
}
@@ -156,8 +159,13 @@ impl> EvmApi for EvmApiRequestor {
data.extend(key);
data.extend(value);
let (res, ..) = self.request(EvmApiMethod::SetTransientBytes32, data);
- if res[0] != EvmApiStatus::Success.into() {
- bail!("{}", String::from_utf8_or_hex(res));
+ let status = res
+ .first()
+ .copied()
+ .map(EvmApiStatus::from)
+ .unwrap_or(EvmApiStatus::Failure);
+ if status != EvmApiStatus::Success {
+ bail!("{:?}", status);
}
Ok(())
}
@@ -290,9 +298,10 @@ impl> EvmApi for EvmApiRequestor {
let mut request = Vec::with_capacity(2 * 8 + 3 * 2 + name.len() + args.len() + outs.len());
request.extend(start_ink.to_be_bytes());
request.extend(end_ink.to_be_bytes());
- request.extend((name.len() as u16).to_be_bytes());
- request.extend((args.len() as u16).to_be_bytes());
- request.extend((outs.len() as u16).to_be_bytes());
+ // u32 is enough to represent the slices lengths because the WASM environment runs in 32 bits.
+ request.extend((name.len() as u32).to_be_bytes());
+ request.extend((args.len() as u32).to_be_bytes());
+ request.extend((outs.len() as u32).to_be_bytes());
request.extend(name.as_bytes());
request.extend(args);
request.extend(outs);
diff --git a/arbitrator/arbutil/src/types.rs b/arbitrator/arbutil/src/types.rs
index 6cf1d6cdf7..722a89b81e 100644
--- a/arbitrator/arbutil/src/types.rs
+++ b/arbitrator/arbutil/src/types.rs
@@ -8,6 +8,7 @@ use std::{
borrow::Borrow,
fmt,
ops::{Deref, DerefMut},
+ str::FromStr,
};
// These values must be kept in sync with `arbutil/preimage_type.go`,
@@ -83,6 +84,32 @@ impl From for Bytes32 {
}
}
+impl FromStr for Bytes32 {
+ type Err = &'static str;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ // Remove the "0x" prefix if present
+ let s = s.strip_prefix("0x").unwrap_or(s);
+
+ // Pad with leading zeros if the string is shorter than 64 characters (32 bytes)
+ let padded = format!("{:0>64}", s);
+
+ // Decode the hex string using the hex crate
+ let decoded_bytes = hex::decode(padded).map_err(|_| "Invalid hex string")?;
+
+ // Ensure the decoded bytes is exactly 32 bytes
+ if decoded_bytes.len() != 32 {
+ return Err("Hex string too long for Bytes32");
+ }
+
+ // Create a 32-byte array and fill it with the decoded bytes.
+ let mut b = [0u8; 32];
+ b.copy_from_slice(&decoded_bytes);
+
+ Ok(Bytes32(b))
+ }
+}
+
impl TryFrom<&[u8]> for Bytes32 {
type Error = std::array::TryFromSliceError;
@@ -249,3 +276,77 @@ impl From for Bytes20 {
<[u8; 20]>::from(x).into()
}
}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_bytes32() {
+ let b = Bytes32::from(0x12345678u32);
+ let expected = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x12, 0x34, 0x56, 0x78,
+ ];
+ assert_eq!(b, Bytes32(expected));
+ }
+
+ #[test]
+ fn test_from_str_short() {
+ // Short hex string
+ let b = Bytes32::from_str("0x12345678").unwrap();
+ let expected = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x12, 0x34, 0x56, 0x78,
+ ];
+ assert_eq!(b, Bytes32(expected));
+ }
+
+ #[test]
+ fn test_from_str_very_short() {
+ // Short hex string
+ let b = Bytes32::from_str("0x1").unwrap();
+ let expected = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0x1,
+ ];
+ assert_eq!(b, Bytes32(expected));
+ }
+
+ #[test]
+ fn test_from_str_no_prefix() {
+ // Short hex string
+ let b = Bytes32::from_str("12345678").unwrap();
+ let expected = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x12, 0x34, 0x56, 0x78,
+ ];
+ assert_eq!(b, Bytes32(expected));
+ }
+
+ #[test]
+ fn test_from_str_full() {
+ // Full-length hex string
+ let b =
+ Bytes32::from_str("0x0000000000000000000000000000000000000000000000000000000012345678")
+ .unwrap();
+ let expected = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x12, 0x34, 0x56, 0x78,
+ ];
+ assert_eq!(b, Bytes32(expected));
+ }
+
+ #[test]
+ fn test_from_str_invalid_non_hex() {
+ let s = "0x123g5678"; // Invalid character 'g'
+ assert!(Bytes32::from_str(s).is_err());
+ }
+
+ #[test]
+ fn test_from_str_too_big() {
+ let s =
+ "0123456789ABCDEF0123456789ABCDEF01234567890123456789ABCDEF01234567890123456789ABCDEF0"; // 65 characters
+ assert!(Bytes32::from_str(s).is_err());
+ }
+}
diff --git a/arbitrator/bench/Cargo.toml b/arbitrator/bench/Cargo.toml
index 3ab5b99b08..74b948aca8 100644
--- a/arbitrator/bench/Cargo.toml
+++ b/arbitrator/bench/Cargo.toml
@@ -3,10 +3,6 @@ name = "bench"
version = "0.1.0"
edition = "2021"
-[lib]
-name = "bench"
-path = "src/lib.rs"
-
[[bin]]
name = "benchbin"
path = "src/bin.rs"
@@ -20,7 +16,6 @@ clap = { version = "4.4.8", features = ["derive"] }
gperftools = { version = "0.2.0", optional = true }
serde = { version = "1.0.130", features = ["derive", "rc"] }
serde_json = "1.0.67"
-serde_with = { version = "3.8.1", features = ["base64"] }
[features]
counters = []
diff --git a/arbitrator/bench/src/bin.rs b/arbitrator/bench/src/bin.rs
index f7e69f5373..60a7036e2b 100644
--- a/arbitrator/bench/src/bin.rs
+++ b/arbitrator/bench/src/bin.rs
@@ -1,6 +1,5 @@
use std::{path::PathBuf, time::Duration};
-use bench::prepare::*;
use clap::Parser;
use eyre::bail;
@@ -10,11 +9,12 @@ use gperftools::profiler::PROFILER;
#[cfg(feature = "heapprof")]
use gperftools::heap_profiler::HEAP_PROFILER;
-use prover::machine::MachineStatus;
-
#[cfg(feature = "counters")]
use prover::{machine, memory, merkle};
+use prover::machine::MachineStatus;
+use prover::prepare::prepare_machine;
+
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
diff --git a/arbitrator/bench/src/lib.rs b/arbitrator/bench/src/lib.rs
deleted file mode 100644
index 5f7c024094..0000000000
--- a/arbitrator/bench/src/lib.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub mod parse_input;
-pub mod prepare;
diff --git a/arbitrator/bench/src/parse_input.rs b/arbitrator/bench/src/parse_input.rs
deleted file mode 100644
index decc67372a..0000000000
--- a/arbitrator/bench/src/parse_input.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-use arbutil::Bytes32;
-use serde::{Deserialize, Serialize};
-use serde_json;
-use serde_with::base64::Base64;
-use serde_with::As;
-use serde_with::DisplayFromStr;
-use std::{
- collections::HashMap,
- io::{self, BufRead},
-};
-
-mod prefixed_hex {
- use serde::{self, Deserialize, Deserializer, Serializer};
-
- pub fn serialize(bytes: &Vec, serializer: S) -> Result
- where
- S: Serializer,
- {
- serializer.serialize_str(&format!("0x{}", hex::encode(bytes)))
- }
-
- pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error>
- where
- D: Deserializer<'de>,
- {
- let s = String::deserialize(deserializer)?;
- if let Some(s) = s.strip_prefix("0x") {
- hex::decode(s).map_err(serde::de::Error::custom)
- } else {
- Err(serde::de::Error::custom("missing 0x prefix"))
- }
- }
-}
-
-#[derive(Debug, Clone, Deserialize, Serialize)]
-pub struct PreimageMap(HashMap>);
-
-#[derive(Debug, Clone, Deserialize, Serialize)]
-#[serde(rename_all = "PascalCase")]
-pub struct BatchInfo {
- pub number: u64,
- #[serde(with = "As::")]
- pub data_b64: Vec,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "PascalCase")]
-pub struct StartState {
- #[serde(with = "prefixed_hex")]
- pub block_hash: Vec,
- #[serde(with = "prefixed_hex")]
- pub send_root: Vec,
- pub batch: u64,
- pub pos_in_batch: u64,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "PascalCase")]
-pub struct FileData {
- pub id: u64,
- pub has_delayed_msg: bool,
- pub delayed_msg_nr: u64,
- #[serde(with = "As::>>")]
- pub preimages_b64: HashMap>>,
- pub batch_info: Vec,
- #[serde(with = "As::")]
- pub delayed_msg_b64: Vec,
- pub start_state: StartState,
-}
-
-impl FileData {
- pub fn from_reader(mut reader: R) -> io::Result {
- let data = serde_json::from_reader(&mut reader)?;
- Ok(data)
- }
-}
diff --git a/arbitrator/jit/src/machine.rs b/arbitrator/jit/src/machine.rs
index 2a3c5c5616..02523f740a 100644
--- a/arbitrator/jit/src/machine.rs
+++ b/arbitrator/jit/src/machine.rs
@@ -129,7 +129,9 @@ pub fn create(opts: &Opts, env: WasmEnv) -> (Instance, FunctionEnv, Sto
"send_response" => func!(program::send_response),
"create_stylus_config" => func!(program::create_stylus_config),
"create_evm_data" => func!(program::create_evm_data),
+ "create_evm_data_v2" => func!(program::create_evm_data_v2),
"activate" => func!(program::activate),
+ "activate_v2" => func!(program::activate_v2),
},
};
diff --git a/arbitrator/jit/src/program.rs b/arbitrator/jit/src/program.rs
index c608a3cf85..084afe96bc 100644
--- a/arbitrator/jit/src/program.rs
+++ b/arbitrator/jit/src/program.rs
@@ -16,8 +16,45 @@ use prover::{
programs::{config::PricingParams, prelude::*},
};
-/// activates a user program
+const DEFAULT_STYLUS_ARBOS_VERSION: u64 = 31;
+
pub fn activate(
+ env: WasmEnvMut,
+ wasm_ptr: GuestPtr,
+ wasm_size: u32,
+ pages_ptr: GuestPtr,
+ asm_estimate_ptr: GuestPtr,
+ init_cost_ptr: GuestPtr,
+ cached_init_cost_ptr: GuestPtr,
+ stylus_version: u16,
+ debug: u32,
+ codehash: GuestPtr,
+ module_hash_ptr: GuestPtr,
+ gas_ptr: GuestPtr,
+ err_buf: GuestPtr,
+ err_buf_len: u32,
+) -> Result<u32, Escape> {
+ activate_v2(
+ env,
+ wasm_ptr,
+ wasm_size,
+ pages_ptr,
+ asm_estimate_ptr,
+ init_cost_ptr,
+ cached_init_cost_ptr,
+ stylus_version,
+ DEFAULT_STYLUS_ARBOS_VERSION,
+ debug,
+ codehash,
+ module_hash_ptr,
+ gas_ptr,
+ err_buf,
+ err_buf_len,
+ )
+}
+
+/// activates a user program
+pub fn activate_v2(
mut env: WasmEnvMut,
wasm_ptr: GuestPtr,
wasm_size: u32,
@@ -25,7 +62,8 @@ pub fn activate(
asm_estimate_ptr: GuestPtr,
init_cost_ptr: GuestPtr,
cached_init_cost_ptr: GuestPtr,
- version: u16,
+ stylus_version: u16,
+ arbos_version_for_gas: u64,
debug: u32,
codehash: GuestPtr,
module_hash_ptr: GuestPtr,
@@ -40,7 +78,15 @@ pub fn activate(
let page_limit = mem.read_u16(pages_ptr);
let gas_left = &mut mem.read_u64(gas_ptr);
- match Module::activate(&wasm, codehash, version, page_limit, debug, gas_left) {
+ match Module::activate(
+ &wasm,
+ codehash,
+ stylus_version,
+ arbos_version_for_gas,
+ page_limit,
+ debug,
+ gas_left,
+ ) {
Ok((module, data)) => {
mem.write_u64(gas_ptr, *gas_left);
mem.write_u16(pages_ptr, data.footprint);
@@ -222,9 +268,47 @@ pub fn create_stylus_config(
Ok(res as u64)
}
-/// Creates an `EvmData` handler from its component parts.
pub fn create_evm_data(
+ env: WasmEnvMut,
+ block_basefee_ptr: GuestPtr,
+ chainid: u64,
+ block_coinbase_ptr: GuestPtr,
+ block_gas_limit: u64,
+ block_number: u64,
+ block_timestamp: u64,
+ contract_address_ptr: GuestPtr,
+ module_hash_ptr: GuestPtr,
+ msg_sender_ptr: GuestPtr,
+ msg_value_ptr: GuestPtr,
+ tx_gas_price_ptr: GuestPtr,
+ tx_origin_ptr: GuestPtr,
+ cached: u32,
+ reentrant: u32,
+) -> Result<u64, Escape> {
+ create_evm_data_v2(
+ env,
+ DEFAULT_STYLUS_ARBOS_VERSION,
+ block_basefee_ptr,
+ chainid,
+ block_coinbase_ptr,
+ block_gas_limit,
+ block_number,
+ block_timestamp,
+ contract_address_ptr,
+ module_hash_ptr,
+ msg_sender_ptr,
+ msg_value_ptr,
+ tx_gas_price_ptr,
+ tx_origin_ptr,
+ cached,
+ reentrant,
+ )
+}
+
+/// Creates an `EvmData` handler from its component parts.
+pub fn create_evm_data_v2(
mut env: WasmEnvMut,
+ arbos_version: u64,
block_basefee_ptr: GuestPtr,
chainid: u64,
block_coinbase_ptr: GuestPtr,
@@ -243,6 +327,7 @@ pub fn create_evm_data(
let (mut mem, _) = env.jit_env();
let evm_data = EvmData {
+ arbos_version,
block_basefee: mem.read_bytes32(block_basefee_ptr),
cached: cached != 0,
chainid,
diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs
index 062d18d8e9..0ca666d3b2 100644
--- a/arbitrator/jit/src/wavmio.rs
+++ b/arbitrator/jit/src/wavmio.rs
@@ -8,8 +8,6 @@ use crate::{
};
use arbutil::{Color, PreimageType};
use caller_env::{GuestPtr, MemAccess};
-use sha2::Sha256;
-use sha3::{Digest, Keccak256};
use std::{
io,
io::{BufReader, BufWriter, ErrorKind},
@@ -170,19 +168,25 @@ pub fn resolve_preimage_impl(
error!("Missing requested preimage for hash {hash_hex} in {name}")
};
- // Check if preimage rehashes to the provided hash. Exclude blob preimages
- let calculated_hash: [u8; 32] = match preimage_type {
- PreimageType::Keccak256 => Keccak256::digest(preimage).into(),
- PreimageType::Sha2_256 => Sha256::digest(preimage).into(),
- PreimageType::EthVersionedHash => *hash,
- };
- if calculated_hash != *hash {
- error!(
- "Calculated hash {} of preimage {} does not match provided hash {}",
- hex::encode(calculated_hash),
- hex::encode(preimage),
- hex::encode(*hash)
- );
+ #[cfg(debug_assertions)]
+ {
+ use sha2::Sha256;
+ use sha3::{Digest, Keccak256};
+
+ // Check if preimage rehashes to the provided hash. Exclude blob preimages
+ let calculated_hash: [u8; 32] = match preimage_type {
+ PreimageType::Keccak256 => Keccak256::digest(preimage).into(),
+ PreimageType::Sha2_256 => Sha256::digest(preimage).into(),
+ PreimageType::EthVersionedHash => *hash,
+ };
+ if calculated_hash != *hash {
+ error!(
+ "Calculated hash {} of preimage {} does not match provided hash {}",
+ hex::encode(calculated_hash),
+ hex::encode(preimage),
+ hex::encode(*hash)
+ );
+ }
}
if offset % 32 != 0 {
diff --git a/arbitrator/prover/Cargo.toml b/arbitrator/prover/Cargo.toml
index 5475647765..da329b1cb5 100644
--- a/arbitrator/prover/Cargo.toml
+++ b/arbitrator/prover/Cargo.toml
@@ -19,10 +19,10 @@ num = "0.4"
rustc-demangle = "0.1.21"
serde = { version = "1.0.130", features = ["derive", "rc"] }
serde_json = "1.0.67"
+serde_with = { version = "3.8.1", features = ["base64"] }
sha3 = "0.9.1"
static_assertions = "1.1.0"
structopt = "0.3.23"
-serde_with = "1.12.1"
parking_lot = "0.12.1"
lazy_static.workspace = true
itertools = "0.10.5"
diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs
index aa5537476c..2260f6bf48 100644
--- a/arbitrator/prover/src/binary.rs
+++ b/arbitrator/prover/src/binary.rs
@@ -9,7 +9,9 @@ use crate::{
},
value::{ArbValueType, FunctionType, IntegerValType, Value},
};
-use arbutil::{math::SaturatingSum, Bytes32, Color, DebugColor};
+use arbutil::{
+ evm::ARBOS_VERSION_STYLUS_CHARGING_FIXES, math::SaturatingSum, Bytes32, Color, DebugColor,
+};
use eyre::{bail, ensure, eyre, Result, WrapErr};
use fnv::{FnvHashMap as HashMap, FnvHashSet as HashSet};
use nom::{
@@ -641,6 +643,7 @@ impl<'a> WasmBinary<'a> {
/// Parses and instruments a user wasm
pub fn parse_user(
wasm: &'a [u8],
+ arbos_version_for_gas: u64,
page_limit: u16,
compile: &CompileConfig,
codehash: &Bytes32,
@@ -678,6 +681,10 @@ impl<'a> WasmBinary<'a> {
limit!(65536, code.expr.len(), "opcodes in func body");
}
+ if arbos_version_for_gas >= ARBOS_VERSION_STYLUS_CHARGING_FIXES {
+ limit!(513, bin.imports.len(), "imports")
+ }
+
let table_entries = bin.tables.iter().map(|x| x.initial).saturating_sum();
limit!(4096, table_entries, "table entries");
diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs
index 0f537478eb..08473c2598 100644
--- a/arbitrator/prover/src/lib.rs
+++ b/arbitrator/prover/src/lib.rs
@@ -11,6 +11,8 @@ pub mod machine;
/// cbindgen:ignore
pub mod memory;
pub mod merkle;
+pub mod parse_input;
+pub mod prepare;
mod print;
pub mod programs;
mod reinterpret;
diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs
index 358876bd25..4ece1f7bf2 100644
--- a/arbitrator/prover/src/machine.rs
+++ b/arbitrator/prover/src/machine.rs
@@ -371,13 +371,16 @@ impl Module {
for import in &bin.imports {
let module = import.module;
let have_ty = &bin.types[import.offset as usize];
- let (forward, import_name) = match import.name.strip_prefix(Module::FORWARDING_PREFIX) {
- Some(name) => (true, name),
- None => (false, import.name),
- };
+ // allow_hostapi is only set for system modules like the
+ // forwarder. We restrict stripping the prefix for user modules.
+ let (forward, import_name) =
+ if allow_hostapi && import.name.starts_with(Self::FORWARDING_PREFIX) {
+ (true, &import.name[Self::FORWARDING_PREFIX.len()..])
+ } else {
+ (false, import.name)
+ };
- let mut qualified_name = format!("{module}__{import_name}");
- qualified_name = qualified_name.replace(&['/', '.', '-'] as &[char], "_");
+ let qualified_name = format!("{module}__{import_name}");
let func = if let Some(import) = available_imports.get(&qualified_name) {
let call = match forward {
diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs
index dba32e0e72..a889cc60f3 100644
--- a/arbitrator/prover/src/main.rs
+++ b/arbitrator/prover/src/main.rs
@@ -8,6 +8,7 @@ use eyre::{eyre, Context, Result};
use fnv::{FnvHashMap as HashMap, FnvHashSet as HashSet};
use prover::{
machine::{GlobalState, InboxIdentifier, Machine, MachineStatus, PreimageResolver, ProofInfo},
+ prepare::prepare_machine,
utils::{file_bytes, hash_preimage, CBytes},
wavm::Opcode,
};
@@ -86,6 +87,10 @@ struct Opts {
skip_until_host_io: bool,
#[structopt(long)]
max_steps: Option<u64>,
+ // JSON inputs supersede any of the command-line inputs which could
+ // be specified in the JSON file.
+ #[structopt(long)]
+ json_inputs: Option<PathBuf>,
}
fn file_with_stub_header(path: &Path, headerlength: usize) -> Result> {
@@ -135,83 +140,8 @@ fn main() -> Result<()> {
}
}
}
- let mut inbox_contents = HashMap::default();
- let mut inbox_position = opts.inbox_position;
- let mut delayed_position = opts.delayed_inbox_position;
- let inbox_header_len;
- let delayed_header_len;
- if opts.inbox_add_stub_headers {
- inbox_header_len = INBOX_HEADER_LEN;
- delayed_header_len = DELAYED_HEADER_LEN + 1;
- } else {
- inbox_header_len = 0;
- delayed_header_len = 0;
- }
-
- for path in opts.inbox {
- inbox_contents.insert(
- (InboxIdentifier::Sequencer, inbox_position),
- file_with_stub_header(&path, inbox_header_len)?,
- );
- println!("read file {:?} to seq. inbox {}", &path, inbox_position);
- inbox_position += 1;
- }
- for path in opts.delayed_inbox {
- inbox_contents.insert(
- (InboxIdentifier::Delayed, delayed_position),
- file_with_stub_header(&path, delayed_header_len)?,
- );
- delayed_position += 1;
- }
- let mut preimages: HashMap<PreimageType, HashMap<Bytes32, CBytes>> = HashMap::default();
- if let Some(path) = opts.preimages {
- let mut file = BufReader::new(File::open(path)?);
- loop {
- let mut ty_buf = [0u8; 1];
- match file.read_exact(&mut ty_buf) {
- Ok(()) => {}
- Err(e) if e.kind() == ErrorKind::UnexpectedEof => break,
- Err(e) => return Err(e.into()),
- }
- let preimage_ty: PreimageType = ty_buf[0].try_into()?;
-
- let mut size_buf = [0u8; 8];
- file.read_exact(&mut size_buf)?;
- let size = u64::from_le_bytes(size_buf) as usize;
- let mut buf = vec![0u8; size];
- file.read_exact(&mut buf)?;
-
- let hash = hash_preimage(&buf, preimage_ty)?;
- preimages
- .entry(preimage_ty)
- .or_default()
- .insert(hash.into(), buf.as_slice().into());
- }
- }
- let preimage_resolver =
- Arc::new(move |_, ty, hash| preimages.get(&ty).and_then(|m| m.get(&hash)).cloned())
- as PreimageResolver;
-
- let last_block_hash = decode_hex_arg(&opts.last_block_hash, "--last-block-hash")?;
- let last_send_root = decode_hex_arg(&opts.last_send_root, "--last-send-root")?;
-
- let global_state = GlobalState {
- u64_vals: [opts.inbox_position, opts.position_within_message],
- bytes32_vals: [last_block_hash, last_send_root],
- };
-
- let mut mach = Machine::from_paths(
- &opts.libraries,
- &opts.binary,
- true,
- opts.allow_hostapi,
- opts.debug_funcs,
- true,
- global_state,
- inbox_contents,
- preimage_resolver,
- )?;
+ let mut mach = initialize_machine(&opts)?;
for path in &opts.stylus_modules {
let err = || eyre!("failed to read module at {}", path.to_string_lossy().red());
@@ -414,6 +344,13 @@ fn main() -> Result<()> {
});
}
+ println!(
+ "End GlobalState:\n BlockHash: {:?}\n SendRoot: {:?}\n Batch: {}\n PosInBatch: {}",
+ mach.get_global_state().bytes32_vals[0],
+ mach.get_global_state().bytes32_vals[1],
+ mach.get_global_state().u64_vals[0],
+ mach.get_global_state().u64_vals[1]
+ );
println!("End machine status: {:?}", mach.get_status());
println!("End machine hash: {}", mach.hash());
println!("End machine stack: {:?}", mach.get_data_stack());
@@ -462,7 +399,6 @@ fn main() -> Result<()> {
}
}
}
-
let opts_binary = opts.binary;
let opts_libraries = opts.libraries;
let format_pc = |module_num: usize, func_num: usize| -> (String, String) {
@@ -543,3 +479,87 @@ fn main() -> Result<()> {
}
Ok(())
}
+
+fn initialize_machine(opts: &Opts) -> eyre::Result<Machine> {
+ if let Some(json_inputs) = opts.json_inputs.clone() {
+ prepare_machine(json_inputs, opts.binary.clone())
+ } else {
+ let mut inbox_contents = HashMap::default();
+ let mut inbox_position = opts.inbox_position;
+ let mut delayed_position = opts.delayed_inbox_position;
+ let inbox_header_len;
+ let delayed_header_len;
+ if opts.inbox_add_stub_headers {
+ inbox_header_len = INBOX_HEADER_LEN;
+ delayed_header_len = DELAYED_HEADER_LEN + 1;
+ } else {
+ inbox_header_len = 0;
+ delayed_header_len = 0;
+ }
+
+ for path in opts.inbox.clone() {
+ inbox_contents.insert(
+ (InboxIdentifier::Sequencer, inbox_position),
+ file_with_stub_header(&path, inbox_header_len)?,
+ );
+ println!("read file {:?} to seq. inbox {}", &path, inbox_position);
+ inbox_position += 1;
+ }
+ for path in opts.delayed_inbox.clone() {
+ inbox_contents.insert(
+ (InboxIdentifier::Delayed, delayed_position),
+ file_with_stub_header(&path, delayed_header_len)?,
+ );
+ delayed_position += 1;
+ }
+
+ let mut preimages: HashMap<PreimageType, HashMap<Bytes32, CBytes>> = HashMap::default();
+ if let Some(path) = opts.preimages.clone() {
+ let mut file = BufReader::new(File::open(path)?);
+ loop {
+ let mut ty_buf = [0u8; 1];
+ match file.read_exact(&mut ty_buf) {
+ Ok(()) => {}
+ Err(e) if e.kind() == ErrorKind::UnexpectedEof => break,
+ Err(e) => return Err(e.into()),
+ }
+ let preimage_ty: PreimageType = ty_buf[0].try_into()?;
+
+ let mut size_buf = [0u8; 8];
+ file.read_exact(&mut size_buf)?;
+ let size = u64::from_le_bytes(size_buf) as usize;
+ let mut buf = vec![0u8; size];
+ file.read_exact(&mut buf)?;
+
+ let hash = hash_preimage(&buf, preimage_ty)?;
+ preimages
+ .entry(preimage_ty)
+ .or_default()
+ .insert(hash.into(), buf.as_slice().into());
+ }
+ }
+ let preimage_resolver =
+ Arc::new(move |_, ty, hash| preimages.get(&ty).and_then(|m| m.get(&hash)).cloned())
+ as PreimageResolver;
+
+ let last_block_hash = decode_hex_arg(&opts.last_block_hash, "--last-block-hash")?;
+ let last_send_root = decode_hex_arg(&opts.last_send_root, "--last-send-root")?;
+
+ let global_state = GlobalState {
+ u64_vals: [opts.inbox_position, opts.position_within_message],
+ bytes32_vals: [last_block_hash, last_send_root],
+ };
+
+ Machine::from_paths(
+ &opts.libraries,
+ &opts.binary,
+ true,
+ opts.allow_hostapi,
+ opts.debug_funcs,
+ true,
+ global_state,
+ inbox_contents,
+ preimage_resolver,
+ )
+ }
+}
diff --git a/arbitrator/prover/src/parse_input.rs b/arbitrator/prover/src/parse_input.rs
new file mode 100644
index 0000000000..fa7adb4c41
--- /dev/null
+++ b/arbitrator/prover/src/parse_input.rs
@@ -0,0 +1,112 @@
+use arbutil::Bytes32;
+use serde::Deserialize;
+use serde_json;
+use serde_with::base64::Base64;
+use serde_with::As;
+use serde_with::DisplayFromStr;
+use std::{
+ collections::HashMap,
+ io::{self, BufRead},
+};
+
+/// prefixed_hex deserializes hex strings which are prefixed with `0x`
+///
+/// The default hex deserializer does not support prefixed hex strings.
+///
+/// It is an error to use this deserializer on a string that does not
+/// begin with `0x`.
+mod prefixed_hex {
+ use serde::{self, Deserialize, Deserializer};
+
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let s = String::deserialize(deserializer)?;
+ if let Some(s) = s.strip_prefix("0x") {
+ hex::decode(s).map_err(serde::de::Error::custom)
+ } else {
+ Err(serde::de::Error::custom("missing 0x prefix"))
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct UserWasm(Vec<u8>);
+
+/// UserWasm is a wrapper around Vec<u8>
+///
+/// It is useful for decompressing a brotli-compressed wasm module.
+///
+/// Note: The wrapped Vec<u8> is already Base64 decoded before
+/// from(Vec<u8>) is called by serde.
+impl UserWasm {
+ /// as_vec returns the decompressed wasm module as a Vec<u8>
+ pub fn as_vec(&self) -> Vec<u8> {
+ self.0.clone()
+ }
+}
+
+impl AsRef<[u8]> for UserWasm {
+ fn as_ref(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+/// The Vec<u8> is compressed using brotli, and must be decompressed before use.
+impl From<Vec<u8>> for UserWasm {
+ fn from(data: Vec<u8>) -> Self {
+ let decompressed = brotli::decompress(&data, brotli::Dictionary::Empty).unwrap();
+ Self(decompressed)
+ }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct BatchInfo {
+ pub number: u64,
+ #[serde(with = "As::<Base64>")]
+ pub data_b64: Vec<u8>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct StartState {
+ #[serde(with = "prefixed_hex")]
+ pub block_hash: Vec,
+ #[serde(with = "prefixed_hex")]
+ pub send_root: Vec,
+ pub batch: u64,
+ pub pos_in_batch: u64,
+}
+
+/// FileData is the deserialized form of the input JSON file.
+///
+/// The go JSON library in json.go uses some custom serialization and
+/// compression logic that needs to be reversed when deserializing the
+/// JSON in rust.
+///
+/// Note: It is important to change this file whenever the go JSON
+/// serialization changes.
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "PascalCase")]
+pub struct FileData {
+ pub id: u64,
+ pub has_delayed_msg: bool,
+ pub delayed_msg_nr: u64,
+ #[serde(with = "As::<HashMap<DisplayFromStr, HashMap<DisplayFromStr, Base64>>>")]
+ pub preimages_b64: HashMap<u32, HashMap<Bytes32, Vec<u8>>>,
+ pub batch_info: Vec<BatchInfo>,
+ #[serde(with = "As::<Base64>")]
+ pub delayed_msg_b64: Vec<u8>,
+ pub start_state: StartState,
+ #[serde(with = "As::<HashMap<DisplayFromStr, HashMap<DisplayFromStr, Base64>>>")]
+ pub user_wasms: HashMap<String, HashMap<Bytes32, UserWasm>>,
+}
+
+impl FileData {
+ pub fn from_reader<R: BufRead>(mut reader: R) -> io::Result<Self> {
+ let data = serde_json::from_reader(&mut reader)?;
+ Ok(data)
+ }
+}
diff --git a/arbitrator/bench/src/prepare.rs b/arbitrator/prover/src/prepare.rs
similarity index 85%
rename from arbitrator/bench/src/prepare.rs
rename to arbitrator/prover/src/prepare.rs
index 741a7350ac..a485267f39 100644
--- a/arbitrator/bench/src/prepare.rs
+++ b/arbitrator/prover/src/prepare.rs
@@ -1,13 +1,13 @@
use arbutil::{Bytes32, PreimageType};
-use prover::machine::{argument_data_to_inbox, GlobalState, Machine};
-use prover::utils::CBytes;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::path::{Path, PathBuf};
use std::sync::Arc;
+use crate::machine::{argument_data_to_inbox, GlobalState, Machine};
use crate::parse_input::*;
+use crate::utils::CBytes;
pub fn prepare_machine(preimages: PathBuf, machines: PathBuf) -> eyre::Result<Machine> {
let file = File::open(preimages)?;
@@ -40,6 +40,15 @@ pub fn prepare_machine(preimages: PathBuf, machines: PathBuf) -> eyre::Result Store {
- let mut compiler: Box<dyn CompilerConfig> = match self.debug.cranelift {
+ pub fn engine(&self, target: Target) -> Engine {
+ use wasmer::sys::EngineBuilder;
+
+ let mut wasmer_config: Box<dyn CompilerConfig> = match self.debug.cranelift {
true => {
- let mut compiler = Cranelift::new();
- compiler.opt_level(CraneliftOptLevel::Speed);
- Box::new(compiler)
+ let mut wasmer_config = Cranelift::new();
+ wasmer_config.opt_level(CraneliftOptLevel::Speed);
+ Box::new(wasmer_config)
}
false => Box::new(Singlepass::new()),
};
- compiler.canonicalize_nans(true);
- compiler.enable_verifier();
+ wasmer_config.canonicalize_nans(true);
+ wasmer_config.enable_verifier();
let start = MiddlewareWrapper::new(StartMover::new(self.debug.debug_info));
let meter = MiddlewareWrapper::new(Meter::new(&self.pricing));
@@ -200,22 +202,24 @@ impl CompileConfig {
// add the instrumentation in the order of application
// note: this must be consistent with the prover
- compiler.push_middleware(Arc::new(start));
- compiler.push_middleware(Arc::new(meter));
- compiler.push_middleware(Arc::new(dygas));
- compiler.push_middleware(Arc::new(depth));
- compiler.push_middleware(Arc::new(bound));
+ wasmer_config.push_middleware(Arc::new(start));
+ wasmer_config.push_middleware(Arc::new(meter));
+ wasmer_config.push_middleware(Arc::new(dygas));
+ wasmer_config.push_middleware(Arc::new(depth));
+ wasmer_config.push_middleware(Arc::new(bound));
if self.debug.count_ops {
let counter = Counter::new();
- compiler.push_middleware(Arc::new(MiddlewareWrapper::new(counter)));
+ wasmer_config.push_middleware(Arc::new(MiddlewareWrapper::new(counter)));
}
- Store::new(compiler)
+ EngineBuilder::new(wasmer_config)
+ .set_target(Some(target))
+ .into()
}
#[cfg(feature = "native")]
- pub fn engine(&self) -> Engine {
- self.store().engine().clone()
+ pub fn store(&self, target: Target) -> Store {
+ Store::new(self.engine(target))
}
}
diff --git a/arbitrator/prover/src/programs/mod.rs b/arbitrator/prover/src/programs/mod.rs
index a5df2e31a8..a35308e7ff 100644
--- a/arbitrator/prover/src/programs/mod.rs
+++ b/arbitrator/prover/src/programs/mod.rs
@@ -8,7 +8,7 @@ use crate::{
programs::config::CompileConfig,
value::{FunctionType as ArbFunctionType, Value},
};
-use arbutil::{math::SaturatingSum, Bytes32, Color};
+use arbutil::{evm::ARBOS_VERSION_STYLUS_CHARGING_FIXES, math::SaturatingSum, Bytes32, Color};
use eyre::{bail, eyre, Report, Result, WrapErr};
use fnv::FnvHashMap as HashMap;
use std::fmt::Debug;
@@ -418,58 +418,64 @@ impl Module {
pub fn activate(
wasm: &[u8],
codehash: &Bytes32,
- version: u16,
+ stylus_version: u16,
+ arbos_version_for_gas: u64, // must only be used for activation gas
page_limit: u16,
debug: bool,
gas: &mut u64,
) -> Result<(Self, StylusData)> {
- // converts a number of microseconds to gas
- // TODO: collapse to a single value after finalizing factors
- let us_to_gas = |us: u64| {
- let fudge = 2;
- let sync_rate = 1_000_000 / 2;
- let speed = 7_000_000;
- us.saturating_mul(fudge * speed) / sync_rate
- };
-
- macro_rules! pay {
- ($us:expr) => {
- let amount = us_to_gas($us);
- if *gas < amount {
- *gas = 0;
- bail!("out of gas");
- }
- *gas -= amount;
+ let compile = CompileConfig::version(stylus_version, debug);
+ let (bin, stylus_data) =
+ WasmBinary::parse_user(wasm, arbos_version_for_gas, page_limit, &compile, codehash)
+ .wrap_err("failed to parse wasm")?;
+
+ if arbos_version_for_gas > 0 {
+ // converts a number of microseconds to gas
+ // TODO: collapse to a single value after finalizing factors
+ let us_to_gas = |us: u64| {
+ let fudge = 2;
+ let sync_rate = 1_000_000 / 2;
+ let speed = 7_000_000;
+ us.saturating_mul(fudge * speed) / sync_rate
};
- }
-
- // pay for wasm
- let wasm_len = wasm.len() as u64;
- pay!(wasm_len.saturating_mul(31_733 / 100_000));
-
- let compile = CompileConfig::version(version, debug);
- let (bin, stylus_data) = WasmBinary::parse_user(wasm, page_limit, &compile, codehash)
- .wrap_err("failed to parse wasm")?;
- // pay for funcs
- let funcs = bin.functions.len() as u64;
- pay!(funcs.saturating_mul(17_263) / 100_000);
-
- // pay for data
- let data = bin.datas.iter().map(|x| x.data.len()).saturating_sum() as u64;
- pay!(data.saturating_mul(17_376) / 100_000);
-
- // pay for elements
- let elems = bin.elements.iter().map(|x| x.range.len()).saturating_sum() as u64;
- pay!(elems.saturating_mul(17_376) / 100_000);
-
- // pay for memory
- let mem = bin.memories.first().map(|x| x.initial).unwrap_or_default();
- pay!(mem.saturating_mul(2217));
-
- // pay for code
- let code = bin.codes.iter().map(|x| x.expr.len()).saturating_sum() as u64;
- pay!(code.saturating_mul(535) / 1_000);
+ macro_rules! pay {
+ ($us:expr) => {
+ let amount = us_to_gas($us);
+ if *gas < amount {
+ *gas = 0;
+ bail!("out of gas");
+ }
+ *gas -= amount;
+ };
+ }
+
+ // pay for wasm
+ if arbos_version_for_gas >= ARBOS_VERSION_STYLUS_CHARGING_FIXES {
+ let wasm_len = wasm.len() as u64;
+ pay!(wasm_len.saturating_mul(31_733) / 100_000);
+ }
+
+ // pay for funcs
+ let funcs = bin.functions.len() as u64;
+ pay!(funcs.saturating_mul(17_263) / 100_000);
+
+ // pay for data
+ let data = bin.datas.iter().map(|x| x.data.len()).saturating_sum() as u64;
+ pay!(data.saturating_mul(17_376) / 100_000);
+
+ // pay for elements
+ let elems = bin.elements.iter().map(|x| x.range.len()).saturating_sum() as u64;
+ pay!(elems.saturating_mul(17_376) / 100_000);
+
+ // pay for memory
+ let mem = bin.memories.first().map(|x| x.initial).unwrap_or_default();
+ pay!(mem.saturating_mul(2217));
+
+ // pay for code
+ let code = bin.codes.iter().map(|x| x.expr.len()).saturating_sum() as u64;
+ pay!(code.saturating_mul(535) / 1_000);
+ }
let module = Self::from_user_binary(&bin, compile.debug.debug_funcs, Some(stylus_data))
.wrap_err("failed to build user module")?;
diff --git a/arbitrator/stylus/Cargo.toml b/arbitrator/stylus/Cargo.toml
index 4717bd631a..ea1d878ea0 100644
--- a/arbitrator/stylus/Cargo.toml
+++ b/arbitrator/stylus/Cargo.toml
@@ -21,11 +21,11 @@ thiserror = "1.0.33"
bincode = "1.3.3"
lazy_static.workspace = true
libc = "0.2.108"
-lru.workspace = true
eyre = "0.6.5"
rand = "0.8.5"
fnv = "1.0.7"
hex = "0.4.3"
+clru = "0.6.2"
[dev-dependencies]
num-bigint = "0.4.4"
diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs
index 06739f2219..c1fdaaccee 100644
--- a/arbitrator/stylus/src/cache.rs
+++ b/arbitrator/stylus/src/cache.rs
@@ -2,16 +2,19 @@
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
use arbutil::Bytes32;
+use clru::{CLruCache, CLruCacheConfig, WeightScale};
use eyre::Result;
use lazy_static::lazy_static;
-use lru::LruCache;
use parking_lot::Mutex;
use prover::programs::config::CompileConfig;
+use std::hash::RandomState;
use std::{collections::HashMap, num::NonZeroUsize};
use wasmer::{Engine, Module, Store};
+use crate::target_cache::target_native;
+
lazy_static! {
- static ref INIT_CACHE: Mutex = Mutex::new(InitCache::new(256));
+ static ref INIT_CACHE: Mutex = Mutex::new(InitCache::new(256 * 1024 * 1024));
}
macro_rules! cache {
@@ -20,9 +23,16 @@ macro_rules! cache {
};
}
+pub struct LruCounters {
+ pub hits: u32,
+ pub misses: u32,
+ pub does_not_fit: u32,
+}
+
pub struct InitCache {
long_term: HashMap,
- lru: LruCache,
+ lru: CLruCache,
+ lru_counters: LruCounters,
}
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
@@ -46,11 +56,16 @@ impl CacheKey {
struct CacheItem {
module: Module,
engine: Engine,
+ entry_size_estimate_bytes: usize,
}
impl CacheItem {
- fn new(module: Module, engine: Engine) -> Self {
- Self { module, engine }
+ fn new(module: Module, engine: Engine, entry_size_estimate_bytes: usize) -> Self {
+ Self {
+ module,
+ engine,
+ entry_size_estimate_bytes,
+ }
}
fn data(&self) -> (Module, Store) {
@@ -58,23 +73,66 @@ impl CacheItem {
}
}
+struct CustomWeightScale;
+impl WeightScale for CustomWeightScale {
+ fn weight(&self, _key: &CacheKey, val: &CacheItem) -> usize {
+ // clru defines that each entry consumes (weight + 1) of the cache capacity.
+ // We subtract 1 since we only want to use the weight as the size of the entry.
+ val.entry_size_estimate_bytes.saturating_sub(1)
+ }
+}
+
+#[repr(C)]
+pub struct LruCacheMetrics {
+ pub size_bytes: u64,
+ pub count: u32,
+ pub hits: u32,
+ pub misses: u32,
+ pub does_not_fit: u32,
+}
+
+pub fn deserialize_module(
+ module: &[u8],
+ version: u16,
+ debug: bool,
+) -> Result<(Module, Engine, usize)> {
+ let engine = CompileConfig::version(version, debug).engine(target_native());
+ let module = unsafe { Module::deserialize_unchecked(&engine, module)? };
+
+ let asm_size_estimate_bytes = module.serialize()?.len();
+ // add 128 bytes for the cache item overhead
+ let entry_size_estimate_bytes = asm_size_estimate_bytes + 128;
+
+ Ok((module, engine, entry_size_estimate_bytes))
+}
+
impl InitCache {
// current implementation only has one tag that stores to the long_term
// future implementations might have more, but 0 is a reserved tag
// that will never modify long_term state
const ARBOS_TAG: u32 = 1;
- fn new(size: usize) -> Self {
+ const DOES_NOT_FIT_MSG: &'static str = "Failed to insert into LRU cache, item too large";
+
+ fn new(size_bytes: usize) -> Self {
Self {
long_term: HashMap::new(),
- lru: LruCache::new(NonZeroUsize::new(size).unwrap()),
+ lru: CLruCache::with_config(
+ CLruCacheConfig::new(NonZeroUsize::new(size_bytes).unwrap())
+ .with_scale(CustomWeightScale),
+ ),
+ lru_counters: LruCounters {
+ hits: 0,
+ misses: 0,
+ does_not_fit: 0,
+ },
}
}
- pub fn set_lru_size(size: u32) {
+ pub fn set_lru_capacity(capacity_bytes: u64) {
cache!()
.lru
- .resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap())
+ .resize(NonZeroUsize::new(capacity_bytes.try_into().unwrap()).unwrap())
}
/// Retrieves a cached value, updating items as necessary.
@@ -89,8 +147,11 @@ impl InitCache {
// See if the item is in the LRU cache, promoting if so
if let Some(item) = cache.lru.get(&key) {
- return Some(item.data());
+ let data = item.data();
+ cache.lru_counters.hits += 1;
+ return Some(data);
}
+ cache.lru_counters.misses += 1;
None
}
@@ -114,20 +175,24 @@ impl InitCache {
if long_term_tag == Self::ARBOS_TAG {
cache.long_term.insert(key, item.clone());
} else {
- cache.lru.promote(&key)
+ // only calls get to move the key to the head of the LRU list
+ cache.lru.get(&key);
}
return Ok(item.data());
}
drop(cache);
- let engine = CompileConfig::version(version, debug).engine();
- let module = unsafe { Module::deserialize_unchecked(&engine, module)? };
+ let (module, engine, entry_size_estimate_bytes) =
+ deserialize_module(module, version, debug)?;
- let item = CacheItem::new(module, engine);
+ let item = CacheItem::new(module, engine, entry_size_estimate_bytes);
let data = item.data();
let mut cache = cache!();
if long_term_tag != Self::ARBOS_TAG {
- cache.lru.put(key, item);
+ if cache.lru.put_with_weight(key, item).is_err() {
+ cache.lru_counters.does_not_fit += 1;
+ eprintln!("{}", Self::DOES_NOT_FIT_MSG);
+ };
} else {
cache.long_term.insert(key, item);
}
@@ -142,7 +207,9 @@ impl InitCache {
let key = CacheKey::new(module_hash, version, debug);
let mut cache = cache!();
if let Some(item) = cache.long_term.remove(&key) {
- cache.lru.put(key, item);
+ if cache.lru.put_with_weight(key, item).is_err() {
+ eprintln!("{}", Self::DOES_NOT_FIT_MSG);
+ }
}
}
@@ -153,7 +220,48 @@ impl InitCache {
let mut cache = cache!();
let cache = &mut *cache;
for (key, item) in cache.long_term.drain() {
- cache.lru.put(key, item); // not all will fit, just a heuristic
+ // not all will fit, just a heuristic
+ if cache.lru.put_with_weight(key, item).is_err() {
+ eprintln!("{}", Self::DOES_NOT_FIT_MSG);
+ }
}
}
+
+ pub fn get_lru_metrics() -> LruCacheMetrics {
+ let mut cache = cache!();
+
+ let count = cache.lru.len();
+ let metrics = LruCacheMetrics {
+ // add 1 to each entry to account that we subtracted 1 in the weight calculation
+ size_bytes: (cache.lru.weight() + count).try_into().unwrap(),
+
+ count: count.try_into().unwrap(),
+
+ hits: cache.lru_counters.hits,
+ misses: cache.lru_counters.misses,
+ does_not_fit: cache.lru_counters.does_not_fit,
+ };
+
+ // Empty counters.
+ // go side, which is the only consumer of this function besides tests,
+ // will read those counters and increment its own prometheus counters with them.
+ cache.lru_counters = LruCounters {
+ hits: 0,
+ misses: 0,
+ does_not_fit: 0,
+ };
+
+ metrics
+ }
+
+ // only used for testing
+ pub fn clear_lru_cache() {
+ let mut cache = cache!();
+ cache.lru.clear();
+ cache.lru_counters = LruCounters {
+ hits: 0,
+ misses: 0,
+ does_not_fit: 0,
+ };
+ }
}
diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs
index 3c53359f8b..abea428167 100644
--- a/arbitrator/stylus/src/lib.rs
+++ b/arbitrator/stylus/src/lib.rs
@@ -11,13 +11,14 @@ use arbutil::{
format::DebugBytes,
Bytes32,
};
-use cache::InitCache;
+use cache::{deserialize_module, InitCache, LruCacheMetrics};
use evm_api::NativeRequestHandler;
use eyre::ErrReport;
use native::NativeInstance;
use prover::programs::{prelude::*, StylusData};
use run::RunProgram;
use std::{marker::PhantomData, mem, ptr};
+use target_cache::{target_cache_get, target_cache_set};
pub use brotli;
pub use prover;
@@ -29,6 +30,7 @@ pub mod run;
mod cache;
mod evm_api;
+mod target_cache;
mod util;
#[cfg(test)]
@@ -122,9 +124,9 @@ impl RustBytes {
}
}
-/// Instruments and "activates" a user wasm.
+/// "activates" a user wasm.
///
-/// The `output` is either the serialized asm & module pair or an error string.
+/// The `output` is either the module or an error string.
/// Returns consensus info such as the module hash and footprint on success.
///
/// Note that this operation costs gas and is limited by the amount supplied via the `gas` pointer.
@@ -137,10 +139,10 @@ impl RustBytes {
pub unsafe extern "C" fn stylus_activate(
wasm: GoSliceData,
page_limit: u16,
- version: u16,
+ stylus_version: u16,
+ arbos_version_for_gas: u64,
debug: bool,
output: *mut RustBytes,
- asm_len: *mut usize,
codehash: *const Bytes32,
module_hash: *mut Bytes32,
stylus_data: *mut StylusData,
@@ -152,18 +154,105 @@ pub unsafe extern "C" fn stylus_activate(
let codehash = &*codehash;
let gas = &mut *gas;
- let (asm, module, info) =
- match native::activate(wasm, codehash, version, page_limit, debug, gas) {
- Ok(val) => val,
- Err(err) => return output.write_err(err),
- };
- *asm_len = asm.len();
+ let (module, info) = match native::activate(
+ wasm,
+ codehash,
+ stylus_version,
+ arbos_version_for_gas,
+ page_limit,
+ debug,
+ gas,
+ ) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err),
+ };
+
*module_hash = module.hash();
*stylus_data = info;
- let mut data = asm;
- data.extend(&*module.into_bytes());
- output.write(data);
+ output.write(module.into_bytes());
+ UserOutcomeKind::Success
+}
+
+/// "compiles" a user wasm.
+///
+/// The `output` is either the asm or an error string.
+/// Returns consensus info such as the module hash and footprint on success.
+///
+/// # Safety
+///
+/// `output` must not be null.
+#[no_mangle]
+pub unsafe extern "C" fn stylus_compile(
+ wasm: GoSliceData,
+ version: u16,
+ debug: bool,
+ name: GoSliceData,
+ output: *mut RustBytes,
+) -> UserOutcomeKind {
+ let wasm = wasm.slice();
+ let output = &mut *output;
+ let name = match String::from_utf8(name.slice().to_vec()) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err.into()),
+ };
+ let target = match target_cache_get(&name) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err),
+ };
+
+ let asm = match native::compile(wasm, version, debug, target) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err),
+ };
+
+ output.write(asm);
+ UserOutcomeKind::Success
+}
+
+#[no_mangle]
+/// # Safety
+///
+/// `output` must not be null.
+pub unsafe extern "C" fn wat_to_wasm(wat: GoSliceData, output: *mut RustBytes) -> UserOutcomeKind {
+ let output = &mut *output;
+ let wasm = match wasmer::wat2wasm(wat.slice()) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err.into()),
+ };
+ output.write(wasm.into_owned());
+ UserOutcomeKind::Success
+}
+
+/// sets target index to a string
+///
+/// String format is: Triple+CpuFeature+CpuFeature..
+///
+/// # Safety
+///
+/// `output` must not be null.
+#[no_mangle]
+pub unsafe extern "C" fn stylus_target_set(
+ name: GoSliceData,
+ description: GoSliceData,
+ output: *mut RustBytes,
+ native: bool,
+) -> UserOutcomeKind {
+ let output = &mut *output;
+ let name = match String::from_utf8(name.slice().to_vec()) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err.into()),
+ };
+
+ let desc_str = match String::from_utf8(description.slice().to_vec()) {
+ Ok(val) => val,
+ Err(err) => return output.write_err(err.into()),
+ };
+
+ if let Err(err) = target_cache_set(name, desc_str, native) {
+ return output.write_err(err);
+ };
+
UserOutcomeKind::Success
}
@@ -220,10 +309,10 @@ pub unsafe extern "C" fn stylus_call(
status
}
-/// resize lru
+/// set lru cache capacity
#[no_mangle]
-pub extern "C" fn stylus_cache_lru_resize(size: u32) {
- InitCache::set_lru_size(size);
+pub extern "C" fn stylus_set_cache_lru_capacity(capacity_bytes: u64) {
+ InitCache::set_lru_capacity(capacity_bytes);
}
/// Caches an activated user program.
@@ -274,3 +363,32 @@ pub unsafe extern "C" fn stylus_drop_vec(vec: RustBytes) {
mem::drop(vec.into_vec())
}
}
+
+/// Gets lru cache metrics.
+#[no_mangle]
+pub extern "C" fn stylus_get_lru_cache_metrics() -> LruCacheMetrics {
+ InitCache::get_lru_metrics()
+}
+
+/// Clears lru cache.
+/// Only used for testing purposes.
+#[no_mangle]
+pub extern "C" fn stylus_clear_lru_cache() {
+ InitCache::clear_lru_cache()
+}
+
+/// Gets lru entry size in bytes.
+/// Only used for testing purposes.
+#[no_mangle]
+pub extern "C" fn stylus_get_lru_entry_size_estimate_bytes(
+ module: GoSliceData,
+ version: u16,
+ debug: bool,
+) -> u64 {
+ match deserialize_module(module.slice(), version, debug) {
+ Err(error) => panic!("tried to get invalid asm!: {error}"),
+ Ok((_, _, lru_entry_size_estimate_bytes)) => {
+ lru_entry_size_estimate_bytes.try_into().unwrap()
+ }
+ }
+}
diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs
index a7b996edf0..516c6602e7 100644
--- a/arbitrator/stylus/src/native.rs
+++ b/arbitrator/stylus/src/native.rs
@@ -4,7 +4,7 @@
use crate::{
cache::InitCache,
env::{MeterData, WasmEnv},
- host, util,
+ host,
};
use arbutil::{
evm::{
@@ -33,11 +33,13 @@ use std::{
ops::{Deref, DerefMut},
};
use wasmer::{
- imports, AsStoreMut, Function, FunctionEnv, Instance, Memory, Module, Pages, Store,
+ imports, AsStoreMut, Function, FunctionEnv, Instance, Memory, Module, Pages, Store, Target,
TypedFunction, Value, WasmTypeList,
};
use wasmer_vm::VMExtern;
+use crate::target_cache::target_native;
+
#[derive(Debug)]
pub struct NativeInstance> {
pub instance: Instance,
@@ -98,7 +100,7 @@ impl> NativeInstance {
evm_data: EvmData,
) -> Result {
let env = WasmEnv::new(compile, None, evm, evm_data);
- let store = env.compile.store();
+ let store = env.compile.store(target_native());
let module = unsafe { Module::deserialize_unchecked(&store, module)? };
Self::from_module(module, store, env)
}
@@ -137,9 +139,10 @@ impl> NativeInstance {
evm_data: EvmData,
compile: &CompileConfig,
config: StylusConfig,
+ target: Target,
) -> Result {
let env = WasmEnv::new(compile.clone(), Some(config), evm_api, evm_data);
- let store = env.compile.store();
+ let store = env.compile.store(target);
let wat_or_wasm = std::fs::read(path)?;
let module = Module::new(&store, wat_or_wasm)?;
Self::from_module(module, store, env)
@@ -347,8 +350,8 @@ impl> StartlessMachine for NativeInstance {
}
}
-pub fn module(wasm: &[u8], compile: CompileConfig) -> Result> {
- let mut store = compile.store();
+pub fn module(wasm: &[u8], compile: CompileConfig, target: Target) -> Result> {
+ let mut store = compile.store(target);
let module = Module::new(&store, wasm)?;
macro_rules! stub {
(u8 <- $($types:tt)+) => {
@@ -428,7 +431,6 @@ pub fn module(wasm: &[u8], compile: CompileConfig) -> Result> {
imports.define("console", "tee_f64", stub!(f64 <- |_: f64|));
imports.define("debug", "null_host", stub!(||));
}
- Instance::new(&mut store, &module, &imports)?;
let module = module.serialize()?;
Ok(module.to_vec())
@@ -437,18 +439,26 @@ pub fn module(wasm: &[u8], compile: CompileConfig) -> Result> {
pub fn activate(
wasm: &[u8],
codehash: &Bytes32,
- version: u16,
+ stylus_version: u16,
+ arbos_version_for_gas: u64,
page_limit: u16,
debug: bool,
gas: &mut u64,
-) -> Result<(Vec, ProverModule, StylusData)> {
- let compile = CompileConfig::version(version, debug);
- let (module, stylus_data) =
- ProverModule::activate(wasm, codehash, version, page_limit, debug, gas)?;
+) -> Result<(ProverModule, StylusData)> {
+ let (module, stylus_data) = ProverModule::activate(
+ wasm,
+ codehash,
+ stylus_version,
+ arbos_version_for_gas,
+ page_limit,
+ debug,
+ gas,
+ )?;
+
+ Ok((module, stylus_data))
+}
- let asm = match self::module(wasm, compile) {
- Ok(asm) => asm,
- Err(err) => util::panic_with_wasm(wasm, err),
- };
- Ok((asm, module, stylus_data))
+pub fn compile(wasm: &[u8], version: u16, debug: bool, target: Target) -> Result> {
+ let compile = CompileConfig::version(version, debug);
+ self::module(wasm, compile, target)
}
diff --git a/arbitrator/stylus/src/target_cache.rs b/arbitrator/stylus/src/target_cache.rs
new file mode 100644
index 0000000000..a1d63829d6
--- /dev/null
+++ b/arbitrator/stylus/src/target_cache.rs
@@ -0,0 +1,81 @@
+// Copyright 2022-2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+use eyre::{eyre, OptionExt, Result};
+use lazy_static::lazy_static;
+use parking_lot::RwLock;
+use std::{collections::HashMap, str::FromStr};
+use wasmer_types::{CpuFeature, Target, Triple};
+
+lazy_static! {
+ static ref TARGET_CACHE: RwLock> = RwLock::new(HashMap::new());
+ static ref TARGET_NATIVE: RwLock = RwLock::new(Target::default());
+}
+
+fn target_from_string(input: String) -> Result {
+ if input.is_empty() {
+ return Ok(Target::default());
+ }
+ let mut parts = input.split('+');
+
+ let Some(triple_string) = parts.next() else {
+ return Err(eyre!("no architecture"));
+ };
+
+ let triple = match Triple::from_str(triple_string) {
+ Ok(val) => val,
+ Err(e) => return Err(eyre!(e)),
+ };
+
+ let mut features = CpuFeature::set();
+ for flag in parts {
+ features.insert(CpuFeature::from_str(flag)?);
+ }
+
+ Ok(Target::new(triple, features))
+}
+
+/// Populates `TARGET_CACHE` inserting target specified by `description` under `name` key.
+/// Additionally, if `native` is set it sets `TARGET_NATIVE` to the specified target.
+pub fn target_cache_set(name: String, description: String, native: bool) -> Result<()> {
+ let target = target_from_string(description)?;
+
+ if native {
+ if !target.is_native() {
+ return Err(eyre!("arch not native"));
+ }
+ let flags_not_supported = Target::default()
+ .cpu_features()
+ .complement()
+ .intersection(*target.cpu_features());
+ if !flags_not_supported.is_empty() {
+ let mut err_message = String::new();
+ err_message.push_str("cpu flags not supported on local cpu for: ");
+ for item in flags_not_supported.iter() {
+ err_message.push('+');
+ err_message.push_str(&item.to_string());
+ }
+ return Err(eyre!(err_message));
+ }
+ *TARGET_NATIVE.write() = target.clone();
+ }
+
+ TARGET_CACHE.write().insert(name, target);
+
+ Ok(())
+}
+
+pub fn target_native() -> Target {
+ TARGET_NATIVE.read().clone()
+}
+
+pub fn target_cache_get(name: &str) -> Result {
+ if name.is_empty() {
+ return Ok(TARGET_NATIVE.read().clone());
+ }
+ TARGET_CACHE
+ .read()
+ .get(name)
+ .cloned()
+ .ok_or_eyre("arch not set")
+}
diff --git a/arbitrator/stylus/src/test/api.rs b/arbitrator/stylus/src/test/api.rs
index 92d7317918..66d600a6f7 100644
--- a/arbitrator/stylus/src/test/api.rs
+++ b/arbitrator/stylus/src/test/api.rs
@@ -14,6 +14,7 @@ use eyre::Result;
use parking_lot::Mutex;
use prover::programs::{memory::MemoryModel, prelude::*};
use std::{collections::HashMap, sync::Arc};
+use wasmer::Target;
use super::TestInstance;
@@ -53,7 +54,7 @@ impl TestEvmApi {
pub fn deploy(&mut self, address: Bytes20, config: StylusConfig, name: &str) -> Result<()> {
let file = format!("tests/{name}/target/wasm32-unknown-unknown/release/{name}.wasm");
let wasm = std::fs::read(file)?;
- let module = native::module(&wasm, self.compile.clone())?;
+ let module = native::module(&wasm, self.compile.clone(), Target::default())?;
self.contracts.lock().insert(address, module);
self.configs.lock().insert(address, config);
Ok(())
@@ -67,7 +68,7 @@ impl TestEvmApi {
}
impl EvmApi for TestEvmApi {
- fn get_bytes32(&mut self, key: Bytes32) -> (Bytes32, u64) {
+ fn get_bytes32(&mut self, key: Bytes32, _evm_api_gas_to_use: u64) -> (Bytes32, u64) {
let storage = &mut self.storage.lock();
let storage = storage.get_mut(&self.program).unwrap();
let value = storage.get(&key).cloned().unwrap_or_default();
diff --git a/arbitrator/stylus/src/test/misc.rs b/arbitrator/stylus/src/test/misc.rs
index ae44a885f0..92c4394ae3 100644
--- a/arbitrator/stylus/src/test/misc.rs
+++ b/arbitrator/stylus/src/test/misc.rs
@@ -9,12 +9,12 @@ use crate::{
};
use eyre::Result;
use prover::programs::{prelude::*, start::StartMover};
-use wasmer::{imports, Function};
+use wasmer::{imports, Function, Target};
#[test]
fn test_bulk_memory() -> Result<()> {
let (compile, config, ink) = test_configs();
- let mut store = compile.store();
+ let mut store = compile.store(Target::default());
let filename = "../prover/test-cases/bulk-memory.wat";
let imports = imports! {
"env" => {
diff --git a/arbitrator/stylus/src/test/mod.rs b/arbitrator/stylus/src/test/mod.rs
index 236e5639e6..00c9c62ae4 100644
--- a/arbitrator/stylus/src/test/mod.rs
+++ b/arbitrator/stylus/src/test/mod.rs
@@ -16,7 +16,7 @@ use rand::prelude::*;
use std::{collections::HashMap, path::Path, sync::Arc};
use wasmer::{
imports, wasmparser::Operator, CompilerConfig, Function, FunctionEnv, Imports, Instance,
- Module, Store,
+ Module, Store, Target,
};
use wasmer_compiler_singlepass::Singlepass;
@@ -33,7 +33,7 @@ type TestInstance = NativeInstance;
impl TestInstance {
fn new_test(path: &str, compile: CompileConfig) -> Result {
- let mut store = compile.store();
+ let mut store = compile.store(Target::default());
let imports = imports! {
"test" => {
"noop" => Function::new_typed(&mut store, || {}),
@@ -86,7 +86,14 @@ impl TestInstance {
config: StylusConfig,
) -> Result<(Self, TestEvmApi)> {
let (mut evm, evm_data) = TestEvmApi::new(compile.clone());
- let native = Self::from_path(path, evm.clone(), evm_data, compile, config)?;
+ let native = Self::from_path(
+ path,
+ evm.clone(),
+ evm_data,
+ compile,
+ config,
+ Target::default(),
+ )?;
let footprint = native.memory().ty(&native.store).minimum.0 as u16;
evm.set_pages(footprint);
Ok((native, evm))
diff --git a/arbitrator/stylus/src/test/native.rs b/arbitrator/stylus/src/test/native.rs
index 503e5875fe..9669932a03 100644
--- a/arbitrator/stylus/src/test/native.rs
+++ b/arbitrator/stylus/src/test/native.rs
@@ -381,7 +381,7 @@ fn test_storage() -> Result<()> {
let (mut native, mut evm) = TestInstance::new_with_evm(filename, &compile, config)?;
run_native(&mut native, &store_args, ink)?;
- assert_eq!(evm.get_bytes32(key.into()).0, Bytes32(value));
+ assert_eq!(evm.get_bytes32(key.into(), 0).0, Bytes32(value));
assert_eq!(run_native(&mut native, &load_args, ink)?, value);
let mut machine = Machine::from_user_path(Path::new(filename), &compile)?;
@@ -465,7 +465,7 @@ fn test_calls() -> Result<()> {
run_native(&mut native, &args, ink)?;
for (key, value) in slots {
- assert_eq!(evm.get_bytes32(key).0, value);
+ assert_eq!(evm.get_bytes32(key, 0).0, value);
}
Ok(())
}
diff --git a/arbitrator/stylus/tests/erc20/Cargo.lock b/arbitrator/stylus/tests/erc20/Cargo.lock
index c3e215978d..f5e1e0b15e 100644
--- a/arbitrator/stylus/tests/erc20/Cargo.lock
+++ b/arbitrator/stylus/tests/erc20/Cargo.lock
@@ -575,9 +575,9 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.37.23"
+version = "0.37.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
+checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2"
dependencies = [
"bitflags",
"errno",
diff --git a/arbitrator/stylus/tests/hostio-test/Cargo.lock b/arbitrator/stylus/tests/hostio-test/Cargo.lock
new file mode 100644
index 0000000000..1e726910b1
--- /dev/null
+++ b/arbitrator/stylus/tests/hostio-test/Cargo.lock
@@ -0,0 +1,636 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "alloy-primitives"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e416903084d3392ebd32d94735c395d6709415b76c7728e594d3f996f2b03e65"
+dependencies = [
+ "bytes",
+ "cfg-if 1.0.0",
+ "const-hex",
+ "derive_more",
+ "hex-literal",
+ "itoa",
+ "ruint",
+ "tiny-keccak",
+]
+
+[[package]]
+name = "alloy-sol-macro"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a74ceeffdacf9dd0910404d743d07273776fd17b85f9cb17b49a97e5c6055ce9"
+dependencies = [
+ "dunce",
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+ "syn-solidity",
+ "tiny-keccak",
+]
+
+[[package]]
+name = "alloy-sol-types"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5f347cb6bb307b3802ec455ef43ce00f5e590e0ceca3d2f3b070f5ee367e235"
+dependencies = [
+ "alloy-primitives",
+ "alloy-sol-macro",
+ "const-hex",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "const-hex"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cpufeatures",
+ "hex",
+ "proptest",
+ "serde",
+]
+
+[[package]]
+name = "convert_case"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+
+[[package]]
+name = "convert_case"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "derivative"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
+dependencies = [
+ "convert_case 0.4.0",
+ "proc-macro2",
+ "quote",
+ "rustc_version",
+ "syn 2.0.77",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+]
+
+[[package]]
+name = "dunce"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "hex-literal"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46"
+
+[[package]]
+name = "hostio-test"
+version = "0.1.0"
+dependencies = [
+ "mini-alloc",
+ "stylus-sdk",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+
+[[package]]
+name = "keccak"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
+dependencies = [
+ "cpufeatures",
+]
+
+[[package]]
+name = "keccak-const"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57d8d8ce877200136358e0bbff3a77965875db3af755a11e1fa6b1b3e2df13ea"
+
+[[package]]
+name = "lazy_static"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+
+[[package]]
+name = "libc"
+version = "0.2.159"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
+
+[[package]]
+name = "libm"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "memory_units"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3"
+
+[[package]]
+name = "mini-alloc"
+version = "0.4.2"
+dependencies = [
+ "cfg-if 1.0.0",
+ "wee_alloc",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+ "libm",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
+dependencies = [
+ "zerocopy",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "proptest"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d"
+dependencies = [
+ "bitflags",
+ "num-traits",
+ "rand",
+ "rand_chacha",
+ "rand_xorshift",
+ "unarray",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+
+[[package]]
+name = "rand_xorshift"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+
+[[package]]
+name = "ruint"
+version = "1.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286"
+dependencies = [
+ "proptest",
+ "rand",
+ "ruint-macro",
+ "serde",
+ "valuable",
+ "zeroize",
+]
+
+[[package]]
+name = "ruint-macro"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18"
+
+[[package]]
+name = "rustc_version"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
+
+[[package]]
+name = "serde"
+version = "1.0.210"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.210"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+]
+
+[[package]]
+name = "sha3"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
+dependencies = [
+ "digest",
+ "keccak",
+]
+
+[[package]]
+name = "stylus-proc"
+version = "0.4.2"
+dependencies = [
+ "alloy-primitives",
+ "alloy-sol-types",
+ "cfg-if 1.0.0",
+ "convert_case 0.6.0",
+ "lazy_static",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "sha3",
+ "syn 1.0.109",
+ "syn-solidity",
+]
+
+[[package]]
+name = "stylus-sdk"
+version = "0.4.2"
+dependencies = [
+ "alloy-primitives",
+ "alloy-sol-types",
+ "cfg-if 1.0.0",
+ "derivative",
+ "hex",
+ "keccak-const",
+ "lazy_static",
+ "stylus-proc",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.77"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn-solidity"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5f995d2140b0f751dbe94365be2591edbf3d1b75dcfaeac14183abbd2ff07bd"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+]
+
+[[package]]
+name = "tiny-keccak"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
+dependencies = [
+ "crunchy",
+]
+
+[[package]]
+name = "typenum"
+version = "1.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
+
+[[package]]
+name = "unarray"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
+
+[[package]]
+name = "unicode-segmentation"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
+
+[[package]]
+name = "valuable"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
+
+[[package]]
+name = "version_check"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
+
+[[package]]
+name = "wee_alloc"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e"
+dependencies = [
+ "cfg-if 0.1.10",
+ "libc",
+ "memory_units",
+ "winapi",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "zerocopy"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
+dependencies = [
+ "byteorder",
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.7.35"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.77",
+]
+
+[[package]]
+name = "zeroize"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
diff --git a/arbitrator/stylus/tests/hostio-test/Cargo.toml b/arbitrator/stylus/tests/hostio-test/Cargo.toml
new file mode 100644
index 0000000000..da7bbce7a3
--- /dev/null
+++ b/arbitrator/stylus/tests/hostio-test/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "hostio-test"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+stylus-sdk = { path = "../../../langs/rust/stylus-sdk", features = ["debug", "hostio"] }
+mini-alloc.path = "../../../langs/rust/mini-alloc"
+
+[profile.release]
+codegen-units = 1
+strip = true
+lto = true
+panic = "abort"
+opt-level = "s"
+
+[workspace]
diff --git a/arbitrator/stylus/tests/hostio-test/src/main.rs b/arbitrator/stylus/tests/hostio-test/src/main.rs
new file mode 100644
index 0000000000..17a5d10266
--- /dev/null
+++ b/arbitrator/stylus/tests/hostio-test/src/main.rs
@@ -0,0 +1,207 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+#![no_main]
+
+use stylus_sdk::{
+ abi::Bytes,
+ alloy_primitives::{Address, B256, U256},
+ block, console, contract, evm, hostio, msg,
+ prelude::*,
+ stylus_proc::entrypoint,
+ tx,
+ types::AddressVM,
+};
+extern crate alloc;
+
+#[cfg(target_arch = "wasm32")]
+#[global_allocator]
+static ALLOC: mini_alloc::MiniAlloc = mini_alloc::MiniAlloc::INIT;
+
+sol_storage! {
+ #[entrypoint]
+ pub struct HostioTest {
+ }
+}
+
+type Result<T = ()> = std::result::Result<T, Vec<u8>>;
+
+// These are not available as hostios in the sdk, so we import them directly.
+#[link(wasm_import_module = "vm_hooks")]
+extern "C" {
+ fn math_div(value: *mut u8, divisor: *const u8);
+ fn math_mod(value: *mut u8, modulus: *const u8);
+ fn math_pow(value: *mut u8, exponent: *const u8);
+ fn math_add_mod(value: *mut u8, addend: *const u8, modulus: *const u8);
+ fn math_mul_mod(value: *mut u8, multiplier: *const u8, modulus: *const u8);
+ fn transient_load_bytes32(key: *const u8, dest: *mut u8);
+ fn transient_store_bytes32(key: *const u8, value: *const u8);
+ fn exit_early(status: u32);
+}
+
+#[external]
+impl HostioTest {
+ fn exit_early() -> Result<()> {
+ unsafe {
+ exit_early(0);
+ }
+ Ok(())
+ }
+
+ fn transient_load_bytes32(key: B256) -> Result<B256> {
+ let mut result = B256::ZERO;
+ unsafe {
+ transient_load_bytes32(key.as_ptr(), result.as_mut_ptr());
+ }
+ Ok(result)
+ }
+
+ fn transient_store_bytes32(key: B256, value: B256) {
+ unsafe {
+ transient_store_bytes32(key.as_ptr(), value.as_ptr());
+ }
+ }
+
+ fn return_data_size() -> Result<U256> {
+ unsafe { Ok(hostio::return_data_size().try_into().unwrap()) }
+ }
+
+ fn emit_log(data: Bytes, n: i8, t1: B256, t2: B256, t3: B256, t4: B256) -> Result<()> {
+ let topics = &[t1, t2, t3, t4];
+ evm::raw_log(&topics[0..n as usize], data.as_slice())?;
+ Ok(())
+ }
+
+ fn account_balance(account: Address) -> Result<U256> {
+ Ok(account.balance())
+ }
+
+ fn account_code(account: Address) -> Result<Vec<u8>> {
+ let mut size = 10000;
+ let mut code = vec![0; size];
+ unsafe {
+ size = hostio::account_code(account.as_ptr(), 0, size, code.as_mut_ptr());
+ }
+ code.resize(size, 0);
+ Ok(code)
+ }
+
+ fn account_code_size(account: Address) -> Result<U256> {
+ Ok(account.code_size().try_into().unwrap())
+ }
+
+ fn account_codehash(account: Address) -> Result<B256> {
+ Ok(account.codehash())
+ }
+
+ fn evm_gas_left() -> Result<U256> {
+ Ok(evm::gas_left().try_into().unwrap())
+ }
+
+ fn evm_ink_left() -> Result<U256> {
+ Ok(tx::ink_to_gas(evm::ink_left()).try_into().unwrap())
+ }
+
+ fn block_basefee() -> Result<U256> {
+ Ok(block::basefee())
+ }
+
+ fn chainid() -> Result<U256> {
+ Ok(block::chainid().try_into().unwrap())
+ }
+
+ fn block_coinbase() -> Result<Address> {
+ Ok(block::coinbase())
+ }
+
+ fn block_gas_limit() -> Result<U256> {
+ Ok(block::gas_limit().try_into().unwrap())
+ }
+
+ fn block_number() -> Result<U256> {
+ Ok(block::number().try_into().unwrap())
+ }
+
+ fn block_timestamp() -> Result<U256> {
+ Ok(block::timestamp().try_into().unwrap())
+ }
+
+ fn contract_address() -> Result<Address> {
+ Ok(contract::address())
+ }
+
+ fn math_div(a: U256, b: U256) -> Result<U256> {
+ let mut a_bytes: B256 = a.into();
+ let b_bytes: B256 = b.into();
+ unsafe {
+ math_div(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+ }
+ Ok(a_bytes.into())
+ }
+
+ fn math_mod(a: U256, b: U256) -> Result<U256> {
+ let mut a_bytes: B256 = a.into();
+ let b_bytes: B256 = b.into();
+ unsafe {
+ math_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+ }
+ Ok(a_bytes.into())
+ }
+
+ fn math_pow(a: U256, b: U256) -> Result<U256> {
+ let mut a_bytes: B256 = a.into();
+ let b_bytes: B256 = b.into();
+ unsafe {
+ math_pow(a_bytes.as_mut_ptr(), b_bytes.as_ptr());
+ }
+ Ok(a_bytes.into())
+ }
+
+ fn math_add_mod(a: U256, b: U256, c: U256) -> Result<U256> {
+ let mut a_bytes: B256 = a.into();
+ let b_bytes: B256 = b.into();
+ let c_bytes: B256 = c.into();
+ unsafe {
+ math_add_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr(), c_bytes.as_ptr());
+ }
+ Ok(a_bytes.into())
+ }
+
+ fn math_mul_mod(a: U256, b: U256, c: U256) -> Result<U256> {
+ let mut a_bytes: B256 = a.into();
+ let b_bytes: B256 = b.into();
+ let c_bytes: B256 = c.into();
+ unsafe {
+ math_mul_mod(a_bytes.as_mut_ptr(), b_bytes.as_ptr(), c_bytes.as_ptr());
+ }
+ Ok(a_bytes.into())
+ }
+
+ fn msg_sender() -> Result<Address> {
+ Ok(msg::sender())
+ }
+
+ fn msg_value() -> Result<U256> {
+ Ok(msg::value())
+ }
+
+ fn keccak(preimage: Bytes) -> Result<B256> {
+ let mut result = B256::ZERO;
+ unsafe {
+ hostio::native_keccak256(preimage.as_ptr(), preimage.len(), result.as_mut_ptr());
+ }
+ Ok(result)
+ }
+
+ fn tx_gas_price() -> Result<U256> {
+ Ok(tx::gas_price())
+ }
+
+ fn tx_ink_price() -> Result<U256> {
+ Ok(tx::ink_to_gas(tx::ink_price().into()).try_into().unwrap())
+ }
+
+ fn tx_origin() -> Result<Address> {
+ Ok(tx::origin())
+ }
+}
diff --git a/arbitrator/stylus/tests/write-result-len.wat b/arbitrator/stylus/tests/write-result-len.wat
new file mode 100644
index 0000000000..4c9ad35087
--- /dev/null
+++ b/arbitrator/stylus/tests/write-result-len.wat
@@ -0,0 +1,24 @@
+;; Copyright 2024, Offchain Labs, Inc.
;; For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+(module
+ (import "vm_hooks" "read_args" (func $read_args (param i32)))
+ (import "vm_hooks" "write_result" (func $write_result (param i32 i32)))
+ (memory (export "memory") 2 2)
+ (func $main (export "user_entrypoint") (param $args_len i32) (result i32)
+ (local $len i32)
+
+ ;; write args to 0x0
+ (call $read_args (i32.const 0))
+
+ ;; treat first 4 bytes as size to write
+ (i32.load (i32.const 0))
+ local.set $len
+
+ ;; call write
+ (call $write_result (i32.const 0) (local.get $len))
+
+ ;; return success
+ i32.const 0
+ )
+)
diff --git a/arbitrator/wasm-libraries/Cargo.lock b/arbitrator/wasm-libraries/Cargo.lock
index 7620ff538b..a5a066e5c9 100644
--- a/arbitrator/wasm-libraries/Cargo.lock
+++ b/arbitrator/wasm-libraries/Cargo.lock
@@ -31,6 +31,21 @@ version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "ansi_term"
version = "0.12.1"
@@ -91,6 +106,12 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
[[package]]
name = "bincode"
version = "1.3.3"
@@ -203,6 +224,15 @@ dependencies = [
"rand_pcg",
]
+[[package]]
+name = "cc"
+version = "1.1.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9540e661f81799159abee814118cc139a2004b3a3aa3ea37724a1b66530b90e0"
+dependencies = [
+ "shlex",
+]
+
[[package]]
name = "cfg-if"
version = "0.1.10"
@@ -215,6 +245,19 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+[[package]]
+name = "chrono"
+version = "0.4.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
+dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
+ "num-traits",
+ "serde",
+ "windows-targets",
+]
+
[[package]]
name = "clap"
version = "2.34.0"
@@ -236,6 +279,12 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
[[package]]
name = "cpufeatures"
version = "0.2.12"
@@ -261,38 +310,14 @@ dependencies = [
"typenum",
]
-[[package]]
-name = "darling"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
-dependencies = [
- "darling_core 0.13.4",
- "darling_macro 0.13.4",
-]
-
[[package]]
name = "darling"
version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989"
dependencies = [
- "darling_core 0.20.10",
- "darling_macro 0.20.10",
-]
-
-[[package]]
-name = "darling_core"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
-dependencies = [
- "fnv",
- "ident_case",
- "proc-macro2",
- "quote",
- "strsim 0.10.0",
- "syn 1.0.109",
+ "darling_core",
+ "darling_macro",
]
[[package]]
@@ -305,29 +330,29 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
+ "strsim 0.11.1",
"syn 2.0.72",
]
[[package]]
name = "darling_macro"
-version = "0.13.4"
+version = "0.20.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
- "darling_core 0.13.4",
+ "darling_core",
"quote",
- "syn 1.0.109",
+ "syn 2.0.72",
]
[[package]]
-name = "darling_macro"
-version = "0.20.10"
+name = "deranged"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
dependencies = [
- "darling_core 0.20.10",
- "quote",
- "syn 2.0.72",
+ "powerfmt",
+ "serde",
]
[[package]]
@@ -434,7 +459,7 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59c3b24c345d8c314966bdc1832f6c2635bfcce8e7cf363bd115987bba2ee242"
dependencies = [
- "darling 0.20.10",
+ "darling",
"proc-macro2",
"quote",
"syn 2.0.72",
@@ -548,6 +573,29 @@ dependencies = [
"caller-env",
]
+[[package]]
+name = "iana-time-zone"
+version = "0.1.61"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
[[package]]
name = "ident_case"
version = "1.0.1"
@@ -568,6 +616,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
+ "serde",
]
[[package]]
@@ -578,6 +627,7 @@ checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0"
dependencies = [
"equivalent",
"hashbrown 0.14.5",
+ "serde",
]
[[package]]
@@ -595,6 +645,15 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+[[package]]
+name = "js-sys"
+version = "0.3.70"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
+dependencies = [
+ "wasm-bindgen",
+]
+
[[package]]
name = "keccak"
version = "0.1.5"
@@ -632,6 +691,12 @@ dependencies = [
"scopeguard",
]
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
[[package]]
name = "lru"
version = "0.12.4"
@@ -719,6 +784,12 @@ dependencies = [
"num-traits",
]
+[[package]]
+name = "num-conv"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
[[package]]
name = "num-derive"
version = "0.4.2"
@@ -832,6 +903,12 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
+[[package]]
+name = "powerfmt"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
[[package]]
name = "proc-macro-crate"
version = "3.1.0"
@@ -1115,24 +1192,32 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "1.14.0"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff"
+checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857"
dependencies = [
+ "base64",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.3.0",
"serde",
+ "serde_derive",
+ "serde_json",
"serde_with_macros",
+ "time",
]
[[package]]
name = "serde_with_macros"
-version = "1.5.2"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082"
+checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350"
dependencies = [
- "darling 0.13.4",
+ "darling",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.72",
]
[[package]]
@@ -1181,6 +1266,12 @@ dependencies = [
"keccak",
]
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
[[package]]
name = "simdutf8"
version = "0.1.4"
@@ -1216,9 +1307,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "strsim"
-version = "0.10.0"
+version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "structopt"
@@ -1307,6 +1398,37 @@ dependencies = [
"syn 2.0.72",
]
+[[package]]
+name = "time"
+version = "0.3.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+dependencies = [
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
+
+[[package]]
+name = "time-macros"
+version = "0.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+dependencies = [
+ "num-conv",
+ "time-core",
+]
+
[[package]]
name = "tiny-keccak"
version = "2.0.2"
@@ -1445,6 +1567,61 @@ dependencies = [
"wee_alloc",
]
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+dependencies = [
+ "cfg-if 1.0.0",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.72",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.72",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+
[[package]]
name = "wasm-encoder"
version = "0.215.0"
@@ -1535,6 +1712,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+[[package]]
+name = "windows-core"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
+dependencies = [
+ "windows-targets",
+]
+
[[package]]
name = "windows-targets"
version = "0.52.6"
diff --git a/arbitrator/wasm-libraries/forward/src/main.rs b/arbitrator/wasm-libraries/forward/src/main.rs
index 05a949e8aa..f978a8723b 100644
--- a/arbitrator/wasm-libraries/forward/src/main.rs
+++ b/arbitrator/wasm-libraries/forward/src/main.rs
@@ -191,7 +191,8 @@ fn forward_stub(file: &mut File) -> Result<()> {
"{s};; allows user_host to request a trap\n\
{s}(global $trap (mut i32) (i32.const 0))\n\
{s}(func $check unreachable)\n\
- {s}(func (export \"forward__set_trap\") unreachable)"
+ {s};; stub for the forward__set_trap function\n\
+ {s}(func $forward__set_trap unreachable)"
);
wln!("{s};; user linkage");
diff --git a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
index 37af85c382..12a6bdbed2 100644
--- a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
+++ b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs
@@ -8,7 +8,7 @@ use arbutil::{
api::{DataReader, EvmApi},
storage::StorageCache,
user::UserOutcomeKind,
- EvmData,
+ EvmData, ARBOS_VERSION_STYLUS_CHARGING_FIXES,
},
pricing::{self, EVM_API_INK, HOSTIO_INK, PTR_INK},
Bytes20, Bytes32,
@@ -143,11 +143,20 @@ pub trait UserHost: GasMeteredMachine {
/// [`SLOAD`]: https://www.evm.codes/#54
fn storage_load_bytes32(&mut self, key: GuestPtr, dest: GuestPtr) -> Result<(), Self::Err> {
self.buy_ink(HOSTIO_INK + 2 * PTR_INK)?;
- self.require_gas(evm::COLD_SLOAD_GAS + EVM_API_INK + StorageCache::REQUIRED_ACCESS_GAS)?; // cache-miss case
+ let arbos_version = self.evm_data().arbos_version;
+ // require for cache-miss case, preserve wrong behavior for old arbos
+ let evm_api_gas_to_use = if arbos_version < ARBOS_VERSION_STYLUS_CHARGING_FIXES {
+ EVM_API_INK
+ } else {
+ self.pricing().ink_to_gas(EVM_API_INK)
+ };
+ self.require_gas(
+ evm::COLD_SLOAD_GAS + StorageCache::REQUIRED_ACCESS_GAS + evm_api_gas_to_use,
+ )?;
let key = self.read_bytes32(key)?;
- let (value, gas_cost) = self.evm_api().get_bytes32(key);
+ let (value, gas_cost) = self.evm_api().get_bytes32(key, evm_api_gas_to_use);
self.buy_gas(gas_cost)?;
self.write_bytes32(dest, value)?;
trace!("storage_load_bytes32", self, key, value)
@@ -185,7 +194,10 @@ pub trait UserHost: GasMeteredMachine {
self.require_gas(evm::SSTORE_SENTRY_GAS)?; // see operations_acl_arbitrum.go
let gas_left = self.gas_left()?;
- self.evm_api().flush_storage_cache(clear, gas_left)?;
+ let gas_cost = self.evm_api().flush_storage_cache(clear, gas_left)?;
+ if self.evm_data().arbos_version >= ARBOS_VERSION_STYLUS_CHARGING_FIXES {
+ self.buy_gas(gas_cost)?;
+ }
trace!("storage_flush_cache", self, [be!(clear as u8)], &[])
}
diff --git a/arbitrator/wasm-libraries/user-host/src/link.rs b/arbitrator/wasm-libraries/user-host/src/link.rs
index 428611167d..f4c402fd97 100644
--- a/arbitrator/wasm-libraries/user-host/src/link.rs
+++ b/arbitrator/wasm-libraries/user-host/src/link.rs
@@ -37,14 +37,15 @@ struct MemoryLeaf([u8; 32]);
///
/// pages_ptr: starts pointing to max allowed pages, returns number of pages used
#[no_mangle]
-pub unsafe extern "C" fn programs__activate(
+pub unsafe extern "C" fn programs__activate_v2(
wasm_ptr: GuestPtr,
wasm_size: usize,
pages_ptr: GuestPtr,
asm_estimate_ptr: GuestPtr,
init_cost_ptr: GuestPtr,
cached_init_cost_ptr: GuestPtr,
- version: u16,
+ stylus_version: u16,
+ arbos_version_for_gas: u64,
debug: u32,
codehash: GuestPtr,
module_hash_ptr: GuestPtr,
@@ -58,7 +59,15 @@ pub unsafe extern "C" fn programs__activate(
let page_limit = STATIC_MEM.read_u16(pages_ptr);
let gas_left = &mut STATIC_MEM.read_u64(gas_ptr);
- match Module::activate(&wasm, codehash, version, page_limit, debug, gas_left) {
+ match Module::activate(
+ &wasm,
+ codehash,
+ stylus_version,
+ arbos_version_for_gas,
+ page_limit,
+ debug,
+ gas_left,
+ ) {
Ok((module, data)) => {
STATIC_MEM.write_u64(gas_ptr, *gas_left);
STATIC_MEM.write_u16(pages_ptr, data.footprint);
@@ -242,7 +251,8 @@ pub unsafe extern "C" fn programs__create_stylus_config(
/// Creates an `EvmData` handler from its component parts.
///
#[no_mangle]
-pub unsafe extern "C" fn programs__create_evm_data(
+pub unsafe extern "C" fn programs__create_evm_data_v2(
+ arbos_version: u64,
block_basefee_ptr: GuestPtr,
chainid: u64,
block_coinbase_ptr: GuestPtr,
@@ -259,6 +269,7 @@ pub unsafe extern "C" fn programs__create_evm_data(
reentrant: u32,
) -> u64 {
let evm_data = EvmData {
+ arbos_version,
block_basefee: read_bytes32(block_basefee_ptr),
cached: cached != 0,
chainid,
diff --git a/arbitrator/wasm-libraries/user-test/src/program.rs b/arbitrator/wasm-libraries/user-test/src/program.rs
index c56ea52ad0..85b522ee74 100644
--- a/arbitrator/wasm-libraries/user-test/src/program.rs
+++ b/arbitrator/wasm-libraries/user-test/src/program.rs
@@ -102,7 +102,7 @@ impl Program {
pub struct MockEvmApi;
impl EvmApi for MockEvmApi {
- fn get_bytes32(&mut self, key: Bytes32) -> (Bytes32, u64) {
+ fn get_bytes32(&mut self, key: Bytes32, _evm_api_gas_to_use: u64) -> (Bytes32, u64) {
let value = KEYS.lock().get(&key).cloned().unwrap_or_default();
(value, 2100) // pretend worst case
}
diff --git a/arbnode/api.go b/arbnode/api.go
index 228ad51cf8..2dabd41bff 100644
--- a/arbnode/api.go
+++ b/arbnode/api.go
@@ -7,9 +7,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/staker"
"github.com/offchainlabs/nitro/validator"
+ "github.com/offchainlabs/nitro/validator/server_api"
)
type BlockValidatorAPI struct {
@@ -54,3 +56,8 @@ func (a *BlockValidatorDebugAPI) ValidateMessageNumber(
result.Valid = valid
return result, err
}
+
+func (a *BlockValidatorDebugAPI) ValidationInputsAt(ctx context.Context, msgNum hexutil.Uint64, target ethdb.WasmTarget,
+) (server_api.InputJSON, error) {
+ return a.val.ValidationInputsAt(ctx, arbutil.MessageIndex(msgNum), target)
+}
diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 71239efdbb..44b360e76e 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -121,7 +121,7 @@ type BatchPoster struct {
nextRevertCheckBlock int64 // the last parent block scanned for reverting batches
postedFirstBatch bool // indicates if batch poster has posted the first batch
- accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList
+ accessList func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList
}
type l1BlockBound int
@@ -168,7 +168,7 @@ type BatchPosterConfig struct {
L1BlockBound string `koanf:"l1-block-bound" reload:"hot"`
L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"`
UseAccessLists bool `koanf:"use-access-lists" reload:"hot"`
- GasEstimateBaseFeeMultipleBips arbmath.Bips `koanf:"gas-estimate-base-fee-multiple-bips"`
+ GasEstimateBaseFeeMultipleBips arbmath.UBips `koanf:"gas-estimate-base-fee-multiple-bips"`
Dangerous BatchPosterDangerousConfig `koanf:"dangerous"`
ReorgResistanceMargin time.Duration `koanf:"reorg-resistance-margin" reload:"hot"`
CheckBatchCorrectness bool `koanf:"check-batch-correctness"`
@@ -253,7 +253,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{
L1BlockBoundBypass: time.Hour,
UseAccessLists: true,
RedisLock: redislock.DefaultCfg,
- GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2,
+ GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2,
ReorgResistanceMargin: 10 * time.Minute,
CheckBatchCorrectness: true,
}
@@ -285,7 +285,7 @@ var TestBatchPosterConfig = BatchPosterConfig{
L1BlockBound: "",
L1BlockBoundBypass: time.Hour,
UseAccessLists: true,
- GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2,
+ GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2,
CheckBatchCorrectness: true,
}
@@ -374,7 +374,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e
}
// Dataposter sender may be external signer address, so we should initialize
// access list after initializing dataposter.
- b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList {
+ b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList {
if !b.config().UseAccessLists || opts.L1Reader.IsParentChainArbitrum() {
// Access lists cost gas instead of saving gas when posting to L2s,
// because data is expensive in comparison to computation.
@@ -433,8 +433,8 @@ type AccessListOpts struct {
BridgeAddr common.Address
DataPosterAddr common.Address
GasRefunderAddr common.Address
- SequencerInboxAccs int
- AfterDelayedMessagesRead int
+ SequencerInboxAccs uint64
+ AfterDelayedMessagesRead uint64
}
// AccessList returns access list (contracts, storage slots) for batchposter.
@@ -476,12 +476,12 @@ func AccessList(opts *AccessListOpts) types.AccessList {
},
}
- for _, v := range []struct{ slotIdx, val int }{
+ for _, v := range []struct{ slotIdx, val uint64 }{
{7, opts.SequencerInboxAccs - 1}, // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1))
{7, opts.SequencerInboxAccs}, // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length))
{6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1))
} {
- sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes())
+ sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), new(big.Int).SetUint64(v.val).Bytes())
l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb))
}
@@ -603,9 +603,12 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) {
l1GasPrice = blobFeePerByte.Uint64() / 16
}
}
+ // #nosec G115
blobGasUsedGauge.Update(int64(*h.BlobGasUsed))
}
+ // #nosec G115
blockGasUsedGauge.Update(int64(h.GasUsed))
+ // #nosec G115
blockGasLimitGauge.Update(int64(h.GasLimit))
suggestedTipCap, err := b.l1Reader.Client().SuggestGasTipCap(ctx)
if err != nil {
@@ -613,6 +616,7 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) {
} else {
suggestedTipCapGauge.Update(suggestedTipCap.Int64())
}
+ // #nosec G115
l1GasPriceGauge.Update(int64(l1GasPrice))
case <-ctx.Done():
return
@@ -1031,7 +1035,7 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte,
if err != nil {
return 0, err
}
- maxFeePerGas := arbmath.BigMulByBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips)
+ maxFeePerGas := arbmath.BigMulByUBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips)
if useNormalEstimation {
_, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs)
if err != nil {
@@ -1176,6 +1180,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
if err != nil {
return false, err
}
+ // #nosec G115
firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0)
lastPotentialMsg, err := b.streamer.GetMessage(msgCount - 1)
@@ -1245,7 +1250,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
l1BoundMinTimestamp = arbmath.SaturatingUSub(latestHeader.Time, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds))
if config.L1BlockBoundBypass > 0 {
+ // #nosec G115
blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime))
+ // #nosec G115
timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second))
l1BoundMinBlockNumberWithBypass = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelayBlocks))
l1BoundMinTimestampWithBypass = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds))
@@ -1311,7 +1318,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
if hasL1Bound && config.ReorgResistanceMargin > 0 {
firstMsgBlockNumber := firstMsg.Message.Header.BlockNumber
firstMsgTimeStamp := firstMsg.Message.Header.Timestamp
+ // #nosec G115
batchNearL1BoundMinBlockNumber := firstMsgBlockNumber <= arbmath.SaturatingUAdd(l1BoundMinBlockNumber, uint64(config.ReorgResistanceMargin/ethPosBlockTime))
+ // #nosec G115
batchNearL1BoundMinTimestamp := firstMsgTimeStamp <= arbmath.SaturatingUAdd(l1BoundMinTimestamp, uint64(config.ReorgResistanceMargin/time.Second))
if batchNearL1BoundMinTimestamp || batchNearL1BoundMinBlockNumber {
log.Error(
@@ -1356,6 +1365,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
batchPosterDAFailureCounter.Inc(1)
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
+ // #nosec G115
sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain)
if err != nil {
batchPosterDAFailureCounter.Inc(1)
@@ -1403,7 +1413,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v (compressed batch was %v bytes long)", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob, len(sequencerMsg))
}
- accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg))
+ accessList := b.accessList(batchPosition.NextSeqNum, b.building.segments.delayedMsg)
// On restart, we may be trying to estimate gas for a batch whose successor has
// already made it into pending state, if not latest state.
// In that case, we might get a revert with `DelayedBackwards()`.
@@ -1439,7 +1449,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
b.building.muxBackend.delayedInboxStart = batchPosition.DelayedMessageCount
b.building.muxBackend.SetPositionWithinMessage(0)
simMux := arbstate.NewInboxMultiplexer(b.building.muxBackend, batchPosition.DelayedMessageCount, dapReaders, daprovider.KeysetValidate)
- log.Info("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1)
+ log.Debug("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1)
for i := batchPosition.MessageCount; i < b.building.msgCount; i++ {
msg, err := simMux.Pop(ctx)
if err != nil {
@@ -1505,6 +1515,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
messagesPerBatch = 1
}
backlog := uint64(unpostedMessages) / messagesPerBatch
+ // #nosec G115
batchPosterEstimatedBatchBacklogGauge.Update(int64(backlog))
if backlog > 10 {
logLevel := log.Warn
diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go
index 15446fe855..acbf9c4cc8 100644
--- a/arbnode/dataposter/data_poster.go
+++ b/arbnode/dataposter/data_poster.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -39,7 +40,6 @@ import (
"github.com/offchainlabs/nitro/arbnode/dataposter/noop"
"github.com/offchainlabs/nitro/arbnode/dataposter/slice"
"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/util/arbmath"
"github.com/offchainlabs/nitro/util/blobs"
"github.com/offchainlabs/nitro/util/headerreader"
@@ -69,7 +69,7 @@ var (
type DataPoster struct {
stopwaiter.StopWaiter
headerReader *headerreader.HeaderReader
- client arbutil.L1Interface
+ client *ethclient.Client
auth *bind.TransactOpts
signer signerFn
config ConfigFetcher
@@ -359,6 +359,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi
if err != nil {
return fmt.Errorf("getting nonce of a dataposter sender: %w", err)
}
+ // #nosec G115
latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce))
if nextNonce >= cfg.MaxMempoolTransactions+unconfirmedNonce {
return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions)
@@ -371,6 +372,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi
if err != nil {
return fmt.Errorf("getting nonce of a dataposter sender: %w", err)
}
+ // #nosec G115
latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce))
if unconfirmedNonce > nextNonce {
return fmt.Errorf("latest on-chain nonce %v is greater than to next nonce %v", unconfirmedNonce, nextNonce)
@@ -525,6 +527,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err)
}
+ // #nosec G115
latestSoftConfirmedNonceGauge.Update(int64(softConfNonce))
suggestedTip, err := p.client.SuggestGasTipCap(ctx)
@@ -635,11 +638,11 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
if config.MaxFeeBidMultipleBips > 0 {
// Limit the fee caps to be no greater than max(MaxFeeBidMultipleBips, minRbf)
- maxNonBlobFee := arbmath.BigMulByBips(currentNonBlobFee, config.MaxFeeBidMultipleBips)
+ maxNonBlobFee := arbmath.BigMulByUBips(currentNonBlobFee, config.MaxFeeBidMultipleBips)
if lastTx != nil {
maxNonBlobFee = arbmath.BigMax(maxNonBlobFee, arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease))
}
- maxBlobFee := arbmath.BigMulByBips(currentBlobFee, config.MaxFeeBidMultipleBips)
+ maxBlobFee := arbmath.BigMulByUBips(currentBlobFee, config.MaxFeeBidMultipleBips)
if lastTx != nil && lastTx.BlobGasFeeCap() != nil {
maxBlobFee = arbmath.BigMax(maxBlobFee, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease))
}
@@ -1052,6 +1055,7 @@ func (p *DataPoster) updateNonce(ctx context.Context) error {
}
return nil
}
+ // #nosec G115
latestFinalizedNonceGauge.Update(int64(nonce))
log.Info("Data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number)
if len(p.errorCount) > 0 {
@@ -1132,6 +1136,7 @@ func (p *DataPoster) Start(ctxIn context.Context) {
log.Warn("Failed to get latest nonce", "err", err)
return minWait
}
+ // #nosec G115
latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce))
// We use unconfirmedNonce here to replace-by-fee transactions that aren't in a block,
// excluding those that are in an unconfirmed block. If a reorg occurs, we'll continue
@@ -1143,7 +1148,7 @@ func (p *DataPoster) Start(ctxIn context.Context) {
}
latestQueued, err := p.queue.FetchLast(ctx)
if err != nil {
- log.Error("Failed to fetch lastest queued tx", "err", err)
+ log.Error("Failed to fetch last queued tx", "err", err)
return minWait
}
var latestCumulativeWeight, latestNonce uint64
@@ -1154,43 +1159,38 @@ func (p *DataPoster) Start(ctxIn context.Context) {
confirmedNonce := unconfirmedNonce - 1
confirmedMeta, err := p.queue.Get(ctx, confirmedNonce)
if err == nil && confirmedMeta != nil {
+ // #nosec G115
totalQueueWeightGauge.Update(int64(arbmath.SaturatingUSub(latestCumulativeWeight, confirmedMeta.CumulativeWeight())))
+ // #nosec G115
totalQueueLengthGauge.Update(int64(arbmath.SaturatingUSub(latestNonce, confirmedNonce)))
} else {
- log.Error("Failed to fetch latest confirmed tx from queue", "err", err, "confirmedMeta", confirmedMeta)
+ log.Error("Failed to fetch latest confirmed tx from queue", "confirmedNonce", confirmedNonce, "err", err, "confirmedMeta", confirmedMeta)
}
}
for _, tx := range queueContents {
- previouslyUnsent := !tx.Sent
- sendAttempted := false
if now.After(tx.NextReplacement) {
weightBacklog := arbmath.SaturatingUSub(latestCumulativeWeight, tx.CumulativeWeight())
nonceBacklog := arbmath.SaturatingUSub(latestNonce, tx.FullTx.Nonce())
err := p.replaceTx(ctx, tx, arbmath.MaxInt(nonceBacklog, weightBacklog))
- sendAttempted = true
p.maybeLogError(err, tx, "failed to replace-by-fee transaction")
+ } else {
+ err := p.sendTx(ctx, tx, tx)
+ p.maybeLogError(err, tx, "failed to re-send transaction")
+ }
+ tx, err = p.queue.Get(ctx, tx.FullTx.Nonce())
+ if err != nil {
+ log.Error("Failed to fetch tx from queue to check updated status", "nonce", tx.FullTx.Nonce(), "err", err)
+ return minWait
}
if nextCheck.After(tx.NextReplacement) {
nextCheck = tx.NextReplacement
}
- if !sendAttempted && previouslyUnsent {
- err := p.sendTx(ctx, tx, tx)
- sendAttempted = true
- p.maybeLogError(err, tx, "failed to re-send transaction")
- if err != nil {
- nextSend := time.Now().Add(time.Minute)
- if nextCheck.After(nextSend) {
- nextCheck = nextSend
- }
- }
- }
- if previouslyUnsent && sendAttempted {
- // Don't try to send more than 1 unsent transaction, to play nicely with parent chain mempools.
- // Transactions will be unsent if there was some error when originally sending them,
- // or if transaction type changes and the prior tx is not yet reorg resistant.
- break
+ if !tx.Sent {
+ // We can't progress any further if we failed to send this tx
+ // Retry sending this tx soon
+ return minWait
}
}
wait := time.Until(nextCheck)
@@ -1241,7 +1241,7 @@ type DataPosterConfig struct {
MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"`
MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"`
MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"`
- MaxFeeBidMultipleBips arbmath.Bips `koanf:"max-fee-bid-multiple-bips" reload:"hot"`
+ MaxFeeBidMultipleBips arbmath.UBips `koanf:"max-fee-bid-multiple-bips" reload:"hot"`
NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"`
AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"`
UseDBStorage bool `koanf:"use-db-storage"`
@@ -1343,9 +1343,9 @@ var DefaultDataPosterConfig = DataPosterConfig{
MaxMempoolWeight: 18,
MinTipCapGwei: 0.05,
MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time
- MaxTipCapGwei: 5,
+ MaxTipCapGwei: 1.2,
MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x
- MaxFeeBidMultipleBips: arbmath.OneInBips * 10,
+ MaxFeeBidMultipleBips: arbmath.OneInUBips * 10,
NonceRbfSoftConfs: 1,
AllocateMempoolBalance: true,
UseDBStorage: true,
@@ -1380,7 +1380,7 @@ var TestDataPosterConfig = DataPosterConfig{
MinBlobTxTipCapGwei: 1,
MaxTipCapGwei: 5,
MaxBlobTxTipCapGwei: 1,
- MaxFeeBidMultipleBips: arbmath.OneInBips * 10,
+ MaxFeeBidMultipleBips: arbmath.OneInUBips * 10,
NonceRbfSoftConfs: 1,
AllocateMempoolBalance: true,
UseDBStorage: false,
diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go
index 7f2f61c07e..7bf0f86e6f 100644
--- a/arbnode/dataposter/dataposter_test.go
+++ b/arbnode/dataposter/dataposter_test.go
@@ -2,17 +2,18 @@ package dataposter
import (
"context"
+ "errors"
"fmt"
"math/big"
"testing"
"time"
"github.com/Knetic/govaluate"
- "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
@@ -152,46 +153,36 @@ func TestMaxFeeCapFormulaCalculation(t *testing.T) {
}
}
-type stubL1Client struct {
+type stubL1ClientInner struct {
senderNonce uint64
suggestedGasTipCap *big.Int
-
- // Define most of the required methods that aren't used by feeAndTipCaps
- backends.SimulatedBackend
-}
-
-func (c *stubL1Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {
- return c.senderNonce, nil
-}
-
-func (c *stubL1Client) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
- return c.suggestedGasTipCap, nil
-}
-
-// Not used but we need to define
-func (c *stubL1Client) BlockNumber(ctx context.Context) (uint64, error) {
- return 0, nil
-}
-
-func (c *stubL1Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
- return []byte{}, nil
}
-func (c *stubL1Client) CodeAtHash(ctx context.Context, address common.Address, blockHash common.Hash) ([]byte, error) {
- return []byte{}, nil
+func (c *stubL1ClientInner) CallContext(ctx_in context.Context, result interface{}, method string, args ...interface{}) error {
+ switch method {
+ case "eth_getTransactionCount":
+ ptr, ok := result.(*hexutil.Uint64)
+ if !ok {
+ return errors.New("result is not a *hexutil.Uint64")
+ }
+ *ptr = hexutil.Uint64(c.senderNonce)
+ case "eth_maxPriorityFeePerGas":
+ ptr, ok := result.(*hexutil.Big)
+ if !ok {
+ return errors.New("result is not a *hexutil.Big")
+ }
+ *ptr = hexutil.Big(*c.suggestedGasTipCap)
+ }
+ return nil
}
-func (c *stubL1Client) ChainID(ctx context.Context) (*big.Int, error) {
+func (c *stubL1ClientInner) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*rpc.ClientSubscription, error) {
return nil, nil
}
-
-func (c *stubL1Client) Client() rpc.ClientInterface {
+func (c *stubL1ClientInner) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
return nil
}
-
-func (c *stubL1Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) {
- return common.Address{}, nil
-}
+func (c *stubL1ClientInner) Close() {}
func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T) {
conf := func() *DataPosterConfig {
@@ -204,7 +195,7 @@ func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T
MinBlobTxTipCapGwei: 1,
MaxTipCapGwei: 5,
MaxBlobTxTipCapGwei: 10,
- MaxFeeBidMultipleBips: arbmath.OneInBips * 10,
+ MaxFeeBidMultipleBips: arbmath.OneInUBips * 10,
AllocateMempoolBalance: true,
UrgencyGwei: 2.,
@@ -223,10 +214,10 @@ func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T
extraBacklog: func() uint64 { return 0 },
balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)),
usingNoOpStorage: false,
- client: &stubL1Client{
+ client: ethclient.NewClient(&stubL1ClientInner{
senderNonce: 1,
suggestedGasTipCap: big.NewInt(2 * params.GWei),
- },
+ }),
auth: &bind.TransactOpts{
From: common.Address{},
},
@@ -335,7 +326,7 @@ func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) {
MinBlobTxTipCapGwei: 1,
MaxTipCapGwei: 5,
MaxBlobTxTipCapGwei: 10,
- MaxFeeBidMultipleBips: arbmath.OneInBips * 10,
+ MaxFeeBidMultipleBips: arbmath.OneInUBips * 10,
AllocateMempoolBalance: true,
UrgencyGwei: 2.,
@@ -354,10 +345,10 @@ func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) {
extraBacklog: func() uint64 { return 0 },
balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)),
usingNoOpStorage: false,
- client: &stubL1Client{
+ client: ethclient.NewClient(&stubL1ClientInner{
senderNonce: 1,
suggestedGasTipCap: big.NewInt(2 * params.GWei),
- },
+ }),
auth: &bind.TransactOpts{
From: common.Address{},
},
diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go
index 97055193a6..6a6cd3cfa4 100644
--- a/arbnode/dataposter/dbstorage/storage.go
+++ b/arbnode/dataposter/dbstorage/storage.go
@@ -42,7 +42,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu
var res []*storage.QueuedTransaction
it := s.db.NewIterator([]byte(""), idxToKey(startingIndex))
defer it.Release()
- for i := 0; i < int(maxResults); i++ {
+ for i := uint64(0); i < maxResults; i++ {
if !it.Next() {
break
}
@@ -95,11 +95,11 @@ func (s *Storage) PruneAll(ctx context.Context) error {
if err != nil {
return fmt.Errorf("pruning all keys: %w", err)
}
- until, err := strconv.Atoi(string(idx))
+ until, err := strconv.ParseUint(string(idx), 10, 64)
if err != nil {
return fmt.Errorf("converting last item index bytes to integer: %w", err)
}
- return s.Prune(ctx, uint64(until+1))
+ return s.Prune(ctx, until+1)
}
func (s *Storage) Prune(ctx context.Context, until uint64) error {
diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go
index 69de7564a3..8685ed6f54 100644
--- a/arbnode/dataposter/slice/slicestorage.go
+++ b/arbnode/dataposter/slice/slicestorage.go
@@ -89,8 +89,8 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued
}
s.queue = append(s.queue, newEnc)
} else if index >= s.firstNonce {
- queueIdx := int(index - s.firstNonce)
- if queueIdx > len(s.queue) {
+ queueIdx := index - s.firstNonce
+ if queueIdx > uint64(len(s.queue)) {
return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue))
}
prevEnc, err := s.encDec().Encode(prev)
diff --git a/arbnode/dataposter/storage/time.go b/arbnode/dataposter/storage/time.go
index aa15f29170..82f8a3dbf5 100644
--- a/arbnode/dataposter/storage/time.go
+++ b/arbnode/dataposter/storage/time.go
@@ -34,11 +34,13 @@ func (b *RlpTime) DecodeRLP(s *rlp.Stream) error {
if err != nil {
return err
}
+ // #nosec G115
*b = RlpTime(time.Unix(int64(enc.Seconds), int64(enc.Nanos)))
return nil
}
func (b RlpTime) EncodeRLP(w io.Writer) error {
+ // #nosec G115
return rlp.Encode(w, rlpTimeEncoding{
Seconds: uint64(time.Time(b).Unix()),
Nanos: uint64(time.Time(b).Nanosecond()),
diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go
index e2aa321e0d..c6316caea7 100644
--- a/arbnode/dataposter/storage_test.go
+++ b/arbnode/dataposter/storage_test.go
@@ -72,24 +72,29 @@ func newRedisStorage(ctx context.Context, t *testing.T, encF storage.EncoderDeco
func valueOf(t *testing.T, i int) *storage.QueuedTransaction {
t.Helper()
+ // #nosec G115
meta, err := rlp.EncodeToBytes(storage.BatchPosterPosition{DelayedMessageCount: uint64(i)})
if err != nil {
t.Fatalf("Encoding batch poster position, error: %v", err)
}
return &storage.QueuedTransaction{
FullTx: types.NewTransaction(
+ // #nosec G115
uint64(i),
common.Address{},
big.NewInt(int64(i)),
+ // #nosec G115
uint64(i),
big.NewInt(int64(i)),
[]byte{byte(i)}),
Meta: meta,
DeprecatedData: types.DynamicFeeTx{
- ChainID: big.NewInt(int64(i)),
- Nonce: uint64(i),
- GasTipCap: big.NewInt(int64(i)),
- GasFeeCap: big.NewInt(int64(i)),
+ ChainID: big.NewInt(int64(i)),
+ // #nosec G115
+ Nonce: uint64(i),
+ GasTipCap: big.NewInt(int64(i)),
+ GasFeeCap: big.NewInt(int64(i)),
+ // #nosec G115
Gas: uint64(i),
Value: big.NewInt(int64(i)),
Data: []byte{byte(i % 8)},
@@ -113,6 +118,7 @@ func values(t *testing.T, from, to int) []*storage.QueuedTransaction {
func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage {
t.Helper()
for i := 0; i < 20; i++ {
+ // #nosec G115
if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil {
t.Fatalf("Error putting a key/value: %v", err)
}
@@ -153,6 +159,7 @@ func TestPruneAll(t *testing.T) {
s := newLevelDBStorage(t, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
ctx := context.Background()
for i := 0; i < 20; i++ {
+ // #nosec G115
if err := s.Put(ctx, uint64(i), nil, valueOf(t, i)); err != nil {
t.Fatalf("Error putting a key/value: %v", err)
}
@@ -236,6 +243,7 @@ func TestLast(t *testing.T) {
ctx := context.Background()
for i := 0; i < cnt; i++ {
val := valueOf(t, i)
+ // #nosec G115
if err := s.Put(ctx, uint64(i), nil, val); err != nil {
t.Fatalf("Error putting a key/value: %v", err)
}
@@ -255,6 +263,7 @@ func TestLast(t *testing.T) {
for i := 0; i < cnt-1; i++ {
prev := valueOf(t, i)
newVal := valueOf(t, cnt+i)
+ // #nosec G115
if err := s.Put(ctx, uint64(i), prev, newVal); err != nil {
t.Fatalf("Error putting a key/value: %v, prev: %v, new: %v", err, prev, newVal)
}
@@ -362,6 +371,7 @@ func TestLength(t *testing.T) {
if err != nil {
t.Fatalf("Length() unexpected error: %v", err)
}
+ // #nosec G115
if want := arbmath.MaxInt(0, 20-int(tc.pruneFrom)); got != want {
t.Errorf("Length() = %d want %d", got, want)
}
diff --git a/arbnode/delayed.go b/arbnode/delayed.go
index c166aa2b90..354fa671b3 100644
--- a/arbnode/delayed.go
+++ b/arbnode/delayed.go
@@ -19,6 +19,7 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/nitro/arbos/arbostypes"
"github.com/offchainlabs/nitro/arbutil"
@@ -58,11 +59,11 @@ type DelayedBridge struct {
con *bridgegen.IBridge
address common.Address
fromBlock uint64
- client arbutil.L1Interface
+ client *ethclient.Client
messageProviders map[common.Address]*bridgegen.IDelayedMessageProvider
}
-func NewDelayedBridge(client arbutil.L1Interface, addr common.Address, fromBlock uint64) (*DelayedBridge, error) {
+func NewDelayedBridge(client *ethclient.Client, addr common.Address, fromBlock uint64) (*DelayedBridge, error) {
con, err := bridgegen.NewIBridge(addr, client)
if err != nil {
return nil, err
@@ -215,7 +216,7 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type
}
messages := make([]*DelayedInboxMessage, 0, len(logs))
- var lastParentChainBlockNumber uint64
+ var lastParentChainBlockHash common.Hash
var lastL1BlockNumber uint64
for _, parsedLog := range parsedLogs {
msgKey := common.BigToHash(parsedLog.MessageIndex)
@@ -228,17 +229,17 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type
}
requestId := common.BigToHash(parsedLog.MessageIndex)
- parentChainBlockNumber := parsedLog.Raw.BlockNumber
+ parentChainBlockHash := parsedLog.Raw.BlockHash
var l1BlockNumber uint64
- if lastParentChainBlockNumber == parentChainBlockNumber && lastParentChainBlockNumber > 0 {
+ if lastParentChainBlockHash == parentChainBlockHash && lastParentChainBlockHash != (common.Hash{}) {
l1BlockNumber = lastL1BlockNumber
} else {
- var err error
- l1BlockNumber, err = arbutil.CorrespondingL1BlockNumber(ctx, b.client, parentChainBlockNumber)
+ parentChainHeader, err := b.client.HeaderByHash(ctx, parentChainBlockHash)
if err != nil {
return nil, err
}
- lastParentChainBlockNumber = parentChainBlockNumber
+ l1BlockNumber = arbutil.ParentHeaderToL1BlockNumber(parentChainHeader)
+ lastParentChainBlockHash = parentChainBlockHash
lastL1BlockNumber = l1BlockNumber
}
msg := &DelayedInboxMessage{
diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go
index 4f18531a76..b29a66dd05 100644
--- a/arbnode/delayed_sequencer.go
+++ b/arbnode/delayed_sequencer.go
@@ -121,6 +121,7 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock
if currentNum < config.FinalizeDistance {
return nil
}
+ // #nosec G115
finalized = uint64(currentNum - config.FinalizeDistance)
}
@@ -189,6 +190,7 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock
return fmt.Errorf("inbox reader at delayed message %v db accumulator %v doesn't match delayed bridge accumulator %v at L1 block %v", pos-1, lastDelayedAcc, delayedBridgeAcc, finalized)
}
for i, msg := range messages {
+ // #nosec G115
err = d.exec.SequenceDelayedMessage(msg, startPos+uint64(i))
if err != nil {
return err
diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go
index 77a0b6e7a2..98104b2ea7 100644
--- a/arbnode/inbox_reader.go
+++ b/arbnode/inbox_reader.go
@@ -14,6 +14,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
flag "github.com/spf13/pflag"
@@ -93,7 +94,7 @@ type InboxReader struct {
delayedBridge *DelayedBridge
sequencerInbox *SequencerInbox
caughtUpChan chan struct{}
- client arbutil.L1Interface
+ client *ethclient.Client
l1Reader *headerreader.HeaderReader
// Atomic
@@ -101,7 +102,7 @@ type InboxReader struct {
lastReadBatchCount atomic.Uint64
}
-func NewInboxReader(tracker *InboxTracker, client arbutil.L1Interface, l1Reader *headerreader.HeaderReader, firstMessageBlock *big.Int, delayedBridge *DelayedBridge, sequencerInbox *SequencerInbox, config InboxReaderConfigFetcher) (*InboxReader, error) {
+func NewInboxReader(tracker *InboxTracker, client *ethclient.Client, l1Reader *headerreader.HeaderReader, firstMessageBlock *big.Int, delayedBridge *DelayedBridge, sequencerInbox *SequencerInbox, config InboxReaderConfigFetcher) (*InboxReader, error) {
err := config().Validate()
if err != nil {
return nil, err
@@ -437,8 +438,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error {
}
delayedMessages, err := r.delayedBridge.LookupMessagesInRange(ctx, from, to, func(batchNum uint64) ([]byte, error) {
if len(sequencerBatches) > 0 && batchNum >= sequencerBatches[0].SequenceNumber {
- idx := int(batchNum - sequencerBatches[0].SequenceNumber)
- if idx < len(sequencerBatches) {
+ idx := batchNum - sequencerBatches[0].SequenceNumber
+ if idx < uint64(len(sequencerBatches)) {
return sequencerBatches[idx].Serialize(ctx, r.l1Reader.Client())
}
log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum)
@@ -542,6 +543,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error {
} else {
from = arbmath.BigAddByUint(to, 1)
}
+ // #nosec G115
haveMessages := uint64(len(delayedMessages) + len(sequencerBatches))
if haveMessages <= (config.TargetMessagesRead / 2) {
blocksToFetch += (blocksToFetch + 4) / 5
diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go
index 70392598d6..e588ef399b 100644
--- a/arbnode/inbox_test.go
+++ b/arbnode/inbox_test.go
@@ -72,7 +72,11 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*
if err != nil {
Fail(t, err)
}
- execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache)
+ stylusTargetConfig := &gethexec.DefaultStylusTargetConfig
+ Require(t, stylusTargetConfig.Validate()) // pre-processes config (i.a. parses wasmTargets)
+ if err := execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCacheCapacity, &gethexec.DefaultStylusTargetConfig); err != nil {
+ Fail(t, err)
+ }
execSeq := &execClientWrapper{execEngine, t}
inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher, &DefaultSnapSyncConfig)
if err != nil {
diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go
index 23b81bde62..7686fe413f 100644
--- a/arbnode/inbox_tracker.go
+++ b/arbnode/inbox_tracker.go
@@ -13,6 +13,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -599,7 +600,7 @@ type multiplexerBackend struct {
positionWithinMessage uint64
ctx context.Context
- client arbutil.L1Interface
+ client *ethclient.Client
inbox *InboxTracker
}
@@ -639,7 +640,7 @@ func (b *multiplexerBackend) ReadDelayedInbox(seqNum uint64) (*arbostypes.L1Inco
var delayedMessagesMismatch = errors.New("sequencer batch delayed messages missing or different")
-func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L1Interface, batches []*SequencerInboxBatch) error {
+func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client *ethclient.Client, batches []*SequencerInboxBatch) error {
var nextAcc common.Hash
var prevbatchmeta BatchMetadata
sequenceNumberToKeep := uint64(0)
@@ -804,6 +805,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L
if len(messages) > 0 {
latestTimestamp = messages[len(messages)-1].Message.Header.Timestamp
}
+ // #nosec G115
log.Info(
"InboxTracker",
"sequencerBatchCount", pos,
@@ -811,7 +813,9 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L
"l1Block", latestL1Block,
"l1Timestamp", time.Unix(int64(latestTimestamp), 0),
)
+ // #nosec G115
inboxLatestBatchGauge.Update(int64(pos))
+ // #nosec G115
inboxLatestBatchMessageGauge.Update(int64(newMessageCount))
if t.validator != nil {
diff --git a/arbnode/node.go b/arbnode/node.go
index 0676d6393c..c5b3bbe071 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -18,6 +18,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -339,6 +340,29 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error {
return nil
}
+func DataposterOnlyUsedToCreateValidatorWalletContract(
+ ctx context.Context,
+ l1Reader *headerreader.HeaderReader,
+ transactOpts *bind.TransactOpts,
+ cfg *dataposter.DataPosterConfig,
+ parentChainID *big.Int,
+) (*dataposter.DataPoster, error) {
+ cfg.UseNoOpStorage = true
+ return dataposter.NewDataPoster(ctx,
+ &dataposter.DataPosterOpts{
+ HeaderReader: l1Reader,
+ Auth: transactOpts,
+ Config: func() *dataposter.DataPosterConfig {
+ return cfg
+ },
+ MetadataRetriever: func(ctx context.Context, blockNum *big.Int) ([]byte, error) {
+ return nil, nil
+ },
+ ParentChainID: parentChainID,
+ },
+ )
+}
+
func StakerDataposter(
ctx context.Context, db ethdb.Database, l1Reader *headerreader.HeaderReader,
transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor,
@@ -384,7 +408,7 @@ func createNodeImpl(
arbDb ethdb.Database,
configFetcher ConfigFetcher,
l2Config *params.ChainConfig,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
deployInfo *chaininfo.RollupAddresses,
txOptsValidator *bind.TransactOpts,
txOptsBatchPoster *bind.TransactOpts,
@@ -515,6 +539,7 @@ func createNodeImpl(
if err != nil {
return nil, err
}
+ // #nosec G115
sequencerInbox, err := NewSequencerInbox(l1client, deployInfo.SequencerInbox, int64(deployInfo.DeployedAt))
if err != nil {
return nil, err
@@ -639,6 +664,7 @@ func createNodeImpl(
tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress)
existingWalletAddress = &tmpAddress
}
+ // #nosec G115
wallet, err = validatorwallet.NewContract(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas)
if err != nil {
return nil, err
@@ -660,7 +686,7 @@ func createNodeImpl(
confirmedNotifiers = append(confirmedNotifiers, messagePruner)
}
- stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, config.Staker, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan)
+ stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, func() *staker.L1ValidatorConfig { return &configFetcher.Get().Staker }, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan)
if err != nil {
return nil, err
}
@@ -756,7 +782,7 @@ func CreateNode(
arbDb ethdb.Database,
configFetcher ConfigFetcher,
l2Config *params.ChainConfig,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
deployInfo *chaininfo.RollupAddresses,
txOptsValidator *bind.TransactOpts,
txOptsBatchPoster *bind.TransactOpts,
diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go
index aba823cc25..249b689443 100644
--- a/arbnode/resourcemanager/resource_management.go
+++ b/arbnode/resourcemanager/resource_management.go
@@ -256,6 +256,7 @@ func readIntFromFile(fileName string) (int, error) {
if err != nil {
return 0, err
}
+ defer file.Close()
var limit int
if _, err = fmt.Fscanf(file, "%d", &limit); err != nil {
@@ -269,6 +270,7 @@ func readFromMemStats(fileName string, re *regexp.Regexp) (int, error) {
if err != nil {
return 0, err
}
+ defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go
index 85299ddc63..1fb09c5ca8 100644
--- a/arbnode/seq_coordinator.go
+++ b/arbnode/seq_coordinator.go
@@ -39,6 +39,7 @@ type SeqCoordinator struct {
redisutil.RedisCoordinator
+ sync *SyncMonitor
streamer *TransactionStreamer
sequencer execution.ExecutionSequencer
delayedSequencer *DelayedSequencer
@@ -69,9 +70,10 @@ type SeqCoordinatorConfig struct {
SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"`
ReleaseRetries int `koanf:"release-retries"`
// Max message per poll.
- MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"`
- MyUrl string `koanf:"my-url"`
- Signer signature.SignVerifyConfig `koanf:"signer"`
+ MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"`
+ MyUrl string `koanf:"my-url"`
+ DeleteFinalizedMsgs bool `koanf:"delete-finalized-msgs"`
+ Signer signature.SignVerifyConfig `koanf:"signer"`
}
func (c *SeqCoordinatorConfig) Url() string {
@@ -95,6 +97,7 @@ func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Int(prefix+".release-retries", DefaultSeqCoordinatorConfig.ReleaseRetries, "the number of times to retry releasing the wants lockout and chosen one status on shutdown")
f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MsgPerPoll), "will only be marked as wanting the lockout if not too far behind")
f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrl, "url for this sequencer if it is the chosen")
+ f.Bool(prefix+".delete-finalized-msgs", DefaultSeqCoordinatorConfig.DeleteFinalizedMsgs, "enable deleting of finalized messages from redis")
signature.SignVerifyConfigAddOptions(prefix+".signer", f)
}
@@ -104,7 +107,7 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{
RedisUrl: "",
LockoutDuration: time.Minute,
LockoutSpare: 30 * time.Second,
- SeqNumDuration: 24 * time.Hour,
+ SeqNumDuration: 10 * 24 * time.Hour,
UpdateInterval: 250 * time.Millisecond,
HandoffTimeout: 30 * time.Second,
SafeShutdownDelay: 5 * time.Second,
@@ -112,23 +115,25 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{
RetryInterval: 50 * time.Millisecond,
MsgPerPoll: 2000,
MyUrl: redisutil.INVALID_URL,
+ DeleteFinalizedMsgs: true,
Signer: signature.DefaultSignVerifyConfig,
}
var TestSeqCoordinatorConfig = SeqCoordinatorConfig{
- Enable: false,
- RedisUrl: "",
- LockoutDuration: time.Second * 2,
- LockoutSpare: time.Millisecond * 10,
- SeqNumDuration: time.Minute * 10,
- UpdateInterval: time.Millisecond * 10,
- HandoffTimeout: time.Millisecond * 200,
- SafeShutdownDelay: time.Millisecond * 100,
- ReleaseRetries: 4,
- RetryInterval: time.Millisecond * 3,
- MsgPerPoll: 20,
- MyUrl: redisutil.INVALID_URL,
- Signer: signature.DefaultSignVerifyConfig,
+ Enable: false,
+ RedisUrl: "",
+ LockoutDuration: time.Second * 2,
+ LockoutSpare: time.Millisecond * 10,
+ SeqNumDuration: time.Minute * 10,
+ UpdateInterval: time.Millisecond * 10,
+ HandoffTimeout: time.Millisecond * 200,
+ SafeShutdownDelay: time.Millisecond * 100,
+ ReleaseRetries: 4,
+ RetryInterval: time.Millisecond * 3,
+ MsgPerPoll: 20,
+ MyUrl: redisutil.INVALID_URL,
+ DeleteFinalizedMsgs: true,
+ Signer: signature.DefaultSignVerifyConfig,
}
func NewSeqCoordinator(
@@ -149,6 +154,7 @@ func NewSeqCoordinator(
}
coordinator := &SeqCoordinator{
RedisCoordinator: *redisCoordinator,
+ sync: sync,
streamer: streamer,
sequencer: sequencer,
config: config,
@@ -338,6 +344,14 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC
return nil
}
+func (c *SeqCoordinator) getRemoteFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) {
+ resStr, err := c.Client.Get(ctx, redisutil.FINALIZED_MSG_COUNT_KEY).Result()
+ if err != nil {
+ return 0, err
+ }
+ return c.signedBytesToMsgCount(ctx, []byte(resStr))
+}
+
func (c *SeqCoordinator) getRemoteMsgCountImpl(ctx context.Context, r redis.Cmdable) (arbutil.MessageIndex, error) {
resStr, err := r.Get(ctx, redisutil.MSG_COUNT_KEY).Result()
if errors.Is(err, redis.Nil) {
@@ -473,6 +487,17 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin
return c.noRedisError()
}
// Was, and still is, the active sequencer
+ if c.config.DeleteFinalizedMsgs {
+ // Before proceeding, first try deleting finalized messages from redis and setting the finalizedMsgCount key
+ finalized, err := c.sync.GetFinalizedMsgCount(ctx)
+ if err != nil {
+ log.Warn("Error getting finalizedMessageCount from syncMonitor", "err", err)
+ } else if finalized == 0 {
+ log.Warn("SyncMonitor returned zero finalizedMessageCount")
+ } else if err := c.deleteFinalizedMsgsFromRedis(ctx, finalized); err != nil {
+ log.Warn("Coordinator failed to delete finalized messages from redis", "err", err)
+ }
+ }
// We leave a margin of error of either a five times the update interval or a fifth of the lockout duration, whichever is greater.
marginOfError := arbmath.MaxInt(c.config.LockoutDuration/5, c.config.UpdateInterval*5)
if time.Now().Add(marginOfError).Before(atomicTimeRead(&c.lockoutUntil)) {
@@ -492,6 +517,62 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin
return c.noRedisError()
}
+func (c *SeqCoordinator) deleteFinalizedMsgsFromRedis(ctx context.Context, finalized arbutil.MessageIndex) error {
+ deleteMsgsAndUpdateFinalizedMsgCount := func(keys []string) error {
+ if len(keys) > 0 {
+ // To support cases during init we delete keys from reverse (i.e lowest seq num first), so that even if deletion fails in one of the iterations
+ // next time deleteFinalizedMsgsFromRedis is called we dont miss undeleted messages, as exists is checked from higher seqnum to lower.
+ // In non-init cases it doesn't matter how we delete as we always try to delete from prevFinalized to finalized
+ batchDeleteCount := 1000
+ for i := len(keys); i > 0; i -= batchDeleteCount {
+ if err := c.Client.Del(ctx, keys[max(0, i-batchDeleteCount):i]...).Err(); err != nil {
+ return fmt.Errorf("error deleting finalized messages and their signatures from redis: %w", err)
+ }
+ }
+ }
+ finalizedBytes, err := c.msgCountToSignedBytes(finalized)
+ if err != nil {
+ return err
+ }
+ if err = c.Client.Set(ctx, redisutil.FINALIZED_MSG_COUNT_KEY, finalizedBytes, c.config.SeqNumDuration).Err(); err != nil {
+ return fmt.Errorf("couldn't set %s key to current finalizedMsgCount in redis: %w", redisutil.FINALIZED_MSG_COUNT_KEY, err)
+ }
+ return nil
+ }
+ prevFinalized, err := c.getRemoteFinalizedMsgCount(ctx)
+ if errors.Is(err, redis.Nil) {
+ var keys []string
+ for msg := finalized - 1; msg > 0; msg-- {
+ exists, err := c.Client.Exists(ctx, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg)).Result()
+ if err != nil {
+ // If there is an error deleting finalized messages during init, we retry later either from this sequencer or from another
+ return err
+ }
+ if exists == 0 {
+ break
+ }
+ keys = append(keys, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg))
+ }
+ log.Info("Initializing finalizedMsgCount and deleting finalized messages from redis", "finalizedMsgCount", finalized)
+ return deleteMsgsAndUpdateFinalizedMsgCount(keys)
+ } else if err != nil {
+ return fmt.Errorf("error getting finalizedMsgCount value from redis: %w", err)
+ }
+ remoteMsgCount, err := c.getRemoteMsgCountImpl(ctx, c.Client)
+ if err != nil {
+ return fmt.Errorf("cannot get remote message count: %w", err)
+ }
+ msgToDelete := min(finalized, remoteMsgCount)
+ if prevFinalized < msgToDelete {
+ var keys []string
+ for msg := prevFinalized; msg < msgToDelete; msg++ {
+ keys = append(keys, redisutil.MessageKeyFor(msg), redisutil.MessageSigKeyFor(msg))
+ }
+ return deleteMsgsAndUpdateFinalizedMsgCount(keys)
+ }
+ return nil
+}
+
func (c *SeqCoordinator) update(ctx context.Context) time.Duration {
chosenSeq, err := c.RecommendSequencerWantingLockout(ctx)
if err != nil {
@@ -522,19 +603,24 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration {
log.Error("cannot read message count", "err", err)
return c.config.UpdateInterval
}
+ remoteFinalizedMsgCount, err := c.getRemoteFinalizedMsgCount(ctx)
+ if err != nil {
+ loglevel := log.Error
+ if errors.Is(err, redis.Nil) {
+ loglevel = log.Debug
+ }
+ loglevel("Cannot get remote finalized message count, might encounter failed to read message warnings later", "err", err)
+ }
remoteMsgCount, err := c.GetRemoteMsgCount()
if err != nil {
log.Warn("cannot get remote message count", "err", err)
return c.retryAfterRedisError()
}
- readUntil := remoteMsgCount
- if readUntil > localMsgCount+c.config.MsgPerPoll {
- readUntil = localMsgCount + c.config.MsgPerPoll
- }
+ readUntil := min(localMsgCount+c.config.MsgPerPoll, remoteMsgCount)
var messages []arbostypes.MessageWithMetadata
msgToRead := localMsgCount
var msgReadErr error
- for msgToRead < readUntil {
+ for msgToRead < readUntil && localMsgCount >= remoteFinalizedMsgCount {
var resString string
resString, msgReadErr = c.Client.Get(ctx, redisutil.MessageKeyFor(msgToRead)).Result()
if msgReadErr != nil && c.sequencer.Synced() {
diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_test.go
similarity index 57%
rename from arbnode/seq_coordinator_atomic_test.go
rename to arbnode/seq_coordinator_test.go
index 9b9d9dea81..6498543f3a 100644
--- a/arbnode/seq_coordinator_atomic_test.go
+++ b/arbnode/seq_coordinator_test.go
@@ -156,3 +156,94 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) {
}
}
+
+func TestSeqCoordinatorDeletesFinalizedMessages(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ coordConfig := TestSeqCoordinatorConfig
+ coordConfig.LockoutDuration = time.Millisecond * 100
+ coordConfig.LockoutSpare = time.Millisecond * 10
+ coordConfig.Signer.ECDSA.AcceptSequencer = false
+ coordConfig.Signer.SymmetricFallback = true
+ coordConfig.Signer.SymmetricSign = true
+ coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true
+ coordConfig.Signer.Symmetric.SigningKey = ""
+
+ nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil)
+ Require(t, err)
+
+ redisUrl := redisutil.CreateTestRedis(ctx, t)
+ coordConfig.RedisUrl = redisUrl
+
+ config := coordConfig
+ config.MyUrl = "test"
+ redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl)
+ Require(t, err)
+ coordinator := &SeqCoordinator{
+ RedisCoordinator: *redisCoordinator,
+ config: config,
+ signer: nullSigner,
+ }
+
+ // Add messages to redis
+ var keys []string
+ msgBytes, err := coordinator.msgCountToSignedBytes(0)
+ Require(t, err)
+ for i := arbutil.MessageIndex(1); i <= 10; i++ {
+ err = coordinator.Client.Set(ctx, redisutil.MessageKeyFor(i), msgBytes, time.Hour).Err()
+ Require(t, err)
+ err = coordinator.Client.Set(ctx, redisutil.MessageSigKeyFor(i), msgBytes, time.Hour).Err()
+ Require(t, err)
+ keys = append(keys, redisutil.MessageKeyFor(i), redisutil.MessageSigKeyFor(i))
+ }
+ // Set msgCount key
+ msgCountBytes, err := coordinator.msgCountToSignedBytes(11)
+ Require(t, err)
+ err = coordinator.Client.Set(ctx, redisutil.MSG_COUNT_KEY, msgCountBytes, time.Hour).Err()
+ Require(t, err)
+ exists, err := coordinator.Client.Exists(ctx, keys...).Result()
+ Require(t, err)
+ if exists != 20 {
+ t.Fatal("couldn't find all messages and signatures in redis")
+ }
+
+ // Set finalizedMsgCount and delete finalized messages
+ err = coordinator.deleteFinalizedMsgsFromRedis(ctx, 5)
+ Require(t, err)
+
+ // Check if messages and signatures were deleted successfully
+ exists, err = coordinator.Client.Exists(ctx, keys[:8]...).Result()
+ Require(t, err)
+ if exists != 0 {
+ t.Fatal("finalized messages and signatures in range 1 to 4 were not deleted")
+ }
+
+ // Check if finalizedMsgCount was set to correct value
+ finalized, err := coordinator.getRemoteFinalizedMsgCount(ctx)
+ Require(t, err)
+ if finalized != 5 {
+ t.Fatalf("incorrect finalizedMsgCount, want: 5, have: %d", finalized)
+ }
+
+ // Try deleting finalized messages when theres already a finalizedMsgCount
+ err = coordinator.deleteFinalizedMsgsFromRedis(ctx, 7)
+ Require(t, err)
+ exists, err = coordinator.Client.Exists(ctx, keys[8:12]...).Result()
+ Require(t, err)
+ if exists != 0 {
+ t.Fatal("finalized messages and signatures in range 5 to 6 were not deleted")
+ }
+ finalized, err = coordinator.getRemoteFinalizedMsgCount(ctx)
+ Require(t, err)
+ if finalized != 7 {
+ t.Fatalf("incorrect finalizedMsgCount, want: 7, have: %d", finalized)
+ }
+
+ // Check that non-finalized messages are still available in redis
+ exists, err = coordinator.Client.Exists(ctx, keys[12:]...).Result()
+ Require(t, err)
+ if exists != 8 {
+ t.Fatal("non-finalized messages and signatures in range 7 to 10 are not fully available")
+ }
+}
diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go
index 73e52ded53..81146ed46e 100644
--- a/arbnode/sequencer_inbox.go
+++ b/arbnode/sequencer_inbox.go
@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/nitro/arbstate/daprovider"
"github.com/offchainlabs/nitro/arbutil"
@@ -52,10 +53,10 @@ type SequencerInbox struct {
con *bridgegen.SequencerInbox
address common.Address
fromBlock int64
- client arbutil.L1Interface
+ client *ethclient.Client
}
-func NewSequencerInbox(client arbutil.L1Interface, addr common.Address, fromBlock int64) (*SequencerInbox, error) {
+func NewSequencerInbox(client *ethclient.Client, addr common.Address, fromBlock int64) (*SequencerInbox, error) {
con, err := bridgegen.NewSequencerInbox(addr, client)
if err != nil {
return nil, err
@@ -111,7 +112,7 @@ type SequencerInboxBatch struct {
serialized []byte // nil if serialization isn't cached yet
}
-func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbutil.L1Interface) ([]byte, error) {
+func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client *ethclient.Client) ([]byte, error) {
switch m.dataLocation {
case batchDataTxInput:
data, err := arbutil.GetLogEmitterTxData(ctx, client, m.rawLog)
@@ -169,7 +170,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut
}
}
-func (m *SequencerInboxBatch) Serialize(ctx context.Context, client arbutil.L1Interface) ([]byte, error) {
+func (m *SequencerInboxBatch) Serialize(ctx context.Context, client *ethclient.Client) ([]byte, error) {
if m.serialized != nil {
return m.serialized, nil
}
diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go
index d3b9a7e1c6..5ab1ede2d6 100644
--- a/arbnode/sync_monitor.go
+++ b/arbnode/sync_monitor.go
@@ -72,6 +72,13 @@ func (s *SyncMonitor) SyncTargetMessageCount() arbutil.MessageIndex {
return s.syncTarget
}
+func (s *SyncMonitor) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) {
+ if s.inboxReader != nil && s.inboxReader.l1Reader != nil {
+ return s.inboxReader.GetFinalizedMsgCount(ctx)
+ }
+ return 0, nil
+}
+
func (s *SyncMonitor) maxMessageCount() (arbutil.MessageIndex, error) {
msgCount, err := s.txStreamer.GetMessageCount()
if err != nil {
diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go
index 90e7feddc6..38b1c003db 100644
--- a/arbnode/transaction_streamer.go
+++ b/arbnode/transaction_streamer.go
@@ -279,6 +279,7 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde
return err
}
config := s.config()
+ // #nosec G115
maxResequenceMsgCount := count + arbutil.MessageIndex(config.MaxReorgResequenceDepth)
if config.MaxReorgResequenceDepth >= 0 && maxResequenceMsgCount < targetMsgCount {
log.Error(
@@ -388,6 +389,7 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde
}
for i := 0; i < len(messagesResults); i++ {
+ // #nosec G115
pos := count + arbutil.MessageIndex(i)
err = s.storeResult(pos, *messagesResults[i], batch)
if err != nil {
@@ -680,7 +682,7 @@ func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, m
if err != nil {
return err
}
- if dups == len(messages) {
+ if dups == uint64(len(messages)) {
return endBatch(batch)
}
// cant keep reorg lock when catching insertionMutex.
@@ -715,10 +717,10 @@ func (s *TransactionStreamer) countDuplicateMessages(
pos arbutil.MessageIndex,
messages []arbostypes.MessageWithMetadataAndBlockHash,
batch *ethdb.Batch,
-) (int, bool, *arbostypes.MessageWithMetadata, error) {
- curMsg := 0
+) (uint64, bool, *arbostypes.MessageWithMetadata, error) {
+ var curMsg uint64
for {
- if len(messages) == curMsg {
+ if uint64(len(messages)) == curMsg {
break
}
key := dbKey(messagePrefix, uint64(pos))
@@ -818,7 +820,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil
broadcastStartPos := arbutil.MessageIndex(s.broadcasterQueuedMessagesPos.Load())
if messagesAreConfirmed {
- var duplicates int
+ var duplicates uint64
var err error
duplicates, confirmedReorg, oldMsg, err = s.countDuplicateMessages(messageStartPos, messages, &batch)
if err != nil {
@@ -840,6 +842,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil
// Active broadcast reorg and L1 messages at or before start of broadcast messages
// Or no active broadcast reorg and broadcast messages start before or immediately after last L1 message
if messagesAfterPos >= broadcastStartPos {
+ // #nosec G115
broadcastSliceIndex := int(messagesAfterPos - broadcastStartPos)
messagesOldLen := len(messages)
if broadcastSliceIndex < len(s.broadcasterQueuedMessages) {
@@ -856,7 +859,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil
var feedReorg bool
if !hasNewConfirmedMessages {
- var duplicates int
+ var duplicates uint64
var err error
duplicates, feedReorg, oldMsg, err = s.countDuplicateMessages(messageStartPos, messages, nil)
if err != nil {
@@ -888,6 +891,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil
// Validate delayed message counts of remaining messages
for i, msg := range messages {
+ // #nosec G115
msgPos := messageStartPos + arbutil.MessageIndex(i)
diff := msg.MessageWithMeta.DelayedMessagesRead - lastDelayedRead
if diff != 0 && diff != 1 {
@@ -923,6 +927,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil
// Check if new messages were added at the end of cache, if they were, then dont remove those particular messages
if len(s.broadcasterQueuedMessages) > cacheClearLen {
s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[cacheClearLen:]
+ // #nosec G115
s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos) + uint64(cacheClearLen))
} else {
s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[:0]
@@ -1043,6 +1048,7 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [
batch = s.db.NewBatch()
}
for i, msg := range messages {
+ // #nosec G115
err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch)
if err != nil {
return err
@@ -1134,7 +1140,7 @@ func (s *TransactionStreamer) storeResult(
// exposed for testing
// return value: true if should be called again immediately
-func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution.ExecutionSequencer) bool {
+func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context) bool {
if ctx.Err() != nil {
return false
}
@@ -1206,7 +1212,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution
}
func (s *TransactionStreamer) executeMessages(ctx context.Context, ignored struct{}) time.Duration {
- if s.ExecuteNextMsg(ctx, s.exec) {
+ if s.ExecuteNextMsg(ctx) {
return 0
}
return s.config().ExecuteMessageLoopDelay
diff --git a/arbos/activate_test.go b/arbos/activate_test.go
index 55440bb208..a89a38639a 100644
--- a/arbos/activate_test.go
+++ b/arbos/activate_test.go
@@ -20,6 +20,7 @@ func TestActivationDataFee(t *testing.T) {
rand.Seed(time.Now().UTC().UnixNano())
state, _ := arbosState.NewArbosMemoryBackedArbOSState()
pricer := state.Programs().DataPricer()
+ // #nosec G115
time := uint64(time.Now().Unix())
assert := func(cond bool) {
diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go
index 1f09ff1440..156f36e7e7 100644
--- a/arbos/addressSet/addressSet.go
+++ b/arbos/addressSet/addressSet.go
@@ -79,6 +79,7 @@ func (as *AddressSet) AllMembers(maxNumToReturn uint64) ([]common.Address, error
}
ret := make([]common.Address, size)
for i := range ret {
+ // #nosec G115
sba := as.backingStorage.OpenStorageBackedAddress(uint64(i + 1))
ret[i], err = sba.Get()
if err != nil {
diff --git a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go
index 7d06c74f0b..d32e07a546 100644
--- a/arbos/addressSet/addressSet_test.go
+++ b/arbos/addressSet/addressSet_test.go
@@ -316,6 +316,7 @@ func checkIfRectifyMappingWorks(t *testing.T, aset *AddressSet, owners []common.
Fail(t, "RectifyMapping did not fix the mismatch")
}
+ // #nosec G115
if clearList && int(size(t, aset)) != index+1 {
Fail(t, "RectifyMapping did not fix the mismatch")
}
diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go
index 3fbb7b3782..6ae271060d 100644
--- a/arbos/addressTable/addressTable.go
+++ b/arbos/addressTable/addressTable.go
@@ -103,6 +103,7 @@ func (atab *AddressTable) Decompress(buf []byte) (common.Address, uint64, error)
return common.Address{}, 0, err
}
if len(input) == 20 {
+ // #nosec G115
numBytesRead := uint64(rd.Size() - int64(rd.Len()))
return common.BytesToAddress(input), numBytesRead, nil
} else {
@@ -118,6 +119,7 @@ func (atab *AddressTable) Decompress(buf []byte) (common.Address, uint64, error)
if !exists {
return common.Address{}, 0, errors.New("invalid index in compressed address")
}
+ // #nosec G115
numBytesRead := uint64(rd.Size() - int64(rd.Len()))
return addr, numBytesRead, nil
}
diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go
index 91c2207aae..f53d9c892a 100644
--- a/arbos/arbosState/arbosstate.go
+++ b/arbos/arbosState/arbosstate.go
@@ -41,28 +41,29 @@ import (
// persisted beyond the end of the test.)
type ArbosState struct {
- arbosVersion uint64 // version of the ArbOS storage format and semantics
- maxArbosVersionSupported uint64 // maximum ArbOS version supported by this code
- maxDebugArbosVersionSupported uint64 // maximum ArbOS version supported by this code in debug mode
- upgradeVersion storage.StorageBackedUint64 // version we're planning to upgrade to, or 0 if not planning to upgrade
- upgradeTimestamp storage.StorageBackedUint64 // when to do the planned upgrade
- networkFeeAccount storage.StorageBackedAddress
- l1PricingState *l1pricing.L1PricingState
- l2PricingState *l2pricing.L2PricingState
- retryableState *retryables.RetryableState
- addressTable *addressTable.AddressTable
- chainOwners *addressSet.AddressSet
- sendMerkle *merkleAccumulator.MerkleAccumulator
- programs *programs.Programs
- blockhashes *blockhash.Blockhashes
- chainId storage.StorageBackedBigInt
- chainConfig storage.StorageBackedBytes
- genesisBlockNum storage.StorageBackedUint64
- infraFeeAccount storage.StorageBackedAddress
- brotliCompressionLevel storage.StorageBackedUint64 // brotli compression level used for pricing
- backingStorage *storage.Storage
- Burner burn.Burner
-}
+ arbosVersion uint64 // version of the ArbOS storage format and semantics
+ upgradeVersion storage.StorageBackedUint64 // version we're planning to upgrade to, or 0 if not planning to upgrade
+ upgradeTimestamp storage.StorageBackedUint64 // when to do the planned upgrade
+ networkFeeAccount storage.StorageBackedAddress
+ l1PricingState *l1pricing.L1PricingState
+ l2PricingState *l2pricing.L2PricingState
+ retryableState *retryables.RetryableState
+ addressTable *addressTable.AddressTable
+ chainOwners *addressSet.AddressSet
+ sendMerkle *merkleAccumulator.MerkleAccumulator
+ programs *programs.Programs
+ blockhashes *blockhash.Blockhashes
+ chainId storage.StorageBackedBigInt
+ chainConfig storage.StorageBackedBytes
+ genesisBlockNum storage.StorageBackedUint64
+ infraFeeAccount storage.StorageBackedAddress
+ brotliCompressionLevel storage.StorageBackedUint64 // brotli compression level used for pricing
+ backingStorage *storage.Storage
+ Burner burn.Burner
+}
+
+const MaxArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes
+const MaxDebugArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes
var ErrUninitializedArbOS = errors.New("ArbOS uninitialized")
var ErrAlreadyInitialized = errors.New("ArbOS is already initialized")
@@ -78,8 +79,6 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error)
}
return &ArbosState{
arbosVersion,
- 31,
- 31,
backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)),
backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)),
backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)),
@@ -332,6 +331,9 @@ func (state *ArbosState) UpgradeArbosVersion(
ensure(params.UpgradeToVersion(2))
ensure(params.Save())
+ case 32:
+ // no change state needed
+
default:
return fmt.Errorf(
"the chain is upgrading to unsupported ArbOS version %v, %w",
@@ -416,14 +418,6 @@ func (state *ArbosState) RetryableState() *retryables.RetryableState {
return state.retryableState
}
-func (state *ArbosState) MaxArbosVersionSupported() uint64 {
- return state.maxArbosVersionSupported
-}
-
-func (state *ArbosState) MaxDebugArbosVersionSupported() uint64 {
- return state.maxDebugArbosVersionSupported
-}
-
func (state *ArbosState) L1PricingState() *l1pricing.L1PricingState {
return state.l1PricingState
}
diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go
index 34802392fe..5e605b8bd2 100644
--- a/arbos/arbosState/initialization_test.go
+++ b/arbos/arbosState/initialization_test.go
@@ -109,6 +109,7 @@ func pseudorandomAccountInitInfoForTesting(prand *testhelpers.PseudoRandomDataSo
}
func pseudorandomHashHashMapForTesting(prand *testhelpers.PseudoRandomDataSource, maxItems uint64) map[common.Hash]common.Hash {
+ // #nosec G115
size := int(prand.GetUint64() % maxItems)
ret := make(map[common.Hash]common.Hash)
for i := 0; i < size; i++ {
@@ -125,6 +126,7 @@ func checkAddressTable(arbState *ArbosState, addrTable []common.Address, t *test
Fail(t)
}
for i, addr := range addrTable {
+ // #nosec G115
res, exists, err := atab.LookupIndex(uint64(i))
Require(t, err)
if !exists {
diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go
index c44febf386..427bdc3087 100644
--- a/arbos/arbosState/initialize.go
+++ b/arbos/arbosState/initialize.go
@@ -6,10 +6,12 @@ package arbosState
import (
"errors"
"math/big"
+ "regexp"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
@@ -64,6 +66,8 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig,
log.Crit("failed to init empty statedb", "error", err)
}
+ noStateTrieChangesToCommitError := regexp.MustCompile("^triedb layer .+ is disk layer$")
+
// commit avoids keeping the entire state in memory while importing the state.
// At some time it was also used to avoid reprocessing the whole import in case of a crash.
commit := func() (common.Hash, error) {
@@ -73,7 +77,11 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig,
}
err = stateDatabase.TrieDB().Commit(root, true)
if err != nil {
- return common.Hash{}, err
+ // pathdb returns an error when there are no state trie changes to commit and we try to commit.
+ // This checks if the error is the expected one and ignores it.
+ if (cacheConfig.StateScheme != rawdb.PathScheme) || !noStateTrieChangesToCommitError.MatchString(err.Error()) {
+ return common.Hash{}, err
+ }
}
statedb, err = state.New(root, stateDatabase, nil)
if err != nil {
@@ -100,7 +108,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig,
if err != nil {
return common.Hash{}, err
}
- for i := 0; addressReader.More(); i++ {
+ for i := uint64(0); addressReader.More(); i++ {
addr, err := addressReader.GetNext()
if err != nil {
return common.Hash{}, err
@@ -109,7 +117,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig,
if err != nil {
return common.Hash{}, err
}
- if uint64(i) != slot {
+ if i != slot {
return common.Hash{}, errors.New("address table slot mismatch")
}
}
diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go
index 04ce8ebe2e..c4c2dc037b 100644
--- a/arbos/arbostypes/incomingmessage.go
+++ b/arbos/arbostypes/incomingmessage.go
@@ -182,6 +182,17 @@ func (msg *L1IncomingMessage) FillInBatchGasCost(batchFetcher FallibleBatchFetch
return nil
}
+func (msg *L1IncomingMessage) PastBatchesRequired() ([]uint64, error) {
+ if msg.Header.Kind != L1MessageType_BatchPostingReport {
+ return nil, nil
+ }
+ _, _, _, batchNum, _, _, err := ParseBatchPostingReportMessageFields(bytes.NewReader(msg.L2msg))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse batch posting report: %w", err)
+ }
+ return []uint64{batchNum}, nil
+}
+
func ParseIncomingL1Message(rd io.Reader, batchFetcher FallibleBatchFetcher) (*L1IncomingMessage, error) {
var kindBuf [1]byte
_, err := rd.Read(kindBuf[:])
diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go
index 9e00eeb581..392bf36d37 100644
--- a/arbos/l1pricing/l1pricing.go
+++ b/arbos/l1pricing/l1pricing.go
@@ -509,7 +509,7 @@ func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, post
return 0
}
- l1Bytes, err := byteCountAfterBrotliLevel(txBytes, int(brotliCompressionLevel))
+ l1Bytes, err := byteCountAfterBrotliLevel(txBytes, brotliCompressionLevel)
if err != nil {
panic(fmt.Sprintf("failed to compress tx: %v", err))
}
@@ -594,7 +594,7 @@ func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Ad
return am.BigMulByUint(pricePerUnit, units), units
}
-func byteCountAfterBrotliLevel(input []byte, level int) (uint64, error) {
+func byteCountAfterBrotliLevel(input []byte, level uint64) (uint64, error) {
compressed, err := arbcompress.CompressLevel(input, level)
if err != nil {
return 0, err
diff --git a/arbos/l1pricing_test.go b/arbos/l1pricing_test.go
index 6e2b1b7eec..6f9e3ecb35 100644
--- a/arbos/l1pricing_test.go
+++ b/arbos/l1pricing_test.go
@@ -100,7 +100,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults {
availableFunds = availableFundsCap
}
}
- fundsWantedForRewards := big.NewInt(int64(input.unitReward * input.unitsPerSecond))
+ fundsWantedForRewards := new(big.Int).SetUint64(input.unitReward * input.unitsPerSecond)
unitsAllocated := arbmath.UintToBig(input.unitsPerSecond)
if arbmath.BigLessThan(availableFunds, fundsWantedForRewards) {
ret.rewardRecipientBalance = availableFunds
@@ -111,7 +111,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults {
uncappedAvailableFunds = arbmath.BigSub(uncappedAvailableFunds, ret.rewardRecipientBalance)
ret.unitsRemaining = (3 * input.unitsPerSecond) - unitsAllocated.Uint64()
- maxCollectable := big.NewInt(int64(input.fundsSpent))
+ maxCollectable := new(big.Int).SetUint64(input.fundsSpent)
if arbmath.BigLessThan(availableFunds, maxCollectable) {
maxCollectable = availableFunds
}
@@ -170,7 +170,7 @@ func _testL1PricingFundsDue(t *testing.T, testParams *l1PricingTest, expectedRes
Require(t, err)
// create some fake collection
- balanceAdded := big.NewInt(int64(testParams.fundsCollectedPerSecond * 3))
+ balanceAdded := new(big.Int).SetUint64(testParams.fundsCollectedPerSecond * 3)
unitsAdded := testParams.unitsPerSecond * 3
evm.StateDB.AddBalance(l1pricing.L1PricerFundsPoolAddress, uint256.MustFromBig(balanceAdded))
err = l1p.SetL1FeesAvailable(balanceAdded)
@@ -279,7 +279,9 @@ func _testL1PriceEquilibration(t *testing.T, initialL1BasefeeEstimate *big.Int,
evm.StateDB,
evm,
3,
+ // #nosec G115
uint64(10*(i+1)),
+ // #nosec G115
uint64(10*(i+1)+5),
bpAddr,
arbmath.BigMulByUint(equilibriumL1BasefeeEstimate, unitsToAdd),
diff --git a/arbos/l2pricing/l2pricing_test.go b/arbos/l2pricing/l2pricing_test.go
index 57759d7f82..aa1e785f70 100644
--- a/arbos/l2pricing/l2pricing_test.go
+++ b/arbos/l2pricing/l2pricing_test.go
@@ -40,6 +40,7 @@ func TestPricingModelExp(t *testing.T) {
// show that running at the speed limit with a full pool is a steady-state
colors.PrintBlue("full pool & speed limit")
for seconds := 0; seconds < 4; seconds++ {
+ // #nosec G115
fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds))
if getPrice(t, pricing) != minPrice {
Fail(t, "price changed when it shouldn't have")
@@ -50,6 +51,7 @@ func TestPricingModelExp(t *testing.T) {
// note that for large enough spans of time the price will rise a miniscule amount due to the pool's avg
colors.PrintBlue("pool target & speed limit")
for seconds := 0; seconds < 4; seconds++ {
+ // #nosec G115
fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds))
if getPrice(t, pricing) != minPrice {
Fail(t, "price changed when it shouldn't have")
@@ -59,6 +61,7 @@ func TestPricingModelExp(t *testing.T) {
// show that running over the speed limit escalates the price before the pool drains
colors.PrintBlue("exceeding the speed limit")
for {
+ // #nosec G115
fakeBlockUpdate(t, pricing, 8*int64(limit), 1)
newPrice := getPrice(t, pricing)
if newPrice < price {
diff --git a/arbos/l2pricing/model.go b/arbos/l2pricing/model.go
index 131af2c2cf..476effa8aa 100644
--- a/arbos/l2pricing/model.go
+++ b/arbos/l2pricing/model.go
@@ -30,22 +30,26 @@ func (ps *L2PricingState) AddToGasPool(gas int64) error {
return err
}
// pay off some of the backlog with the added gas, stopping at 0
- backlog = arbmath.SaturatingUCast[uint64](arbmath.SaturatingSub(int64(backlog), gas))
+ if gas > 0 {
+ backlog = arbmath.SaturatingUSub(backlog, uint64(gas))
+ } else {
+ backlog = arbmath.SaturatingUAdd(backlog, uint64(-gas))
+ }
return ps.SetGasBacklog(backlog)
}
// UpdatePricingModel updates the pricing model with info from the last block
func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint64, debug bool) {
speedLimit, _ := ps.SpeedLimitPerSecond()
- _ = ps.AddToGasPool(int64(timePassed * speedLimit))
+ _ = ps.AddToGasPool(arbmath.SaturatingCast[int64](arbmath.SaturatingUMul(timePassed, speedLimit)))
inertia, _ := ps.PricingInertia()
tolerance, _ := ps.BacklogTolerance()
backlog, _ := ps.GasBacklog()
minBaseFee, _ := ps.MinBaseFeeWei()
baseFee := minBaseFee
if backlog > tolerance*speedLimit {
- excess := int64(backlog - tolerance*speedLimit)
- exponentBips := arbmath.NaturalToBips(excess) / arbmath.Bips(inertia*speedLimit)
+ excess := arbmath.SaturatingCast[int64](backlog - tolerance*speedLimit)
+ exponentBips := arbmath.NaturalToBips(excess) / arbmath.SaturatingCast[arbmath.Bips](inertia*speedLimit)
baseFee = arbmath.BigMulByBips(minBaseFee, arbmath.ApproxExpBasisPoints(exponentBips, 4))
}
_ = ps.SetBaseFeeWei(baseFee)
diff --git a/arbos/merkleAccumulator/merkleAccumulator.go b/arbos/merkleAccumulator/merkleAccumulator.go
index 2e060c5840..e62303e5fd 100644
--- a/arbos/merkleAccumulator/merkleAccumulator.go
+++ b/arbos/merkleAccumulator/merkleAccumulator.go
@@ -97,6 +97,7 @@ func (acc *MerkleAccumulator) GetPartials() ([]*common.Hash, error) {
}
partials := make([]*common.Hash, CalcNumPartials(size))
for i := range partials {
+ // #nosec G115
p, err := acc.getPartial(uint64(i))
if err != nil {
return nil, err
diff --git a/arbos/programs/api.go b/arbos/programs/api.go
index 504289322f..3e59031b2d 100644
--- a/arbos/programs/api.go
+++ b/arbos/programs/api.go
@@ -400,9 +400,9 @@ func newApiClosures(
}
startInk := takeU64()
endInk := takeU64()
- nameLen := takeU16()
- argsLen := takeU16()
- outsLen := takeU16()
+ nameLen := takeU32()
+ argsLen := takeU32()
+ outsLen := takeU32()
name := string(takeFixed(int(nameLen)))
args := takeFixed(int(argsLen))
outs := takeFixed(int(outsLen))
diff --git a/arbos/programs/cgo_test.go b/arbos/programs/cgo_test.go
new file mode 100644
index 0000000000..e16c362ef8
--- /dev/null
+++ b/arbos/programs/cgo_test.go
@@ -0,0 +1,52 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+//go:build !wasm
+// +build !wasm
+
+package programs
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestConstants(t *testing.T) {
+ err := testConstants()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// normal test will not write anything to disk
+// to test cross-compilation:
+// * run test with TEST_COMPILE=STORE on one machine
+// * copy target/testdata to the other machine
+// * run test with TEST_COMPILE=LOAD on the other machine
+func TestCompileArch(t *testing.T) {
+ compile_env := os.Getenv("TEST_COMPILE")
+ if compile_env == "" {
+ fmt.Print("use TEST_COMPILE=[STORE|LOAD] to allow store/load in compile test")
+ }
+ store := strings.Contains(compile_env, "STORE")
+ err := testCompileArch(store)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if store || strings.Contains(compile_env, "LOAD") {
+ err = testCompileLoad()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = resetNativeTarget()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = testCompileLoad()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/arbos/programs/constant_test.go b/arbos/programs/constant_test.go
deleted file mode 100644
index fe29bcf3d9..0000000000
--- a/arbos/programs/constant_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2024, Offchain Labs, Inc.
-// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
-
-package programs
-
-import "testing"
-
-func TestConstants(t *testing.T) {
- err := testConstants()
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/arbos/programs/data_pricer.go b/arbos/programs/data_pricer.go
index ed7c98556d..d82aa81f04 100644
--- a/arbos/programs/data_pricer.go
+++ b/arbos/programs/data_pricer.go
@@ -83,8 +83,8 @@ func (p *DataPricer) UpdateModel(tempBytes uint32, time uint64) (*big.Int, error
}
exponent := arbmath.OneInBips * arbmath.Bips(demand) / arbmath.Bips(inertia)
- multiplier := arbmath.ApproxExpBasisPoints(exponent, 12).Uint64()
- costPerByte := arbmath.SaturatingUMul(uint64(minPrice), multiplier) / 10000
+ multiplier := arbmath.ApproxExpBasisPoints(exponent, 12)
+ costPerByte := arbmath.UintSaturatingMulByBips(uint64(minPrice), multiplier)
costInWei := arbmath.SaturatingUMul(costPerByte, uint64(tempBytes))
return arbmath.UintToBig(costInWei), nil
}
diff --git a/arbos/programs/native.go b/arbos/programs/native.go
index a0976afb2f..5fbc512211 100644
--- a/arbos/programs/native.go
+++ b/arbos/programs/native.go
@@ -7,7 +7,7 @@
package programs
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#cgo LDFLAGS: ${SRCDIR}/../../target/lib/libstylus.a -ldl -lm
#include "arbitrator.h"
@@ -27,7 +27,9 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"github.com/offchainlabs/nitro/arbos/burn"
"github.com/offchainlabs/nitro/arbos/util"
"github.com/offchainlabs/nitro/arbutil"
@@ -44,21 +46,30 @@ type bytes32 = C.Bytes32
type rustBytes = C.RustBytes
type rustSlice = C.RustSlice
+var (
+ stylusLRUCacheSizeBytesGauge = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/size_bytes", nil)
+ stylusLRUCacheSizeCountGauge = metrics.NewRegisteredGauge("arb/arbos/stylus/cache/lru/count", nil)
+ stylusLRUCacheSizeHitsCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/hits", nil)
+ stylusLRUCacheSizeMissesCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/misses", nil)
+ stylusLRUCacheSizeDoesNotFitCounter = metrics.NewRegisteredCounter("arb/arbos/stylus/cache/lru/does_not_fit", nil)
+)
+
func activateProgram(
db vm.StateDB,
program common.Address,
codehash common.Hash,
wasm []byte,
page_limit uint16,
- version uint16,
+ stylusVersion uint16,
+ arbosVersionForGas uint64,
debug bool,
burner burn.Burner,
) (*activationInfo, error) {
- info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner.GasLeft())
+ info, asmMap, err := activateProgramInternal(db, program, codehash, wasm, page_limit, stylusVersion, arbosVersionForGas, debug, burner.GasLeft())
if err != nil {
return nil, err
}
- db.ActivateWasm(info.moduleHash, asm, module)
+ db.ActivateWasm(info.moduleHash, asmMap)
return info, nil
}
@@ -68,44 +79,90 @@ func activateProgramInternal(
codehash common.Hash,
wasm []byte,
page_limit uint16,
- version uint16,
+ stylusVersion uint16,
+ arbosVersionForGas uint64,
debug bool,
gasLeft *uint64,
-) (*activationInfo, []byte, []byte, error) {
+) (*activationInfo, map[ethdb.WasmTarget][]byte, error) {
output := &rustBytes{}
- asmLen := usize(0)
moduleHash := &bytes32{}
stylusData := &C.StylusData{}
codeHash := hashToBytes32(codehash)
- status := userStatus(C.stylus_activate(
+ status_mod := userStatus(C.stylus_activate(
goSlice(wasm),
u16(page_limit),
- u16(version),
+ u16(stylusVersion),
+ u64(arbosVersionForGas),
cbool(debug),
output,
- &asmLen,
&codeHash,
moduleHash,
stylusData,
(*u64)(gasLeft),
))
- data, msg, err := status.toResult(output.intoBytes(), debug)
+ module, msg, err := status_mod.toResult(output.intoBytes(), debug)
if err != nil {
if debug {
log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging)
}
if errors.Is(err, vm.ErrExecutionReverted) {
- return nil, nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg)
+ return nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg)
}
- return nil, nil, nil, err
+ return nil, nil, err
}
-
hash := moduleHash.toHash()
- split := int(asmLen)
- asm := data[:split]
- module := data[split:]
+ targets := db.Database().WasmTargets()
+ type result struct {
+ target ethdb.WasmTarget
+ asm []byte
+ err error
+ }
+ results := make(chan result, len(targets))
+ for _, target := range targets {
+ target := target
+ if target == rawdb.TargetWavm {
+ results <- result{target, module, nil}
+ } else {
+ go func() {
+ output := &rustBytes{}
+ status_asm := C.stylus_compile(
+ goSlice(wasm),
+ u16(stylusVersion),
+ cbool(debug),
+ goSlice([]byte(target)),
+ output,
+ )
+ asm := output.intoBytes()
+ if status_asm != 0 {
+ results <- result{target, nil, fmt.Errorf("%w: %s", ErrProgramActivation, string(asm))}
+ return
+ }
+ results <- result{target, asm, nil}
+ }()
+ }
+ }
+ asmMap := make(map[ethdb.WasmTarget][]byte, len(targets))
+ for range targets {
+ res := <-results
+ if res.err != nil {
+ err = errors.Join(res.err, err)
+ } else {
+ asmMap[res.target] = res.asm
+ }
+ }
+ if err != nil {
+ log.Error(
+ "Compilation failed for one or more targets despite activation succeeding",
+ "address", addressForLogging,
+ "codeHash", codeHash,
+ "moduleHash", hash,
+ "targets", targets,
+ "err", err,
+ )
+ panic(fmt.Sprintf("Compilation of %v failed for one or more targets despite activation succeeding: %v", addressForLogging, err))
+ }
info := &activationInfo{
moduleHash: hash,
@@ -114,11 +171,12 @@ func activateProgramInternal(
asmEstimate: uint32(stylusData.asm_estimate),
footprint: uint16(stylusData.footprint),
}
- return info, asm, module, err
+ return info, asmMap, err
}
func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) {
- localAsm, err := statedb.TryGetActivatedAsm(moduleHash)
+ localTarget := rawdb.LocalTarget()
+ localAsm, err := statedb.TryGetActivatedAsm(localTarget, moduleHash)
if err == nil && len(localAsm) > 0 {
return localAsm, nil
}
@@ -130,9 +188,12 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c
return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err)
}
- unlimitedGas := uint64(0xffffffffffff)
+ // don't charge gas
+ zeroArbosVersion := uint64(0)
+ zeroGas := uint64(0)
+
// we know program is activated, so it must be in correct version and not use too much memory
- info, asm, module, err := activateProgramInternal(statedb, addressForLogging, codeHash, wasm, pagelimit, program.version, debugMode, &unlimitedGas)
+ info, asmMap, err := activateProgramInternal(statedb, addressForLogging, codeHash, wasm, pagelimit, program.version, zeroArbosVersion, debugMode, &zeroGas)
if err != nil {
log.Error("failed to reactivate program", "address", addressForLogging, "expected moduleHash", moduleHash, "err", err)
return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err)
@@ -148,14 +209,23 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c
// stylus program is active on-chain, and was activated in the past
// so we store it directly to database
batch := statedb.Database().WasmStore().NewBatch()
- rawdb.WriteActivation(batch, moduleHash, asm, module)
+ rawdb.WriteActivation(batch, moduleHash, asmMap)
if err := batch.Write(); err != nil {
log.Error("failed writing re-activation to state", "address", addressForLogging, "err", err)
}
} else {
// program activated recently, possibly in this eth_call
// store it to statedb. It will be stored to database if statedb is commited
- statedb.ActivateWasm(info.moduleHash, asm, module)
+ statedb.ActivateWasm(info.moduleHash, asmMap)
+ }
+ asm, exists := asmMap[localTarget]
+ if !exists {
+ var availableTargets []ethdb.WasmTarget
+ for target := range asmMap {
+ availableTargets = append(availableTargets, target)
+ }
+ log.Error("failed to reactivate program - missing asm for local target", "address", addressForLogging, "local target", localTarget, "available targets", availableTargets)
+ return nil, fmt.Errorf("failed to reactivate program - missing asm for local target, address: %v, local target: %v, available targets: %v", addressForLogging, localTarget, availableTargets)
}
return asm, nil
}
@@ -181,8 +251,8 @@ func callProgram(
panic("missing asm")
}
- if db, ok := db.(*state.StateDB); ok {
- db.RecordProgram(moduleHash)
+ if stateDb, ok := db.(*state.StateDB); ok {
+ stateDb.RecordProgram(db.Database().WasmTargets(), moduleHash)
}
evmApi := newApi(interpreter, tracingInfo, scope, memoryModel)
@@ -259,8 +329,58 @@ func init() {
}
}
-func ResizeWasmLruCache(size uint32) {
- C.stylus_cache_lru_resize(u32(size))
+func SetWasmLruCacheCapacity(capacityBytes uint64) {
+ C.stylus_set_cache_lru_capacity(u64(capacityBytes))
+}
+
+// exported for testing
+type WasmLruCacheMetrics struct {
+ SizeBytes uint64
+ Count uint32
+}
+
+func GetWasmLruCacheMetrics() *WasmLruCacheMetrics {
+ metrics := C.stylus_get_lru_cache_metrics()
+
+ stylusLRUCacheSizeBytesGauge.Update(int64(metrics.size_bytes))
+ stylusLRUCacheSizeCountGauge.Update(int64(metrics.count))
+ stylusLRUCacheSizeHitsCounter.Inc(int64(metrics.hits))
+ stylusLRUCacheSizeMissesCounter.Inc(int64(metrics.misses))
+ stylusLRUCacheSizeDoesNotFitCounter.Inc(int64(metrics.does_not_fit))
+
+ return &WasmLruCacheMetrics{
+ SizeBytes: uint64(metrics.size_bytes),
+ Count: uint32(metrics.count),
+ }
+}
+
+// Used for testing
+func ClearWasmLruCache() {
+ C.stylus_clear_lru_cache()
+}
+
+// Used for testing
+func GetLruEntrySizeEstimateBytes(module []byte, version uint16, debug bool) uint64 {
+ return uint64(C.stylus_get_lru_entry_size_estimate_bytes(goSlice(module), u16(version), cbool(debug)))
+}
+
+const DefaultTargetDescriptionArm = "arm64-linux-unknown+neon"
+const DefaultTargetDescriptionX86 = "x86_64-linux-unknown+sse4.2+lzcnt+bmi"
+
+func SetTarget(name ethdb.WasmTarget, description string, native bool) error {
+ output := &rustBytes{}
+ status := userStatus(C.stylus_target_set(
+ goSlice([]byte(name)),
+ goSlice([]byte(description)),
+ output,
+ cbool(native),
+ ))
+ if status != userSuccess {
+ msg := arbutil.ToStringOrHex(output.intoBytes())
+ log.Error("failed to set stylus compilation target", "status", status, "msg", msg)
+ return fmt.Errorf("failed to set stylus compilation target, status %v: %v", status, msg)
+ }
+ return nil
}
func (value bytes32) toHash() common.Hash {
@@ -325,6 +445,7 @@ func (params *ProgParams) encode() C.StylusConfig {
func (data *EvmData) encode() C.EvmData {
return C.EvmData{
+ arbos_version: u64(data.arbosVersion),
block_basefee: hashToBytes32(data.blockBasefee),
chainid: u64(data.chainId),
block_coinbase: addressToBytes20(data.blockCoinbase),
diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go
index 6fbb630ef3..6cecb8ef63 100644
--- a/arbos/programs/native_api.go
+++ b/arbos/programs/native_api.go
@@ -7,7 +7,7 @@
package programs
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#cgo LDFLAGS: ${SRCDIR}/../../target/lib/libstylus.a -ldl -lm
#include "arbitrator.h"
diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go
index 12102bac84..06ff4137da 100644
--- a/arbos/programs/programs.go
+++ b/arbos/programs/programs.go
@@ -82,7 +82,7 @@ func (p Programs) CacheManagers() *addressSet.AddressSet {
return p.cacheManagers
}
-func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode core.MessageRunMode, debugMode bool) (
+func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVersion uint64, runMode core.MessageRunMode, debugMode bool) (
uint16, common.Hash, common.Hash, *big.Int, bool, error,
) {
statedb := evm.StateDB
@@ -116,7 +116,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c
// require the program's footprint not exceed the remaining memory budget
pageLimit := am.SaturatingUSub(params.PageLimit, statedb.GetStylusPagesOpen())
- info, err := activateProgram(statedb, address, codeHash, wasm, pageLimit, stylusVersion, debugMode, burner)
+ info, err := activateProgram(statedb, address, codeHash, wasm, pageLimit, stylusVersion, arbosVersion, debugMode, burner)
if err != nil {
return 0, codeHash, common.Hash{}, nil, true, err
}
@@ -127,6 +127,7 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c
if err != nil {
return 0, codeHash, common.Hash{}, nil, true, err
}
+
evictProgram(statedb, oldModuleHash, currentVersion, debugMode, runMode, expired)
}
if err := p.moduleHashes.Set(codeHash, info.moduleHash); err != nil {
@@ -222,6 +223,7 @@ func (p Programs) CallProgram(
}
evmData := &EvmData{
+ arbosVersion: evm.Context.ArbOSVersion,
blockBasefee: common.BigToHash(evm.Context.BaseFee),
chainId: evm.ChainConfig().ChainID.Uint64(),
blockCoinbase: evm.Context.Coinbase,
@@ -517,6 +519,7 @@ func (p Programs) progParams(version uint16, debug bool, params *StylusParams) *
}
type EvmData struct {
+ arbosVersion uint64
blockBasefee common.Hash
chainId uint64
blockCoinbase common.Address
diff --git a/arbos/programs/testcompile.go b/arbos/programs/testcompile.go
new file mode 100644
index 0000000000..615b0f3f72
--- /dev/null
+++ b/arbos/programs/testcompile.go
@@ -0,0 +1,268 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+//go:build !wasm
+// +build !wasm
+
+package programs
+
+// This file exists because cgo isn't allowed in tests
+
+/*
+#cgo CFLAGS: -g -I../../target/include/
+#include "arbitrator.h"
+
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef size_t usize;
+
+void handleReqWrap(usize api, u32 req_type, RustSlice *data, u64 *out_cost, GoSliceData *out_result, GoSliceData *out_raw_data);
+*/
+import "C"
+import (
+ "fmt"
+ "os"
+ "runtime"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
+)
+
+func Wat2Wasm(wat []byte) ([]byte, error) {
+ output := &rustBytes{}
+
+ status := C.wat_to_wasm(goSlice(wat), output)
+
+ if status != 0 {
+ return nil, fmt.Errorf("failed reading wat file: %v", string(output.intoBytes()))
+ }
+
+ return output.intoBytes(), nil
+}
+
+func testCompileArch(store bool) error {
+
+ localTarget := rawdb.LocalTarget()
+ nativeArm64 := localTarget == rawdb.TargetArm64
+ nativeAmd64 := localTarget == rawdb.TargetAmd64
+
+ arm64CompileName := []byte(rawdb.TargetArm64)
+ amd64CompileName := []byte(rawdb.TargetAmd64)
+
+ arm64TargetString := []byte(DefaultTargetDescriptionArm)
+ amd64TargetString := []byte(DefaultTargetDescriptionX86)
+
+ output := &rustBytes{}
+
+ _, err := fmt.Print("starting test.. native arm? ", nativeArm64, " amd? ", nativeAmd64, " GOARCH/GOOS: ", runtime.GOARCH+"/"+runtime.GOOS, "\n")
+ if err != nil {
+ return err
+ }
+
+ status := C.stylus_target_set(goSlice(arm64CompileName),
+ goSlice(arm64TargetString),
+ output,
+ cbool(nativeArm64))
+
+ if status != 0 {
+ return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes()))
+ }
+
+ status = C.stylus_target_set(goSlice(amd64CompileName),
+ goSlice(amd64TargetString),
+ output,
+ cbool(nativeAmd64))
+
+ if status != 0 {
+ return fmt.Errorf("failed setting compilation target amd: %v", string(output.intoBytes()))
+ }
+
+ source, err := os.ReadFile("../../arbitrator/stylus/tests/add.wat")
+ if err != nil {
+ return fmt.Errorf("failed reading stylus contract: %w", err)
+ }
+
+ wasm, err := Wat2Wasm(source)
+ if err != nil {
+ return err
+ }
+
+ if store {
+ _, err := fmt.Print("storing compiled files to ../../target/testdata/\n")
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll("../../target/testdata", 0755)
+ if err != nil {
+ return err
+ }
+ }
+
+ status = C.stylus_compile(
+ goSlice(wasm),
+ u16(1),
+ cbool(true),
+ goSlice([]byte("booga")),
+ output,
+ )
+ if status == 0 {
+ return fmt.Errorf("succeeded compiling non-existent arch: %v", string(output.intoBytes()))
+ }
+
+ status = C.stylus_compile(
+ goSlice(wasm),
+ u16(1),
+ cbool(true),
+ goSlice([]byte{}),
+ output,
+ )
+ if status != 0 {
+ return fmt.Errorf("failed compiling native: %v", string(output.intoBytes()))
+ }
+ if store && !nativeAmd64 && !nativeArm64 {
+ _, err := fmt.Printf("writing host file\n")
+ if err != nil {
+ return err
+ }
+
+ err = os.WriteFile("../../target/testdata/host.bin", output.intoBytes(), 0644)
+ if err != nil {
+ return err
+ }
+ }
+
+ status = C.stylus_compile(
+ goSlice(wasm),
+ u16(1),
+ cbool(true),
+ goSlice(arm64CompileName),
+ output,
+ )
+ if status != 0 {
+ return fmt.Errorf("failed compiling arm: %v", string(output.intoBytes()))
+ }
+ if store {
+ _, err := fmt.Printf("writing arm file\n")
+ if err != nil {
+ return err
+ }
+
+ err = os.WriteFile("../../target/testdata/arm64.bin", output.intoBytes(), 0644)
+ if err != nil {
+ return err
+ }
+ }
+
+ status = C.stylus_compile(
+ goSlice(wasm),
+ u16(1),
+ cbool(true),
+ goSlice(amd64CompileName),
+ output,
+ )
+ if status != 0 {
+ return fmt.Errorf("failed compiling amd: %v", string(output.intoBytes()))
+ }
+ if store {
+ _, err := fmt.Printf("writing amd64 file\n")
+ if err != nil {
+ return err
+ }
+
+ err = os.WriteFile("../../target/testdata/amd64.bin", output.intoBytes(), 0644)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func resetNativeTarget() error {
+ output := &rustBytes{}
+
+ _, err := fmt.Print("resetting native target\n")
+ if err != nil {
+ return err
+ }
+
+ localCompileName := []byte("local")
+
+ status := C.stylus_target_set(goSlice(localCompileName),
+ goSlice([]byte{}),
+ output,
+ cbool(true))
+
+ if status != 0 {
+ return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes()))
+ }
+
+ return nil
+}
+
+func testCompileLoad() error {
+ filePath := "../../target/testdata/host.bin"
+ localTarget := rawdb.LocalTarget()
+ if localTarget == rawdb.TargetArm64 {
+ filePath = "../../target/testdata/arm64.bin"
+ }
+ if localTarget == rawdb.TargetAmd64 {
+ filePath = "../../target/testdata/amd64.bin"
+ }
+
+ _, err := fmt.Print("starting load test. FilePath: ", filePath, " GOARCH/GOOS: ", runtime.GOARCH+"/"+runtime.GOOS, "\n")
+ if err != nil {
+ return err
+ }
+
+ localAsm, err := os.ReadFile(filePath)
+ if err != nil {
+ return err
+ }
+
+ calldata := []byte{}
+
+ evmData := EvmData{}
+ progParams := ProgParams{
+ MaxDepth: 10000,
+ InkPrice: 1,
+ DebugMode: true,
+ }
+ reqHandler := C.NativeRequestHandler{
+ handle_request_fptr: (*[0]byte)(C.handleReqWrap),
+ id: 0,
+ }
+
+ inifiniteGas := u64(0xfffffffffffffff)
+
+ output := &rustBytes{}
+
+ _, err = fmt.Print("launching program..\n")
+ if err != nil {
+ return err
+ }
+
+ status := userStatus(C.stylus_call(
+ goSlice(localAsm),
+ goSlice(calldata),
+ progParams.encode(),
+ reqHandler,
+ evmData.encode(),
+ cbool(true),
+ output,
+ &inifiniteGas,
+ u32(0),
+ ))
+
+ _, err = fmt.Print("returned: ", status, "\n")
+ if err != nil {
+ return err
+ }
+
+ _, msg, err := status.toResult(output.intoBytes(), true)
+ if status == userFailure {
+ err = fmt.Errorf("%w: %v", err, msg)
+ }
+
+ return err
+}
diff --git a/arbos/programs/testconstants.go b/arbos/programs/testconstants.go
index 1ab0e6e93b..44f69a52de 100644
--- a/arbos/programs/testconstants.go
+++ b/arbos/programs/testconstants.go
@@ -9,7 +9,7 @@ package programs
// This file exists because cgo isn't allowed in tests
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#include "arbitrator.h"
*/
import "C"
diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go
index f7191dca8f..12c23a724c 100644
--- a/arbos/programs/wasm.go
+++ b/arbos/programs/wasm.go
@@ -36,7 +36,7 @@ type rustConfig byte
type rustModule byte
type rustEvmData byte
-//go:wasmimport programs activate
+//go:wasmimport programs activate_v2
func programActivate(
wasm_ptr unsafe.Pointer,
wasm_size uint32,
@@ -44,7 +44,8 @@ func programActivate(
asm_estimation_ptr unsafe.Pointer,
init_gas_ptr unsafe.Pointer,
cached_init_gas_ptr unsafe.Pointer,
- version uint32,
+ stylusVersion uint32,
+ arbosVersion uint64,
debug uint32,
codehash unsafe.Pointer,
module_hash_ptr unsafe.Pointer,
@@ -59,7 +60,8 @@ func activateProgram(
codehash common.Hash,
wasm []byte,
pageLimit u16,
- version u16,
+ stylusVersion u16,
+ arbosVersion uint64,
debug bool,
burner burn.Burner,
) (*activationInfo, error) {
@@ -79,7 +81,8 @@ func activateProgram(
unsafe.Pointer(&asmEstimate),
unsafe.Pointer(&initGas),
unsafe.Pointer(&cachedInitGas),
- uint32(version),
+ uint32(stylusVersion),
+ arbosVersion,
debugMode,
arbutil.SliceToUnsafePointer(codehash[:]),
arbutil.SliceToUnsafePointer(moduleHash[:]),
@@ -151,6 +154,8 @@ func callProgram(
return retData, err
}
+func GetWasmLruCacheMetrics() {}
+
func CallProgramLoop(
moduleHash common.Hash,
calldata []byte,
diff --git a/arbos/programs/wasm_api.go b/arbos/programs/wasm_api.go
index d7bac056c0..a4ebc1f778 100644
--- a/arbos/programs/wasm_api.go
+++ b/arbos/programs/wasm_api.go
@@ -20,8 +20,9 @@ func createStylusConfig(version uint32, max_depth uint32, ink_price uint32, debu
type evmDataHandler uint64
-//go:wasmimport programs create_evm_data
+//go:wasmimport programs create_evm_data_v2
func createEvmData(
+ arbosVersion uint64,
blockBaseFee unsafe.Pointer,
chainid uint64,
blockCoinbase unsafe.Pointer,
@@ -45,6 +46,7 @@ func (params *ProgParams) createHandler() stylusConfigHandler {
func (data *EvmData) createHandler() evmDataHandler {
return createEvmData(
+ data.arbosVersion,
arbutil.SliceToUnsafePointer(data.blockBasefee[:]),
data.chainId,
arbutil.SliceToUnsafePointer(data.blockCoinbase[:]),
diff --git a/arbos/programs/wasmstorehelper.go b/arbos/programs/wasmstorehelper.go
index 9e69178694..c2d1aa65b0 100644
--- a/arbos/programs/wasmstorehelper.go
+++ b/arbos/programs/wasmstorehelper.go
@@ -17,12 +17,12 @@ import (
// SaveActiveProgramToWasmStore is used to save active stylus programs to wasm store during rebuilding
func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash common.Hash, code []byte, time uint64, debugMode bool, rebuildingStartBlockTime uint64) error {
- params, err := p.Params()
+ progParams, err := p.Params()
if err != nil {
return err
}
- program, err := p.getActiveProgram(codeHash, time, params)
+ program, err := p.getActiveProgram(codeHash, time, progParams)
if err != nil {
// The program is not active so return early
log.Info("program is not active, getActiveProgram returned error, hence do not include in rebuilding", "err", err)
@@ -43,9 +43,10 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash
return err
}
+ targets := statedb.Database().WasmTargets()
// If already in wasm store then return early
- localAsm, err := statedb.TryGetActivatedAsm(moduleHash)
- if err == nil && len(localAsm) > 0 {
+ _, err = statedb.TryGetActivatedAsmMap(targets, moduleHash)
+ if err == nil {
return nil
}
@@ -55,10 +56,13 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash
return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err)
}
- unlimitedGas := uint64(0xffffffffffff)
+ // don't charge gas
+ zeroArbosVersion := uint64(0)
+ zeroGas := uint64(0)
+
// We know program is activated, so it must be in correct version and not use too much memory
// Empty program address is supplied because we dont have access to this during rebuilding of wasm store
- info, asm, module, err := activateProgramInternal(statedb, common.Address{}, codeHash, wasm, params.PageLimit, program.version, debugMode, &unlimitedGas)
+ info, asmMap, err := activateProgramInternal(statedb, common.Address{}, codeHash, wasm, progParams.PageLimit, program.version, zeroArbosVersion, debugMode, &zeroGas)
if err != nil {
log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "err", err)
return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err)
@@ -70,7 +74,7 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash
}
batch := statedb.Database().WasmStore().NewBatch()
- rawdb.WriteActivation(batch, moduleHash, asm, module)
+ rawdb.WriteActivation(batch, moduleHash, asmMap)
if err := batch.Write(); err != nil {
log.Error("failed writing re-activation to state while rebuilding wasm store", "err", err)
return err
diff --git a/arbos/retryable_test.go b/arbos/retryable_test.go
index ddb88348dd..2eccaea6c2 100644
--- a/arbos/retryable_test.go
+++ b/arbos/retryable_test.go
@@ -38,6 +38,7 @@ func TestRetryableLifecycle(t *testing.T) {
retryableState := state.RetryableState()
lifetime := uint64(retryables.RetryableLifetimeSeconds)
+ // #nosec G115
timestampAtCreation := uint64(rand.Int63n(1 << 16))
timeoutAtCreation := timestampAtCreation + lifetime
currentTime := timeoutAtCreation
@@ -57,6 +58,7 @@ func TestRetryableLifecycle(t *testing.T) {
checkQueueSize := func(expected int, message string) {
timeoutQueueSize, err := retryableState.TimeoutQueue.Size()
Require(t, err)
+ // #nosec G115
if timeoutQueueSize != uint64(expected) {
Fail(t, currentTime, message, timeoutQueueSize)
}
@@ -167,6 +169,7 @@ func TestRetryableCleanup(t *testing.T) {
callvalue := big.NewInt(0)
calldata := testhelpers.RandomizeSlice(make([]byte, rand.Intn(1<<12)))
+ // #nosec G115
timeout := uint64(rand.Int63n(1 << 16))
timestamp := 2 * timeout
diff --git a/arbos/retryables/retryable.go b/arbos/retryables/retryable.go
index e1cfe48bcf..5938244782 100644
--- a/arbos/retryables/retryable.go
+++ b/arbos/retryables/retryable.go
@@ -367,5 +367,7 @@ func RetryableEscrowAddress(ticketId common.Hash) common.Address {
}
func RetryableSubmissionFee(calldataLengthInBytes int, l1BaseFee *big.Int) *big.Int {
- return arbmath.BigMulByUint(l1BaseFee, uint64(1400+6*calldataLengthInBytes))
+ // This can't overflow because calldataLengthInBytes would need to be 3 exabytes
+ // #nosec G115
+ return arbmath.BigMulByUint(l1BaseFee, 1400+6*uint64(calldataLengthInBytes))
}
diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go
index 6e6c976644..bc16491af0 100644
--- a/arbos/storage/storage.go
+++ b/arbos/storage/storage.go
@@ -156,11 +156,6 @@ func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) {
return s.GetUint64(util.UintToHash(key))
}
-func (s *Storage) GetUint32(key common.Hash) (uint32, error) {
- value, err := s.Get(key)
- return uint32(value.Big().Uint64()), err
-}
-
func (s *Storage) Set(key common.Hash, value common.Hash) error {
if s.burner.ReadOnly() {
log.Error("Read-only burner attempted to mutate state", "key", key, "value", value)
@@ -327,11 +322,11 @@ func (s *Storage) Burner() burn.Burner {
}
func (s *Storage) Keccak(data ...[]byte) ([]byte, error) {
- byteCount := 0
+ var byteCount uint64
for _, part := range data {
- byteCount += len(part)
+ byteCount += uint64(len(part))
}
- cost := 30 + 6*arbmath.WordsForBytes(uint64(byteCount))
+ cost := 30 + 6*arbmath.WordsForBytes(byteCount)
if err := s.burner.Burn(cost); err != nil {
return nil, err
}
@@ -420,10 +415,12 @@ func (sbu *StorageBackedInt64) Get() (int64, error) {
if !raw.Big().IsUint64() {
panic("invalid value found in StorageBackedInt64 storage")
}
+ // #nosec G115
return int64(raw.Big().Uint64()), err // see implementation note above
}
func (sbu *StorageBackedInt64) Set(value int64) error {
+ // #nosec G115
return sbu.StorageSlot.Set(util.UintToHash(uint64(value))) // see implementation note above
}
@@ -460,7 +457,7 @@ func (sbu *StorageBackedUBips) Get() (arbmath.UBips, error) {
}
func (sbu *StorageBackedUBips) Set(bips arbmath.UBips) error {
- return sbu.backing.Set(bips.Uint64())
+ return sbu.backing.Set(uint64(bips))
}
type StorageBackedUint16 struct {
@@ -477,6 +474,7 @@ func (sbu *StorageBackedUint16) Get() (uint16, error) {
if !big.IsUint64() || big.Uint64() > math.MaxUint16 {
panic("expected uint16 compatible value in storage")
}
+ // #nosec G115
return uint16(big.Uint64()), err
}
@@ -517,6 +515,7 @@ func (sbu *StorageBackedUint32) Get() (uint32, error) {
if !big.IsUint64() || big.Uint64() > math.MaxUint32 {
panic("expected uint32 compatible value in storage")
}
+ // #nosec G115
return uint32(big.Uint64()), err
}
diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go
index b08c7c5d30..d6c35339f6 100644
--- a/arbos/tx_processor.go
+++ b/arbos/tx_processor.go
@@ -532,6 +532,20 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) {
refund := func(refundFrom common.Address, amount *big.Int) {
const errLog = "fee address doesn't have enough funds to give user refund"
+ logMissingRefund := func(err error) {
+ if !errors.Is(err, vm.ErrInsufficientBalance) {
+ log.Error("unexpected error refunding balance", "err", err, "feeAddress", refundFrom)
+ return
+ }
+ logLevel := log.Error
+ isContract := p.evm.StateDB.GetCodeSize(refundFrom) > 0
+ if isContract {
+ // It's expected that the balance might not still be in this address if it's a contract.
+ logLevel = log.Debug
+ }
+ logLevel(errLog, "err", err, "feeAddress", refundFrom)
+ }
+
// Refund funds to the fee refund address without overdrafting the L1 deposit.
toRefundAddr := takeFunds(maxRefund, amount)
err = util.TransferBalance(&refundFrom, &inner.RefundTo, toRefundAddr, p.evm, scenario, "refund")
@@ -539,13 +553,13 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) {
// Normally the network fee address should be holding any collected fees.
// However, in theory, they could've been transferred out during the redeem attempt.
// If the network fee address doesn't have the necessary balance, log an error and don't give a refund.
- log.Error(errLog, "err", err, "feeAddress", refundFrom)
+ logMissingRefund(err)
}
// Any extra refund can't be given to the fee refund address if it didn't come from the L1 deposit.
// Instead, give the refund to the retryable from address.
err = util.TransferBalance(&refundFrom, &inner.From, arbmath.BigSub(amount, toRefundAddr), p.evm, scenario, "refund")
if err != nil {
- log.Error(errLog, "err", err, "feeAddress", refundFrom)
+ logMissingRefund(err)
}
}
diff --git a/arbos/util/storage_cache.go b/arbos/util/storage_cache.go
index bf05a5824d..9573d1ffc7 100644
--- a/arbos/util/storage_cache.go
+++ b/arbos/util/storage_cache.go
@@ -5,6 +5,7 @@ package util
import (
"github.com/ethereum/go-ethereum/common"
+ "slices"
)
type storageCacheEntry struct {
@@ -67,6 +68,10 @@ func (s *storageCache) Flush() []storageCacheStores {
})
}
}
+ sortFunc := func(a, b storageCacheStores) int {
+ return a.Key.Cmp(b.Key)
+ }
+ slices.SortFunc(stores, sortFunc)
return stores
}
diff --git a/arbos/util/storage_cache_test.go b/arbos/util/storage_cache_test.go
index 1cc4ea14ec..9fd452851d 100644
--- a/arbos/util/storage_cache_test.go
+++ b/arbos/util/storage_cache_test.go
@@ -4,7 +4,6 @@
package util
import (
- "bytes"
"slices"
"testing"
@@ -76,7 +75,7 @@ func TestStorageCache(t *testing.T) {
{Key: keys[2], Value: values[2]},
}
sortFunc := func(a, b storageCacheStores) int {
- return bytes.Compare(a.Key.Bytes(), b.Key.Bytes())
+ return a.Key.Cmp(b.Key)
}
slices.SortFunc(stores, sortFunc)
slices.SortFunc(expected, sortFunc)
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 753ca19cd6..b58a7420b7 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -246,7 +246,7 @@ func (r *inboxMultiplexer) IsCachedSegementLast() bool {
if r.delayedMessagesRead < seqMsg.afterDelayedMessages {
return false
}
- for segmentNum := int(r.cachedSegmentNum) + 1; segmentNum < len(seqMsg.segments); segmentNum++ {
+ for segmentNum := r.cachedSegmentNum + 1; segmentNum < uint64(len(seqMsg.segments)); segmentNum++ {
segment := seqMsg.segments[segmentNum]
if len(segment) == 0 {
continue
@@ -276,7 +276,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error)
if segmentNum >= uint64(len(seqMsg.segments)) {
break
}
- segment = seqMsg.segments[int(segmentNum)]
+ segment = seqMsg.segments[segmentNum]
if len(segment) == 0 {
segmentNum++
continue
@@ -322,7 +322,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error)
log.Warn("reading virtual delayed message segment", "delayedMessagesRead", r.delayedMessagesRead, "afterDelayedMessages", seqMsg.afterDelayedMessages)
segment = []byte{BatchSegmentKindDelayedMessages}
} else {
- segment = seqMsg.segments[int(segmentNum)]
+ segment = seqMsg.segments[segmentNum]
}
if len(segment) == 0 {
log.Error("empty sequencer message segment", "sequence", r.cachedSegmentNum, "segmentNum", segmentNum)
diff --git a/arbutil/block_message_relation.go b/arbutil/block_message_relation.go
index a69f9079ee..dcf4c86084 100644
--- a/arbutil/block_message_relation.go
+++ b/arbutil/block_message_relation.go
@@ -11,9 +11,11 @@ func BlockNumberToMessageCount(blockNumber uint64, genesisBlockNumber uint64) Me
// Block number must correspond to a message count, meaning it may not be less than -1
func SignedBlockNumberToMessageCount(blockNumber int64, genesisBlockNumber uint64) MessageIndex {
+ // #nosec G115
return MessageIndex(uint64(blockNumber+1) - genesisBlockNumber)
}
func MessageCountToBlockNumber(messageCount MessageIndex, genesisBlockNumber uint64) int64 {
+ // #nosec G115
return int64(uint64(messageCount)+genesisBlockNumber) - 1
}
diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go
index 05323ed183..c8770e2034 100644
--- a/arbutil/correspondingl1blocknumber.go
+++ b/arbutil/correspondingl1blocknumber.go
@@ -19,7 +19,12 @@ func ParentHeaderToL1BlockNumber(header *types.Header) uint64 {
return header.Number.Uint64()
}
-func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, parentBlockNumber uint64) (uint64, error) {
+type ParentHeaderFetcher interface {
+ HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
+}
+
+func CorrespondingL1BlockNumber(ctx context.Context, client ParentHeaderFetcher, parentBlockNumber uint64) (uint64, error) {
+ // #nosec G115
header, err := client.HeaderByNumber(ctx, big.NewInt(int64(parentBlockNumber)))
if err != nil {
return 0, fmt.Errorf("error getting L1 block number %d header : %w", parentBlockNumber, err)
diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go
index 8270a628bd..c5728967c7 100644
--- a/arbutil/transaction_data.go
+++ b/arbutil/transaction_data.go
@@ -8,9 +8,10 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
)
-func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (*types.Transaction, error) {
+func GetLogTransaction(ctx context.Context, client *ethclient.Client, log types.Log) (*types.Transaction, error) {
tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex)
if err != nil {
return nil, err
@@ -22,7 +23,7 @@ func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (
}
// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long
-func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) {
+func GetLogEmitterTxData(ctx context.Context, client *ethclient.Client, log types.Log) ([]byte, error) {
tx, err := GetLogTransaction(ctx, client, log)
if err != nil {
return nil, err
diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go
index 4b4819156d..80dd356b24 100644
--- a/arbutil/wait_for_l1.go
+++ b/arbutil/wait_for_l1.go
@@ -10,27 +10,13 @@ import (
"math/big"
"github.com/ethereum/go-ethereum"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/ethclient"
)
-type L1Interface interface {
- bind.ContractBackend
- bind.BlockHashContractCaller
- ethereum.ChainReader
- ethereum.ChainStateReader
- ethereum.TransactionReader
- TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error)
- BlockNumber(ctx context.Context) (uint64, error)
- PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error)
- ChainID(ctx context.Context) (*big.Int, error)
- Client() rpc.ClientInterface
-}
-
-func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) {
+func SendTxAsCall(ctx context.Context, client *ethclient.Client, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) {
var gas uint64
if unlimitedGas {
gas = 0
@@ -50,7 +36,7 @@ func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction
return client.CallContract(ctx, callMsg, blockNum)
}
-func GetPendingCallBlockNumber(ctx context.Context, client L1Interface) (*big.Int, error) {
+func GetPendingCallBlockNumber(ctx context.Context, client *ethclient.Client) (*big.Int, error) {
msg := ethereum.CallMsg{
// Pretend to be a contract deployment to execute EVM code without calling a contract.
To: nil,
@@ -70,7 +56,7 @@ func GetPendingCallBlockNumber(ctx context.Context, client L1Interface) (*big.In
return new(big.Int).SetBytes(callRes), nil
}
-func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transaction, txRes *types.Receipt) error {
+func DetailTxError(ctx context.Context, client *ethclient.Client, tx *types.Transaction, txRes *types.Receipt) error {
// Re-execute the transaction as a call to get a better error
if ctx.Err() != nil {
return ctx.Err()
@@ -96,7 +82,7 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio
return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash())
}
-func DetailTxErrorUsingCallMsg(ctx context.Context, client L1Interface, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error {
+func DetailTxErrorUsingCallMsg(ctx context.Context, client *ethclient.Client, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error {
// Re-execute the transaction as a call to get a better error
if ctx.Err() != nil {
return ctx.Err()
diff --git a/audits/ConsenSys_Diligence_Arbitrum_Contracts_11_2021.pdf b/audits/ConsenSys_Diligence_Arbitrum_Contracts_11_2021.pdf
deleted file mode 100644
index 4e93ced017..0000000000
Binary files a/audits/ConsenSys_Diligence_Arbitrum_Contracts_11_2021.pdf and /dev/null differ
diff --git a/audits/ConsenSys_Diligence_Nitro_Contracts_5_2022.pdf b/audits/ConsenSys_Diligence_Nitro_Contracts_5_2022.pdf
deleted file mode 100644
index 7fb9bc8f61..0000000000
Binary files a/audits/ConsenSys_Diligence_Nitro_Contracts_5_2022.pdf and /dev/null differ
diff --git a/audits/Trail_Of_Bits_Nitro_10_2022.pdf b/audits/Trail_Of_Bits_Nitro_10_2022.pdf
deleted file mode 100644
index 06a0516928..0000000000
Binary files a/audits/Trail_Of_Bits_Nitro_10_2022.pdf and /dev/null differ
diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go
index 1e4a06fe90..b43999a7db 100644
--- a/blocks_reexecutor/blocks_reexecutor.go
+++ b/blocks_reexecutor/blocks_reexecutor.go
@@ -102,7 +102,8 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block
if rng > end-start {
rng = end - start
}
- start += uint64(rand.Intn(int(end - start - rng + 1)))
+ // #nosec G115
+ start += uint64(rand.Int63n(int64(end - start - rng + 1)))
end = start + rng
}
// Inclusive of block reexecution [start, end]
@@ -112,6 +113,7 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block
}
// Divide work equally among available threads when BlocksPerThread is zero
if c.BlocksPerThread == 0 {
+ // #nosec G115
work := (end - start) / uint64(c.Room)
if work > 0 {
blocksPerThread = work
diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go
index 7d27c57fe9..4e97ca8cd0 100644
--- a/broadcastclient/broadcastclient.go
+++ b/broadcastclient/broadcastclient.go
@@ -280,6 +280,18 @@ func (bc *BroadcastClient) connect(ctx context.Context, nextSeqNum arbutil.Messa
MinVersion: tls.VersionTLS12,
},
Extensions: extensions,
+ NetDial: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ var netDialer net.Dialer
+ // For tcp connections, prefer IPv4 over IPv6 to avoid rate limiting issues
+ if network == "tcp" {
+ conn, err := netDialer.DialContext(ctx, "tcp4", addr)
+ if err == nil {
+ return conn, nil
+ }
+ return netDialer.DialContext(ctx, "tcp6", addr)
+ }
+ return netDialer.DialContext(ctx, network, addr)
+ },
}
if bc.isShuttingDown() {
diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go
index 44b48192ab..a499628cd5 100644
--- a/broadcastclient/broadcastclient_test.go
+++ b/broadcastclient/broadcastclient_test.go
@@ -105,6 +105,7 @@ func testReceiveMessages(t *testing.T, clientCompression bool, serverCompression
go func() {
for i := 0; i < messageCount; i++ {
+ // #nosec G115
Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil))
}
}()
@@ -156,6 +157,7 @@ func TestInvalidSignature(t *testing.T) {
go func() {
for i := 0; i < messageCount; i++ {
+ // #nosec G115
Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil))
}
}()
diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go
index f6501105c2..b7b935fb7a 100644
--- a/broadcaster/backlog/backlog.go
+++ b/broadcaster/backlog/backlog.go
@@ -97,6 +97,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
if err != nil {
log.Warn("error calculating backlogSizeInBytes", "err", err)
} else {
+ // #nosec G115
backlogSizeInBytesGauge.Update(int64(size))
}
}
@@ -108,6 +109,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
segment = newBacklogSegment()
b.head.Store(segment)
b.tail.Store(segment)
+ // #nosec G115
confirmedSequenceNumberGauge.Update(int64(msg.SequenceNumber))
}
@@ -143,9 +145,11 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
}
lookupByIndex.Store(uint64(msg.SequenceNumber), segment)
b.messageCount.Add(1)
+ // #nosec G115
backlogSizeInBytesGauge.Inc(int64(msg.Size()))
}
+ // #nosec G115
backlogSizeGauge.Update(int64(b.Count()))
return nil
}
@@ -174,7 +178,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) {
}
bm := &m.BroadcastMessage{Version: 1}
- required := int(end-start) + 1
+ required := end - start + 1
for {
segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End()))
if err != nil {
@@ -183,7 +187,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) {
bm.Messages = append(bm.Messages, segMsgs...)
segment = segment.Next()
- if len(bm.Messages) == required {
+ if uint64(len(bm.Messages)) == required {
break
} else if segment == nil {
return nil, errOutOfBounds
@@ -213,6 +217,7 @@ func (b *backlog) delete(confirmed uint64) {
return
}
+ // #nosec G115
confirmedSequenceNumberGauge.Update(int64(confirmed))
// find the segment containing the confirmed message
diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go
index ee712de9ed..d74389f692 100644
--- a/broadcaster/backlog/backlog_test.go
+++ b/broadcaster/backlog/backlog_test.go
@@ -33,8 +33,8 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK
}
}
- expLen := len(lookupKeys)
- actualLen := int(b.Count())
+ expLen := uint64(len(lookupKeys))
+ actualLen := b.Count()
if expLen != actualLen {
t.Errorf("expected length of lookupByIndex map (%d) does not equal actual length (%d)", expLen, actualLen)
}
diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go
index ba95f2d8af..4fe8657bfa 100644
--- a/broadcaster/broadcaster.go
+++ b/broadcaster/broadcaster.go
@@ -104,6 +104,7 @@ func (b *Broadcaster) BroadcastMessages(
}()
var feedMessages []*m.BroadcastFeedMessage
for i, msg := range messagesWithBlockHash {
+ // #nosec G115
bfm, err := b.NewBroadcastFeedMessage(msg.MessageWithMeta, seq+arbutil.MessageIndex(i), msg.BlockHash)
if err != nil {
return err
@@ -145,6 +146,7 @@ func (b *Broadcaster) ListenerAddr() net.Addr {
}
func (b *Broadcaster) GetCachedMessageCount() int {
+ // #nosec G115
return int(b.backlog.Count())
}
diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go
index aca9598754..1e26e6da5e 100644
--- a/broadcaster/message/message.go
+++ b/broadcaster/message/message.go
@@ -41,6 +41,7 @@ type BroadcastFeedMessage struct {
}
func (m *BroadcastFeedMessage) Size() uint64 {
+ // #nosec G115
return uint64(len(m.Signature) + len(m.Message.Message.L2msg) + 160)
}
diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json
index 7d47d13e84..f862c6dfbf 100644
--- a/cmd/chaininfo/arbitrum_chain_info.json
+++ b/cmd/chaininfo/arbitrum_chain_info.json
@@ -164,7 +164,7 @@
"EnableArbOS": true,
"AllowDebugPrecompiles": true,
"DataAvailabilityCommittee": false,
- "InitialArbOSVersion": 11,
+ "InitialArbOSVersion": 32,
"InitialChainOwner": "0x0000000000000000000000000000000000000000",
"GenesisBlockNum": 0
}
@@ -196,7 +196,7 @@
"EnableArbOS": true,
"AllowDebugPrecompiles": true,
"DataAvailabilityCommittee": true,
- "InitialArbOSVersion": 11,
+ "InitialArbOSVersion": 32,
"InitialChainOwner": "0x0000000000000000000000000000000000000000",
"GenesisBlockNum": 0
}
diff --git a/cmd/conf/database.go b/cmd/conf/database.go
index a75cca77d5..af18bacd57 100644
--- a/cmd/conf/database.go
+++ b/cmd/conf/database.go
@@ -43,7 +43,7 @@ func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Int(prefix+".handles", PersistentConfigDefault.Handles, "number of file descriptor handles to use for the database")
f.String(prefix+".ancient", PersistentConfigDefault.Ancient, "directory of ancient where the chain freezer can be opened")
f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use. If set to empty string the database type will be autodetected and if no pre-existing database is found it will default to creating new pebble database ('leveldb', 'pebble' or '' = auto-detect)")
- PebbleConfigAddOptions(prefix+".pebble", f)
+ PebbleConfigAddOptions(prefix+".pebble", f, &PersistentConfigDefault.Pebble)
}
func (c *PersistentConfig) ResolveDirectoryNames() error {
@@ -120,9 +120,9 @@ var PebbleConfigDefault = PebbleConfig{
Experimental: PebbleExperimentalConfigDefault,
}
-func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) {
- f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions")
- PebbleExperimentalConfigAddOptions(prefix+".experimental", f)
+func PebbleConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleConfig) {
+ f.Int(prefix+".max-concurrent-compactions", defaultConfig.MaxConcurrentCompactions, "maximum number of concurrent compactions")
+ PebbleExperimentalConfigAddOptions(prefix+".experimental", f, &defaultConfig.Experimental)
}
func (c *PebbleConfig) Validate() error {
@@ -189,29 +189,29 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{
ForceWriterParallelism: false,
}
-func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) {
- f.Int(prefix+".bytes-per-sync", PebbleExperimentalConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background")
- f.Int(prefix+".l0-compaction-file-threshold", PebbleExperimentalConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction")
- f.Int(prefix+".l0-compaction-threshold", PebbleExperimentalConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction")
- f.Int(prefix+".l0-stop-writes-threshold", PebbleExperimentalConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached")
- f.Int64(prefix+".l-base-max-bytes", PebbleExperimentalConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.")
- f.Int(prefix+".mem-table-stop-writes-threshold", PebbleExperimentalConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables")
- f.Bool(prefix+".disable-automatic-compactions", PebbleExperimentalConfigDefault.DisableAutomaticCompactions, "disables automatic compactions")
- f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background")
- f.String(prefix+".wal-dir", PebbleExperimentalConfigDefault.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables")
- f.Int(prefix+".wal-min-sync-interval", PebbleExperimentalConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.")
- f.Int(prefix+".target-byte-deletion-rate", PebbleExperimentalConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).")
- f.Int(prefix+".block-size", PebbleExperimentalConfigDefault.BlockSize, "target uncompressed size in bytes of each table block")
- f.Int(prefix+".index-block-size", PebbleExperimentalConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32))
- f.Int64(prefix+".target-file-size", PebbleExperimentalConfigDefault.TargetFileSize, "target file size for the level 0")
- f.Bool(prefix+".target-file-size-equal-levels", PebbleExperimentalConfigDefault.TargetFileSizeEqualLevels, "if true same target-file-size will be uses for all levels, otherwise target size for layer n = 2 * target size for layer n - 1")
+func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleExperimentalConfig) {
+ f.Int(prefix+".bytes-per-sync", defaultConfig.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background")
+ f.Int(prefix+".l0-compaction-file-threshold", defaultConfig.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction")
+ f.Int(prefix+".l0-compaction-threshold", defaultConfig.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction")
+ f.Int(prefix+".l0-stop-writes-threshold", defaultConfig.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached")
+ f.Int64(prefix+".l-base-max-bytes", defaultConfig.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.")
+ f.Int(prefix+".mem-table-stop-writes-threshold", defaultConfig.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables")
+ f.Bool(prefix+".disable-automatic-compactions", defaultConfig.DisableAutomaticCompactions, "disables automatic compactions")
+ f.Int(prefix+".wal-bytes-per-sync", defaultConfig.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background")
+ f.String(prefix+".wal-dir", defaultConfig.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables")
+ f.Int(prefix+".wal-min-sync-interval", defaultConfig.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.")
+ f.Int(prefix+".target-byte-deletion-rate", defaultConfig.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).")
+ f.Int(prefix+".block-size", defaultConfig.BlockSize, "target uncompressed size in bytes of each table block")
+ f.Int(prefix+".index-block-size", defaultConfig.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32))
+ f.Int64(prefix+".target-file-size", defaultConfig.TargetFileSize, "target file size for the level 0")
+ f.Bool(prefix+".target-file-size-equal-levels", defaultConfig.TargetFileSizeEqualLevels, "if true same target-file-size will be uses for all levels, otherwise target size for layer n = 2 * target size for layer n - 1")
- f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.")
- f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.")
- f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate")
- f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).")
- f.Int(prefix+".max-writer-concurrency", PebbleExperimentalConfigDefault.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.")
- f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.")
+ f.Int(prefix+".l0-compaction-concurrency", defaultConfig.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.")
+ f.Uint64(prefix+".compaction-debt-concurrency", defaultConfig.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.")
+ f.Int64(prefix+".read-compaction-rate", defaultConfig.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate")
+ f.Int64(prefix+".read-sampling-multiplier", defaultConfig.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).")
+ f.Int(prefix+".max-writer-concurrency", defaultConfig.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.")
+ f.Bool(prefix+".force-writer-parallelism", defaultConfig.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.")
}
func (c *PebbleExperimentalConfig) Validate() error {
diff --git a/cmd/conf/init.go b/cmd/conf/init.go
index a3b5504077..f01d99f8b7 100644
--- a/cmd/conf/init.go
+++ b/cmd/conf/init.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
- "github.com/offchainlabs/nitro/execution/gethexec"
"github.com/spf13/pflag"
)
@@ -23,6 +22,7 @@ type InitConfig struct {
DevInitAddress string `koanf:"dev-init-address"`
DevInitBlockNum uint64 `koanf:"dev-init-blocknum"`
Empty bool `koanf:"empty"`
+ ImportWasm bool `koanf:"import-wasm"`
AccountsPerSync uint `koanf:"accounts-per-sync"`
ImportFile string `koanf:"import-file"`
ThenQuit bool `koanf:"then-quit"`
@@ -31,7 +31,7 @@ type InitConfig struct {
PruneThreads int `koanf:"prune-threads"`
PruneTrieCleanCache int `koanf:"prune-trie-clean-cache"`
RecreateMissingStateFrom uint64 `koanf:"recreate-missing-state-from"`
- RebuildLocalWasm bool `koanf:"rebuild-local-wasm"`
+ RebuildLocalWasm string `koanf:"rebuild-local-wasm"`
ReorgToBatch int64 `koanf:"reorg-to-batch"`
ReorgToMessageBatch int64 `koanf:"reorg-to-message-batch"`
ReorgToBlockBatch int64 `koanf:"reorg-to-block-batch"`
@@ -49,15 +49,16 @@ var InitConfigDefault = InitConfig{
DevInitAddress: "",
DevInitBlockNum: 0,
Empty: false,
+ ImportWasm: false,
ImportFile: "",
AccountsPerSync: 100000,
ThenQuit: false,
Prune: "",
PruneBloomSize: 2048,
PruneThreads: runtime.NumCPU(),
- PruneTrieCleanCache: gethexec.DefaultCachingConfig.TrieCleanCache,
+ PruneTrieCleanCache: 600,
RecreateMissingStateFrom: 0, // 0 = disabled
- RebuildLocalWasm: true,
+ RebuildLocalWasm: "auto",
ReorgToBatch: -1,
ReorgToMessageBatch: -1,
ReorgToBlockBatch: -1,
@@ -75,6 +76,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.")
f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. Must exist in ancient database.")
f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state")
+ f.Bool(prefix+".import-wasm", InitConfigDefault.ImportWasm, "if set, import the wasm directory when downloading a database (contains executable code - only use with highly trusted source)")
f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done")
f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import")
f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.")
@@ -83,10 +85,14 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.Int(prefix+".prune-threads", InitConfigDefault.PruneThreads, "the number of threads to use when pruning")
f.Int(prefix+".prune-trie-clean-cache", InitConfigDefault.PruneTrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with when traversing state database during pruning")
f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)")
- f.Bool(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise-will be done lazily)")
f.Int64(prefix+".reorg-to-batch", InitConfigDefault.ReorgToBatch, "rolls back the blockchain to a specified batch number")
f.Int64(prefix+".reorg-to-message-batch", InitConfigDefault.ReorgToMessageBatch, "rolls back the blockchain to the first batch at or before a given message index")
f.Int64(prefix+".reorg-to-block-batch", InitConfigDefault.ReorgToBlockBatch, "rolls back the blockchain to the first batch at or before a given block number")
+ f.String(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise-will be done lazily). Three modes are supported \n"+
+ "\"auto\"- (enabled by default) if any previous rebuilding attempt was successful then rebuilding is disabled else continues to rebuild,\n"+
+ "\"force\"- force rebuilding which would commence rebuilding despite the status of previous attempts,\n"+
+ "\"false\"- do not rebuild on startup",
+ )
}
func (c *InitConfig) Validate() error {
@@ -111,6 +117,10 @@ func (c *InitConfig) Validate() error {
}
}
}
+ c.RebuildLocalWasm = strings.ToLower(c.RebuildLocalWasm)
+ if c.RebuildLocalWasm != "auto" && c.RebuildLocalWasm != "force" && c.RebuildLocalWasm != "false" {
+ return fmt.Errorf("invalid value of rebuild-local-wasm, want: auto or force or false, got: %s", c.RebuildLocalWasm)
+ }
return nil
}
diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index ba60cbbd4d..f791d8cbc4 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -166,8 +166,10 @@ func startClientStore(args []string) error {
if err != nil {
return err
}
+ // #nosec G115
cert, err = client.Store(ctx, message, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()))
} else if len(config.Message) > 0 {
+ // #nosec G115
cert, err = client.Store(ctx, []byte(config.Message), uint64(time.Now().Add(config.DASRetentionPeriod).Unix()))
} else {
return errors.New("--message or --random-message-size must be specified")
@@ -363,6 +365,7 @@ func dumpKeyset(args []string) error {
return err
}
+ // #nosec G115
keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.Keyset.AssumedHonest))
if err != nil {
return err
diff --git a/cmd/dbconv/dbconv/config.go b/cmd/dbconv/dbconv/config.go
new file mode 100644
index 0000000000..917f34261d
--- /dev/null
+++ b/cmd/dbconv/dbconv/config.go
@@ -0,0 +1,95 @@
+package dbconv
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/offchainlabs/nitro/cmd/conf"
+ "github.com/offchainlabs/nitro/cmd/genericconf"
+ flag "github.com/spf13/pflag"
+)
+
+type DBConfig struct {
+ Data string `koanf:"data"`
+ DBEngine string `koanf:"db-engine"`
+ Handles int `koanf:"handles"`
+ Cache int `koanf:"cache"`
+ Namespace string `koanf:"namespace"`
+ Pebble conf.PebbleConfig `koanf:"pebble"`
+}
+
+var DBConfigDefaultDst = DBConfig{
+ DBEngine: "pebble",
+ Handles: conf.PersistentConfigDefault.Handles,
+ Cache: 2048, // 2048 MB
+ Namespace: "dstdb/",
+ Pebble: conf.PebbleConfigDefault,
+}
+
+var DBConfigDefaultSrc = DBConfig{
+ DBEngine: "leveldb",
+ Handles: conf.PersistentConfigDefault.Handles,
+ Cache: 2048, // 2048 MB
+ Namespace: "srcdb/",
+}
+
+func DBConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *DBConfig) {
+ f.String(prefix+".data", defaultConfig.Data, "directory of stored chain state")
+ f.String(prefix+".db-engine", defaultConfig.DBEngine, "backing database implementation to use ('leveldb' or 'pebble')")
+ f.Int(prefix+".handles", defaultConfig.Handles, "number of files to be open simultaneously")
+ f.Int(prefix+".cache", defaultConfig.Cache, "the capacity(in megabytes) of the data caching")
+ f.String(prefix+".namespace", defaultConfig.Namespace, "metrics namespace")
+ conf.PebbleConfigAddOptions(prefix+".pebble", f, &defaultConfig.Pebble)
+}
+
+type DBConvConfig struct {
+ Src DBConfig `koanf:"src"`
+ Dst DBConfig `koanf:"dst"`
+ IdealBatchSize int `koanf:"ideal-batch-size"`
+ Convert bool `koanf:"convert"`
+ Compact bool `koanf:"compact"`
+ Verify string `koanf:"verify"`
+ LogLevel string `koanf:"log-level"`
+ LogType string `koanf:"log-type"`
+ Metrics bool `koanf:"metrics"`
+ MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"`
+}
+
+var DefaultDBConvConfig = DBConvConfig{
+ Src: DBConfigDefaultSrc,
+ Dst: DBConfigDefaultDst,
+ IdealBatchSize: 100 * 1024 * 1024, // 100 MB
+ Convert: false,
+ Compact: false,
+ Verify: "",
+ LogLevel: "INFO",
+ LogType: "plaintext",
+ Metrics: false,
+ MetricsServer: genericconf.MetricsServerConfigDefault,
+}
+
+func DBConvConfigAddOptions(f *flag.FlagSet) {
+ DBConfigAddOptions("src", f, &DefaultDBConvConfig.Src)
+ DBConfigAddOptions("dst", f, &DefaultDBConvConfig.Dst)
+ f.Int("ideal-batch-size", DefaultDBConvConfig.IdealBatchSize, "ideal write batch size in bytes")
+ f.Bool("convert", DefaultDBConvConfig.Convert, "enables conversion step")
+ f.Bool("compact", DefaultDBConvConfig.Compact, "enables compaction step")
+ f.String("verify", DefaultDBConvConfig.Verify, "enables verification step (\"\" = disabled, \"keys\" = only keys, \"full\" = keys and values)")
+ f.String("log-level", DefaultDBConvConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE")
+ f.String("log-type", DefaultDBConvConfig.LogType, "log type (plaintext or json)")
+ f.Bool("metrics", DefaultDBConvConfig.Metrics, "enable metrics")
+ genericconf.MetricsServerAddOptions("metrics-server", f)
+}
+
+func (c *DBConvConfig) Validate() error {
+ if c.Verify != "keys" && c.Verify != "full" && c.Verify != "" {
+ return fmt.Errorf("Invalid verify mode: %v", c.Verify)
+ }
+ if !c.Convert && c.Verify == "" && !c.Compact {
+ return errors.New("nothing to be done, conversion, verification and compaction disabled")
+ }
+ if c.IdealBatchSize <= 0 {
+ return fmt.Errorf("Invalid ideal batch size: %d, has to be greater then 0", c.IdealBatchSize)
+ }
+ return nil
+}
diff --git a/cmd/dbconv/dbconv/dbconv.go b/cmd/dbconv/dbconv/dbconv.go
new file mode 100644
index 0000000000..6a97df31c0
--- /dev/null
+++ b/cmd/dbconv/dbconv/dbconv.go
@@ -0,0 +1,172 @@
+package dbconv
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/offchainlabs/nitro/util/dbutil"
+)
+
+type DBConverter struct {
+ config *DBConvConfig
+ stats Stats
+}
+
+func NewDBConverter(config *DBConvConfig) *DBConverter {
+ return &DBConverter{
+ config: config,
+ }
+}
+
+func openDB(config *DBConfig, name string, readonly bool) (ethdb.Database, error) {
+ db, err := rawdb.Open(rawdb.OpenOptions{
+ Type: config.DBEngine,
+ Directory: config.Data,
+ // we don't open the freezer; it doesn't need to be converted because its format is independent of the db-engine
+ // note: user needs to handle copying/moving the ancient directory
+ AncientsDirectory: "",
+ Namespace: config.Namespace,
+ Cache: config.Cache,
+ Handles: config.Handles,
+ ReadOnly: readonly,
+ PebbleExtraOptions: config.Pebble.ExtraOptions(name),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := dbutil.UnfinishedConversionCheck(db); err != nil {
+ if closeErr := db.Close(); closeErr != nil {
+ err = errors.Join(err, closeErr)
+ }
+ return nil, err
+ }
+
+ return db, nil
+}
+
+func (c *DBConverter) Convert(ctx context.Context) error {
+ var err error
+ src, err := openDB(&c.config.Src, "src", true)
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+ dst, err := openDB(&c.config.Dst, "dst", false)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ c.stats.Reset()
+ log.Info("Converting database", "src", c.config.Src.Data, "dst", c.config.Dst.Data, "db-engine", c.config.Dst.DBEngine)
+ if err = dbutil.PutUnfinishedConversionCanary(dst); err != nil {
+ return err
+ }
+ it := src.NewIterator(nil, nil)
+ defer it.Release()
+ batch := dst.NewBatch()
+ entriesInBatch := 0
+ for it.Next() && ctx.Err() == nil {
+ if err = batch.Put(it.Key(), it.Value()); err != nil {
+ return err
+ }
+ entriesInBatch++
+ if batchSize := batch.ValueSize(); batchSize >= c.config.IdealBatchSize {
+ if err = batch.Write(); err != nil {
+ return err
+ }
+ c.stats.LogEntries(int64(entriesInBatch))
+ c.stats.LogBytes(int64(batchSize))
+ batch.Reset()
+ entriesInBatch = 0
+ }
+ }
+ if err = ctx.Err(); err == nil {
+ batchSize := batch.ValueSize()
+ if err = batch.Write(); err != nil {
+ return err
+ }
+ c.stats.LogEntries(int64(entriesInBatch))
+ c.stats.LogBytes(int64(batchSize))
+ }
+ if err == nil {
+ if err = dbutil.DeleteUnfinishedConversionCanary(dst); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func (c *DBConverter) CompactDestination() error {
+ dst, err := openDB(&c.config.Dst, "dst", false)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ start := time.Now()
+ log.Info("Compacting destination database", "dst", c.config.Dst.Data)
+ if err := dst.Compact(nil, nil); err != nil {
+ return err
+ }
+ log.Info("Compaction done", "elapsed", time.Since(start))
+ return nil
+}
+
+func (c *DBConverter) Verify(ctx context.Context) error {
+ if c.config.Verify == "keys" {
+ log.Info("Starting quick verification - verifying only keys existence")
+ } else if c.config.Verify == "full" {
+ log.Info("Starting full verification - verifying keys and values")
+ }
+ var err error
+ src, err := openDB(&c.config.Src, "src", true)
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+
+ dst, err := openDB(&c.config.Dst, "dst", true)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+
+ c.stats.Reset()
+ it := src.NewIterator(nil, nil)
+ defer it.Release()
+ for it.Next() && ctx.Err() == nil {
+ switch c.config.Verify {
+ case "keys":
+ has, err := dst.Has(it.Key())
+ if err != nil {
+ return fmt.Errorf("Failed to check key existence in destination db, key: %v, err: %w", it.Key(), err)
+ }
+ if !has {
+ return fmt.Errorf("Missing key in destination db, key: %v", it.Key())
+ }
+ c.stats.LogBytes(int64(len(it.Key())))
+ case "full":
+ dstValue, err := dst.Get(it.Key())
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(dstValue, it.Value()) {
+ return fmt.Errorf("Value mismatch for key: %v, src value: %v, dst value: %s", it.Key(), it.Value(), dstValue)
+ }
+ c.stats.LogBytes(int64(len(it.Key()) + len(dstValue)))
+ default:
+ return fmt.Errorf("Invalid verify config value: %v", c.config.Verify)
+ }
+ c.stats.LogEntries(1)
+ }
+ return ctx.Err()
+}
+
+func (c *DBConverter) Stats() *Stats {
+ return &c.stats
+}
diff --git a/cmd/dbconv/dbconv/dbconv_test.go b/cmd/dbconv/dbconv/dbconv_test.go
new file mode 100644
index 0000000000..31aa0c3917
--- /dev/null
+++ b/cmd/dbconv/dbconv/dbconv_test.go
@@ -0,0 +1,70 @@
+package dbconv
+
+import (
+ "context"
+ "testing"
+
+ "github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestConversion(t *testing.T) {
+ oldDBConfig := DBConfigDefaultSrc
+ oldDBConfig.Data = t.TempDir()
+
+ newDBConfig := DBConfigDefaultDst
+ newDBConfig.Data = t.TempDir()
+
+ func() {
+ oldDb, err := openDB(&oldDBConfig, "", false)
+ defer oldDb.Close()
+ Require(t, err)
+ err = oldDb.Put([]byte{}, []byte{0xde, 0xed, 0xbe, 0xef})
+ Require(t, err)
+ for i := 0; i < 20; i++ {
+ err = oldDb.Put([]byte{byte(i)}, []byte{byte(i + 1)})
+ Require(t, err)
+ }
+ }()
+
+ config := DefaultDBConvConfig
+ config.Src = oldDBConfig
+ config.Dst = newDBConfig
+ config.IdealBatchSize = 5
+ config.Verify = "full"
+ conv := NewDBConverter(&config)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ err := conv.Convert(ctx)
+ Require(t, err)
+
+ err = conv.Verify(ctx)
+ Require(t, err)
+
+ // check that the new database doesn't contain any keys that are absent from the old one
+ oldDb, err := openDB(&oldDBConfig, "", true)
+ Require(t, err)
+ defer oldDb.Close()
+ newDb, err := openDB(&newDBConfig, "", true)
+ Require(t, err)
+ defer newDb.Close()
+ it := newDb.NewIterator(nil, nil)
+ defer it.Release()
+ for it.Next() {
+ has, err := oldDb.Has(it.Key())
+ Require(t, err)
+ if !has {
+ Fail(t, "Unexpected key in the converted db, key:", it.Key())
+ }
+ }
+}
+
+func Require(t *testing.T, err error, printables ...interface{}) {
+ t.Helper()
+ testhelpers.RequireImpl(t, err, printables...)
+}
+
+func Fail(t *testing.T, printables ...interface{}) {
+ t.Helper()
+ testhelpers.FailImpl(t, printables...)
+}
diff --git a/cmd/dbconv/dbconv/stats.go b/cmd/dbconv/dbconv/stats.go
new file mode 100644
index 0000000000..729a408f38
--- /dev/null
+++ b/cmd/dbconv/dbconv/stats.go
@@ -0,0 +1,96 @@
+package dbconv
+
+import (
+ "sync/atomic"
+ "time"
+)
+
+type Stats struct {
+ entries atomic.Int64
+ bytes atomic.Int64
+
+ startTimestamp int64
+ prevEntries int64
+ prevBytes int64
+ prevEntriesTimestamp int64
+ prevBytesTimestamp int64
+}
+
+func (s *Stats) Reset() {
+ now := time.Now().UnixNano()
+ s.entries.Store(0)
+ s.bytes.Store(0)
+ s.startTimestamp = now
+ s.prevEntries = 0
+ s.prevBytes = 0
+ s.prevEntriesTimestamp = now
+ s.prevBytesTimestamp = now
+}
+
+func (s *Stats) LogEntries(entries int64) {
+ s.entries.Add(entries)
+}
+
+func (s *Stats) Entries() int64 {
+ return s.entries.Load()
+}
+
+func (s *Stats) LogBytes(bytes int64) {
+ s.bytes.Add(bytes)
+}
+
+func (s *Stats) Bytes() int64 {
+ return s.bytes.Load()
+}
+
+func (s *Stats) Elapsed() time.Duration {
+ now := time.Now().UnixNano()
+ dt := now - s.startTimestamp
+ return time.Duration(dt)
+}
+
+// not safe for concurrent calls to itself: reads and updates prevEntries/prevEntriesTimestamp without synchronization
+func (s *Stats) EntriesPerSecond() float64 {
+ now := time.Now().UnixNano()
+ current := s.Entries()
+ dt := now - s.prevEntriesTimestamp
+ if dt == 0 {
+ dt = 1
+ }
+ de := current - s.prevEntries
+ s.prevEntries = current
+ s.prevEntriesTimestamp = now
+ return float64(de) * 1e9 / float64(dt)
+}
+
+// not safe for concurrent calls to itself: reads and updates prevBytes/prevBytesTimestamp without synchronization
+func (s *Stats) BytesPerSecond() float64 {
+ now := time.Now().UnixNano()
+ current := s.Bytes()
+ dt := now - s.prevBytesTimestamp
+ if dt == 0 {
+ dt = 1
+ }
+ db := current - s.prevBytes
+ s.prevBytes = current
+ s.prevBytesTimestamp = now
+ return float64(db) * 1e9 / float64(dt)
+}
+
+func (s *Stats) AverageEntriesPerSecond() float64 {
+ now := time.Now().UnixNano()
+ dt := now - s.startTimestamp
+ if dt == 0 {
+ dt = 1
+ }
+ return float64(s.Entries()) * 1e9 / float64(dt)
+}
+
+func (s *Stats) AverageBytesPerSecond() float64 {
+ now := time.Now().UnixNano()
+ dt := now - s.startTimestamp
+ if dt == 0 {
+ dt = 1
+ }
+ return float64(s.Bytes()) * 1e9 / float64(dt)
+}
diff --git a/cmd/dbconv/main.go b/cmd/dbconv/main.go
new file mode 100644
index 0000000000..2d61c96552
--- /dev/null
+++ b/cmd/dbconv/main.go
@@ -0,0 +1,110 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/metrics/exp"
+ "github.com/offchainlabs/nitro/cmd/dbconv/dbconv"
+ "github.com/offchainlabs/nitro/cmd/genericconf"
+ "github.com/offchainlabs/nitro/cmd/util/confighelpers"
+ flag "github.com/spf13/pflag"
+)
+
+func parseDBConv(args []string) (*dbconv.DBConvConfig, error) {
+ f := flag.NewFlagSet("dbconv", flag.ContinueOnError)
+ dbconv.DBConvConfigAddOptions(f)
+ k, err := confighelpers.BeginCommonParse(f, args)
+ if err != nil {
+ return nil, err
+ }
+ var config dbconv.DBConvConfig
+ if err := confighelpers.EndCommonParse(k, &config); err != nil {
+ return nil, err
+ }
+ return &config, config.Validate()
+}
+
+func printSampleUsage(name string) {
+ fmt.Printf("Sample usage: %s --help \n\n", name)
+}
+
+func printProgress(conv *dbconv.DBConverter) {
+ stats := conv.Stats()
+ fmt.Printf("Progress:\n")
+ fmt.Printf("\tprocessed entries: %d\n", stats.Entries())
+ fmt.Printf("\tprocessed data (MB): %d\n", stats.Bytes()/1024/1024)
+ fmt.Printf("\telapsed:\t%v\n", stats.Elapsed())
+ fmt.Printf("\tcurrent:\t%.3e entries/s\t%.3f MB/s\n", stats.EntriesPerSecond()/1000, stats.BytesPerSecond()/1024/1024)
+ fmt.Printf("\taverage:\t%.3e entries/s\t%.3f MB/s\n", stats.AverageEntriesPerSecond()/1000, stats.AverageBytesPerSecond()/1024/1024)
+}
+
+func main() {
+ args := os.Args[1:]
+ config, err := parseDBConv(args)
+ if err != nil {
+ confighelpers.PrintErrorAndExit(err, printSampleUsage)
+ }
+
+ err = genericconf.InitLog(config.LogType, config.LogLevel, &genericconf.FileLoggingConfig{Enable: false}, nil)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err)
+ os.Exit(1)
+ }
+
+ if config.Metrics {
+ go metrics.CollectProcessMetrics(config.MetricsServer.UpdateInterval)
+ exp.Setup(fmt.Sprintf("%v:%v", config.MetricsServer.Addr, config.MetricsServer.Port))
+ }
+
+ conv := dbconv.NewDBConverter(config)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ticker := time.NewTicker(10 * time.Second)
+ go func() {
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ printProgress(conv)
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ if config.Convert {
+ err = conv.Convert(ctx)
+ if err != nil {
+ log.Error("Conversion error", "err", err)
+ os.Exit(1)
+ }
+ stats := conv.Stats()
+ log.Info("Conversion finished.", "entries", stats.Entries(), "MB", stats.Bytes()/1024/1024, "avg entries/s", fmt.Sprintf("%.3e", stats.AverageEntriesPerSecond()/1000), "avg MB/s", stats.AverageBytesPerSecond()/1024/1024, "elapsed", stats.Elapsed())
+ }
+
+ if config.Compact {
+ ticker.Stop()
+ err = conv.CompactDestination()
+ if err != nil {
+ log.Error("Compaction error", "err", err)
+ os.Exit(1)
+ }
+ }
+
+ if config.Verify != "" {
+ ticker.Reset(10 * time.Second)
+ err = conv.Verify(ctx)
+ if err != nil {
+ log.Error("Verification error", "err", err)
+ os.Exit(1)
+ }
+ stats := conv.Stats()
+ log.Info("Verification completed successfully.", "elapsed", stats.Elapsed())
+ }
+}
diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go
index d8c0aeeac4..c70ceb1d94 100644
--- a/cmd/deploy/deploy.go
+++ b/cmd/deploy/deploy.go
@@ -61,7 +61,6 @@ func main() {
authorizevalidators := flag.Uint64("authorizevalidators", 0, "Number of validators to preemptively authorize")
txTimeout := flag.Duration("txtimeout", 10*time.Minute, "Timeout when waiting for a transaction to be included in a block")
prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing")
- isUsingFeeToken := flag.Bool("isUsingFeeToken", false, "true if the chain uses custom fee token")
flag.Parse()
l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint)
maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint)
@@ -180,7 +179,7 @@ func main() {
defer l1Reader.StopAndWait()
nativeToken := common.HexToAddress(*nativeTokenAddressString)
- deployedAddresses, err := deploycode.DeployOnL1(
+ deployedAddresses, err := deploycode.DeployOnParentChain(
ctx,
l1Reader,
l1TransactionOpts,
@@ -190,7 +189,7 @@ func main() {
arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress),
nativeToken,
maxDataSize,
- *isUsingFeeToken,
+ true,
)
if err != nil {
flag.Usage()
diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go
index a958572458..9e3ecec747 100644
--- a/cmd/nitro/init.go
+++ b/cmd/nitro/init.go
@@ -24,11 +24,12 @@ import (
"time"
"github.com/cavaliergopher/grab/v3"
- extract "github.com/codeclysm/extract/v3"
+ "github.com/codeclysm/extract/v3"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -37,7 +38,6 @@ import (
"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/arbos/arbosState"
"github.com/offchainlabs/nitro/arbos/arbostypes"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/conf"
"github.com/offchainlabs/nitro/cmd/ipfshelper"
@@ -46,6 +46,7 @@ import (
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/statetransfer"
"github.com/offchainlabs/nitro/util/arbmath"
+ "github.com/offchainlabs/nitro/util/dbutil"
)
var notFoundError = errors.New("file not found")
@@ -300,6 +301,7 @@ func setLatestSnapshotUrl(ctx context.Context, initConfig *conf.InitConfig, chai
return fmt.Errorf("failed to parse latest mirror \"%s\": %w", initConfig.LatestBase, err)
}
latestFileUrl := baseUrl.JoinPath(chain, "latest-"+initConfig.Latest+".txt").String()
+ latestFileUrl = strings.ToLower(latestFileUrl)
latestFileBytes, err := httpGet(ctx, latestFileUrl)
if err != nil {
return fmt.Errorf("failed to get latest file at \"%s\": %w", latestFileUrl, err)
@@ -311,6 +313,7 @@ func setLatestSnapshotUrl(ctx context.Context, initConfig *conf.InitConfig, chai
} else {
initConfig.Url = baseUrl.JoinPath(latestFile).String()
}
+ initConfig.Url = strings.ToLower(initConfig.Url)
log.Info("Set latest snapshot url", "url", initConfig.Url)
return nil
}
@@ -351,12 +354,12 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo
}
// Make sure we don't allow accidentally downgrading ArbOS
if chainConfig.DebugMode() {
- if currentArbosState.ArbOSVersion() > currentArbosState.MaxDebugArbosVersionSupported() {
- return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", currentArbosState.MaxDebugArbosVersionSupported(), currentArbosState.ArbOSVersion())
+ if currentArbosState.ArbOSVersion() > arbosState.MaxDebugArbosVersionSupported {
+ return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", arbosState.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion())
}
} else {
- if currentArbosState.ArbOSVersion() > currentArbosState.MaxArbosVersionSupported() {
- return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", currentArbosState.MaxArbosVersionSupported(), currentArbosState.ArbOSVersion())
+ if currentArbosState.ArbOSVersion() > arbosState.MaxArbosVersionSupported {
+ return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", arbosState.MaxArbosVersionSupported, currentArbosState.ArbOSVersion())
}
}
@@ -396,17 +399,168 @@ func checkEmptyDatabaseDir(dir string, force bool) error {
return nil
}
-var pebbleNotExistErrorRegex = regexp.MustCompile("pebble: database .* does not exist")
+func databaseIsEmpty(db ethdb.Database) bool {
+ it := db.NewIterator(nil, nil)
+ defer it.Release()
+ return !it.Next()
+}
+
+func isWasmDb(path string) bool {
+ path = strings.ToLower(path) // lowers the path to handle case-insensitive file systems
+ path = filepath.Clean(path)
+ parts := strings.Split(path, string(filepath.Separator))
+ if len(parts) >= 1 && parts[0] == "wasm" {
+ return true
+ }
+ if len(parts) >= 2 && parts[0] == "" && parts[1] == "wasm" { // Cover "/wasm" case
+ return true
+ }
+ return false
+}
-func isPebbleNotExistError(err error) bool {
- return pebbleNotExistErrorRegex.MatchString(err.Error())
+func extractSnapshot(archive string, location string, importWasm bool) error {
+ reader, err := os.Open(archive)
+ if err != nil {
+ return fmt.Errorf("couldn't open init '%v' archive: %w", archive, err)
+ }
+ defer reader.Close()
+ stat, err := reader.Stat()
+ if err != nil {
+ return err
+ }
+ log.Info("extracting downloaded init archive", "size", fmt.Sprintf("%dMB", stat.Size()/1024/1024))
+ var rename extract.Renamer
+ if !importWasm {
+ rename = func(path string) string {
+ if isWasmDb(path) {
+ return "" // do not extract wasm files
+ }
+ return path
+ }
+ }
+ err = extract.Archive(context.Background(), reader, location, rename)
+ if err != nil {
+ return fmt.Errorf("couldn't extract init archive '%v' err: %w", archive, err)
+ }
+ return nil
+}
+
+// removes all entries with keys prefixed with prefixes and of length used in initial version of wasm store schema
+func purgeVersion0WasmStoreEntries(db ethdb.Database) error {
+ prefixes, keyLength := rawdb.DeprecatedPrefixesV0()
+ batch := db.NewBatch()
+ notMatchingLengthKeyLogged := false
+ for _, prefix := range prefixes {
+ it := db.NewIterator(prefix, nil)
+ defer it.Release()
+ for it.Next() {
+ key := it.Key()
+ if len(key) != keyLength {
+ if !notMatchingLengthKeyLogged {
+ log.Warn("Found key with deprecated prefix but not matching length, skipping removal. (this warning is logged only once)", "key", key)
+ notMatchingLengthKeyLogged = true
+ }
+ continue
+ }
+ if err := batch.Delete(key); err != nil {
+ return fmt.Errorf("Failed to remove key %v : %w", key, err)
+ }
+
+ // Recreate the iterator after every batch commit in order
+ // to allow the underlying compactor to delete the entries.
+ if batch.ValueSize() >= ethdb.IdealBatchSize {
+ if err := batch.Write(); err != nil {
+ return fmt.Errorf("Failed to write batch: %w", err)
+ }
+ batch.Reset()
+ it.Release()
+ it = db.NewIterator(prefix, key)
+ }
+ }
+ }
+ if batch.ValueSize() > 0 {
+ if err := batch.Write(); err != nil {
+ return fmt.Errorf("Failed to write batch: %w", err)
+ }
+ batch.Reset()
+ }
+ return nil
+}
+
+// if db is not empty, validates if wasm database schema version matches current version
+// otherwise persists current version
+func validateOrUpgradeWasmStoreSchemaVersion(db ethdb.Database) error {
+ if !databaseIsEmpty(db) {
+ version, err := rawdb.ReadWasmSchemaVersion(db)
+ if err != nil {
+ if dbutil.IsErrNotFound(err) {
+ version = []byte{0}
+ } else {
+ return fmt.Errorf("Failed to retrieve wasm schema version: %w", err)
+ }
+ }
+ if len(version) != 1 || version[0] > rawdb.WasmSchemaVersion {
+ return fmt.Errorf("Unsupported wasm database schema version, current version: %v, read from wasm database: %v", rawdb.WasmSchemaVersion, version)
+ }
+ // special step for upgrading from version 0 - remove all entries added in version 0
+ if version[0] == 0 {
+ log.Warn("Detected wasm store schema version 0 - removing all old wasm store entries")
+ if err := purgeVersion0WasmStoreEntries(db); err != nil {
+ return fmt.Errorf("Failed to purge wasm store version 0 entries: %w", err)
+ }
+ log.Info("Wasm store schema version 0 entries successfully removed.")
+ }
+ }
+ rawdb.WriteWasmSchemaVersion(db)
+ return nil
}
-func isLeveldbNotExistError(err error) bool {
- return os.IsNotExist(err)
+func rebuildLocalWasm(ctx context.Context, config *gethexec.Config, l2BlockChain *core.BlockChain, chainDb, wasmDb ethdb.Database, rebuildMode string) (ethdb.Database, *core.BlockChain, error) {
+ var err error
+ latestBlock := l2BlockChain.CurrentBlock()
+ if latestBlock == nil || latestBlock.Number.Uint64() <= l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum ||
+ types.DeserializeHeaderExtraInformation(latestBlock).ArbOSFormatVersion < params.ArbosVersion_Stylus {
+ // If there is only genesis block or no blocks in the blockchain, set Rebuilding of wasm store to Done
+ // If Stylus upgrade hasn't yet happened, skipping rebuilding of wasm store
+ log.Info("Setting rebuilding of wasm store to done")
+ if err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone); err != nil {
+ return nil, nil, fmt.Errorf("unable to set rebuilding status of wasm store to done: %w", err)
+ }
+ } else if rebuildMode != "false" {
+ var position common.Hash
+ if rebuildMode == "force" {
+ log.Info("Commencing force rebuilding of wasm store by setting codehash position in rebuilding to beginning")
+ if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
+ return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
+ }
+ } else {
+ position, err = gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
+ if err != nil {
+ log.Info("Unable to get codehash position in rebuilding of wasm store, it's possible it isn't initialized yet, so initializing it and starting rebuilding", "err", err)
+ if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
+ return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
+ }
+ }
+ }
+ if position != gethexec.RebuildingDone {
+ startBlockHash, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingStartBlockHashKey)
+ if err != nil {
+ log.Info("Unable to get start block hash in rebuilding of wasm store, it's possible it isn't initialized yet, so initializing it to latest block hash", "err", err)
+ if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingStartBlockHashKey, latestBlock.Hash()); err != nil {
+ return nil, nil, fmt.Errorf("unable to initialize start block hash in rebuilding of wasm store to latest block hash: %w", err)
+ }
+ startBlockHash = latestBlock.Hash()
+ }
+ log.Info("Starting or continuing rebuilding of wasm store", "codeHash", position, "startBlockHash", startBlockHash)
+ if err := gethexec.RebuildWasmStore(ctx, wasmDb, chainDb, config.RPC.MaxRecreateStateDepth, &config.StylusTarget, l2BlockChain, position, startBlockHash); err != nil {
+ return nil, nil, fmt.Errorf("error rebuilding of wasm store: %w", err)
+ }
+ }
+ }
+ return chainDb, l2BlockChain, nil
}
-func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) {
+func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, targetConfig *gethexec.StylusTargetConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) {
if !config.Init.Force {
if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, config.Persistent.Ancient, "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions("l2chaindata")); err == nil {
if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil {
@@ -418,11 +572,20 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
if err != nil {
return nil, nil, err
}
+ if err := dbutil.UnfinishedConversionCheck(chainData); err != nil {
+ return nil, nil, fmt.Errorf("l2chaindata unfinished database conversion check error: %w", err)
+ }
wasmDb, err := stack.OpenDatabaseWithExtraOptions("wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, "wasm/", false, persistentConfig.Pebble.ExtraOptions("wasm"))
if err != nil {
return nil, nil, err
}
- chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)
+ if err := validateOrUpgradeWasmStoreSchemaVersion(wasmDb); err != nil {
+ return nil, nil, err
+ }
+ if err := dbutil.UnfinishedConversionCheck(wasmDb); err != nil {
+ return nil, nil, fmt.Errorf("wasm unfinished database conversion check error: %w", err)
+ }
+ chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1, targetConfig.WasmTargets())
_, err = rawdb.ParseStateScheme(cacheConfig.StateScheme, chainDb)
if err != nil {
return nil, nil, err
@@ -445,43 +608,11 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
return chainDb, l2BlockChain, fmt.Errorf("failed to recreate missing states: %w", err)
}
}
- latestBlock := l2BlockChain.CurrentBlock()
- if latestBlock == nil || latestBlock.Number.Uint64() <= chainConfig.ArbitrumChainParams.GenesisBlockNum ||
- types.DeserializeHeaderExtraInformation(latestBlock).ArbOSFormatVersion < params.ArbosVersion_Stylus {
- // If there is only genesis block or no blocks in the blockchain, set Rebuilding of wasm store to Done
- // If Stylus upgrade hasn't yet happened, skipping rebuilding of wasm store
- log.Info("Setting rebuilding of wasm store to done")
- if err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone); err != nil {
- return nil, nil, fmt.Errorf("unable to set rebuilding status of wasm store to done: %w", err)
- }
- } else if config.Init.RebuildLocalWasm {
- position, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
- if err != nil {
- log.Info("Unable to get codehash position in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it and starting rebuilding", "err", err)
- if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
- return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
- }
- }
- if position != gethexec.RebuildingDone {
- startBlockHash, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingStartBlockHashKey)
- if err != nil {
- log.Info("Unable to get start block hash in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it to latest block hash", "err", err)
- if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingStartBlockHashKey, latestBlock.Hash()); err != nil {
- return nil, nil, fmt.Errorf("unable to initialize start block hash in rebuilding of wasm store to latest block hash: %w", err)
- }
- startBlockHash = latestBlock.Hash()
- }
- log.Info("Starting or continuing rebuilding of wasm store", "codeHash", position, "startBlockHash", startBlockHash)
- if err := gethexec.RebuildWasmStore(ctx, wasmDb, chainDb, config.Execution.RPC.MaxRecreateStateDepth, l2BlockChain, position, startBlockHash); err != nil {
- return nil, nil, fmt.Errorf("error rebuilding of wasm store: %w", err)
- }
- }
- }
- return chainDb, l2BlockChain, nil
+ return rebuildLocalWasm(ctx, &config.Execution, l2BlockChain, chainDb, wasmDb, config.Init.RebuildLocalWasm)
}
readOnlyDb.Close()
- } else if !isLeveldbNotExistError(err) && !isPebbleNotExistError(err) {
- // we only want to continue if the error is pebble or leveldb not exist error
+ } else if !dbutil.IsNotExistError(err) {
+ // we only want to continue if the database does not exist
return nil, nil, fmt.Errorf("Failed to open database: %w", err)
}
}
@@ -511,19 +642,9 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
}
if initFile != "" {
- reader, err := os.Open(initFile)
- if err != nil {
- return nil, nil, fmt.Errorf("couln't open init '%v' archive: %w", initFile, err)
- }
- stat, err := reader.Stat()
- if err != nil {
+ if err := extractSnapshot(initFile, stack.InstanceDir(), config.Init.ImportWasm); err != nil {
return nil, nil, err
}
- log.Info("extracting downloaded init archive", "size", fmt.Sprintf("%dMB", stat.Size()/1024/1024))
- err = extract.Archive(context.Background(), reader, stack.InstanceDir(), nil)
- if err != nil {
- return nil, nil, fmt.Errorf("couln't extract init archive '%v' err:%w", initFile, err)
- }
}
var initDataReader statetransfer.InitDataReader = nil
@@ -536,7 +657,10 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
if err != nil {
return nil, nil, err
}
- chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)
+ if err := validateOrUpgradeWasmStoreSchemaVersion(wasmDb); err != nil {
+ return nil, nil, err
+ }
+ chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1, targetConfig.WasmTargets())
_, err = rawdb.ParseStateScheme(cacheConfig.StateScheme, chainDb)
if err != nil {
return nil, nil, err
@@ -706,7 +830,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
return chainDb, l2BlockChain, err
}
- return chainDb, l2BlockChain, nil
+ return rebuildLocalWasm(ctx, &config.Execution, l2BlockChain, chainDb, wasmDb, config.Init.RebuildLocalWasm)
}
func testTxIndexUpdated(chainDb ethdb.Database, lastBlock uint64) bool {
@@ -752,6 +876,7 @@ func testUpdateTxIndex(chainDb ethdb.Database, chainConfig *params.ChainConfig,
localWg.Add(1)
go func() {
batch := chainDb.NewBatch()
+ // #nosec G115
for blockNum := uint64(thread); blockNum <= lastBlock; blockNum += uint64(threads) {
blockHash := rawdb.ReadCanonicalHash(chainDb, blockNum)
block := rawdb.ReadBlock(chainDb, blockHash, blockNum)
diff --git a/cmd/nitro/init_test.go b/cmd/nitro/init_test.go
index 0797ac9b46..48d969f053 100644
--- a/cmd/nitro/init_test.go
+++ b/cmd/nitro/init_test.go
@@ -4,30 +4,36 @@
package main
import (
+ "archive/tar"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
+ "io"
"math/big"
"net"
"net/http"
"os"
"path"
"path/filepath"
+ "slices"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/node"
+ "github.com/google/go-cmp/cmp"
"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/conf"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/util/testhelpers"
+ "github.com/offchainlabs/nitro/util/testhelpers/env"
)
const (
@@ -207,6 +213,7 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
testCases := []struct {
name string
+ chain string
latestContents string
wantUrl func(string) string
}{
@@ -230,6 +237,12 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
latestContents: "https://some.domain.com/arb1/2024/21/archive.tar.gz",
wantUrl: func(serverAddr string) string { return "https://some.domain.com/arb1/2024/21/archive.tar.gz" },
},
+ {
+ name: "chain and contents with upper case",
+ chain: "ARB1",
+ latestContents: "ARB1/2024/21/ARCHIVE.TAR.GZ",
+ wantUrl: func(serverAddr string) string { return serverAddr + "/arb1/2024/21/archive.tar.gz" },
+ },
}
for _, testCase := range testCases {
@@ -237,6 +250,7 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
// Create latest file
serverDir := t.TempDir()
+
err := os.Mkdir(filepath.Join(serverDir, chain), dirPerm)
Require(t, err)
err = os.WriteFile(filepath.Join(serverDir, chain, latestFile), []byte(testCase.latestContents), filePerm)
@@ -251,7 +265,11 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
initConfig := conf.InitConfigDefault
initConfig.Latest = snapshotKind
initConfig.LatestBase = addr
- err = setLatestSnapshotUrl(ctx, &initConfig, chain)
+ configChain := testCase.chain
+ if configChain == "" {
+ configChain = chain
+ }
+ err = setLatestSnapshotUrl(ctx, &initConfig, configChain)
Require(t, err)
// Check url
@@ -286,38 +304,6 @@ func startFileServer(t *testing.T, ctx context.Context, dir string) string {
return addr
}
-func testIsNotExistError(t *testing.T, dbEngine string, isNotExist func(error) bool) {
- stackConf := node.DefaultConfig
- stackConf.DataDir = t.TempDir()
- stackConf.DBEngine = dbEngine
- stack, err := node.New(&stackConf)
- if err != nil {
- t.Fatalf("Failed to created test stack: %v", err)
- }
- defer stack.Close()
- readonly := true
- _, err = stack.OpenDatabaseWithExtraOptions("test", 16, 16, "", readonly, nil)
- if err == nil {
- t.Fatal("Opening non-existent database did not fail")
- }
- if !isNotExist(err) {
- t.Fatalf("Failed to classify error as not exist error - internal implementation of OpenDatabaseWithExtraOptions might have changed, err: %v", err)
- }
- err = errors.New("some other error")
- if isNotExist(err) {
- t.Fatalf("Classified other error as not exist, err: %v", err)
- }
-}
-
-func TestIsNotExistError(t *testing.T) {
- t.Run("TestIsPebbleNotExistError", func(t *testing.T) {
- testIsNotExistError(t, "pebble", isPebbleNotExistError)
- })
- t.Run("TestIsLeveldbNotExistError", func(t *testing.T) {
- testIsNotExistError(t, "leveldb", isLeveldbNotExistError)
- })
-}
-
func TestEmptyDatabaseDir(t *testing.T) {
testCases := []struct {
name string
@@ -368,6 +354,12 @@ func TestEmptyDatabaseDir(t *testing.T) {
}
}
+func defaultStylusTargetConfigForTest(t *testing.T) *gethexec.StylusTargetConfig {
+ targetConfig := gethexec.DefaultStylusTargetConfig
+ Require(t, targetConfig.Validate())
+ return &targetConfig
+}
+
func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) {
t.Parallel()
@@ -395,6 +387,7 @@ func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) {
&nodeConfig,
new(big.Int).SetUint64(nodeConfig.Chain.ID),
gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching),
+ defaultStylusTargetConfigForTest(t),
&nodeConfig.Persistent,
l1Client,
chaininfo.RollupAddresses{},
@@ -411,6 +404,7 @@ func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) {
&nodeConfig,
new(big.Int).SetUint64(nodeConfig.Chain.ID),
gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching),
+ defaultStylusTargetConfigForTest(t),
&nodeConfig.Persistent,
l1Client,
chaininfo.RollupAddresses{},
@@ -428,6 +422,7 @@ func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) {
&nodeConfig,
new(big.Int).SetUint64(nodeConfig.Chain.ID),
gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching),
+ defaultStylusTargetConfigForTest(t),
&nodeConfig.Persistent,
l1Client,
chaininfo.RollupAddresses{},
@@ -436,3 +431,289 @@ func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) {
t.Fatalf("Failed to detect incompatible state scheme")
}
}
+
+func writeKeys(t *testing.T, db ethdb.Database, keys [][]byte) {
+ t.Helper()
+ batch := db.NewBatch()
+ for _, key := range keys {
+ err := batch.Put(key, []byte("some data"))
+ if err != nil {
+ t.Fatal("Internal test error - failed to insert key:", err)
+ }
+ }
+ err := batch.Write()
+ if err != nil {
+ t.Fatal("Internal test error - failed to write batch:", err)
+ }
+ batch.Reset()
+}
+
+func checkKeys(t *testing.T, db ethdb.Database, keys [][]byte, shouldExist bool) {
+ t.Helper()
+ for _, key := range keys {
+ has, err := db.Has(key)
+ if err != nil {
+ t.Fatal("Failed to check key existence, key: ", key)
+ }
+ if shouldExist && !has {
+ t.Fatal("Key not found:", key)
+ }
+ if !shouldExist && has {
+ t.Fatal("Key found:", key, "k3:", string(key[:3]), "len", len(key))
+ }
+ }
+}
+
+func TestPurgeVersion0WasmStoreEntries(t *testing.T) {
+ stackConf := node.DefaultConfig
+ stackConf.DataDir = t.TempDir()
+ stack, err := node.New(&stackConf)
+ if err != nil {
+ t.Fatalf("Failed to create test stack: %v", err)
+ }
+ defer stack.Close()
+ db, err := stack.OpenDatabaseWithExtraOptions("wasm", NodeConfigDefault.Execution.Caching.DatabaseCache, NodeConfigDefault.Persistent.Handles, "wasm/", false, nil)
+ if err != nil {
+ t.Fatalf("Failed to open test db: %v", err)
+ }
+ var version0Keys [][]byte
+ for i := 0; i < 20; i++ {
+ version0Keys = append(version0Keys,
+ append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(32)...))
+ version0Keys = append(version0Keys,
+ append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(32)...))
+ }
+ var collidedKeys [][]byte
+ for i := 0; i < 5; i++ {
+ collidedKeys = append(collidedKeys,
+ append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(31)...))
+ collidedKeys = append(collidedKeys,
+ append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(31)...))
+ collidedKeys = append(collidedKeys,
+ append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(33)...))
+ collidedKeys = append(collidedKeys,
+ append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(33)...))
+ }
+ var otherKeys [][]byte
+ for i := 0x00; i <= 0xff; i++ {
+ if byte(i) == 'a' || byte(i) == 'm' {
+ continue
+ }
+ otherKeys = append(otherKeys,
+ append([]byte{0x00, 'w', byte(i)}, testhelpers.RandomSlice(32)...))
+ otherKeys = append(otherKeys,
+ append([]byte{0x00, 'w', byte(i)}, testhelpers.RandomSlice(32)...))
+ }
+ for i := 0; i < 10; i++ {
+ var randomSlice []byte
+ var j int
+ for j = 0; j < 10; j++ {
+ randomSlice = testhelpers.RandomSlice(testhelpers.RandomUint64(1, 40))
+ if len(randomSlice) >= 3 && !bytes.Equal(randomSlice[:3], []byte{0x00, 'w', 'a'}) && !bytes.Equal(randomSlice[:3], []byte{0x00, 'w', 'm'}) {
+ break
+ }
+ }
+ if j == 10 {
+ t.Fatal("Internal test error - failed to generate random key")
+ }
+ otherKeys = append(otherKeys, randomSlice)
+ }
+ writeKeys(t, db, version0Keys)
+ writeKeys(t, db, collidedKeys)
+ writeKeys(t, db, otherKeys)
+ checkKeys(t, db, version0Keys, true)
+ checkKeys(t, db, collidedKeys, true)
+ checkKeys(t, db, otherKeys, true)
+ err = purgeVersion0WasmStoreEntries(db)
+ if err != nil {
+ t.Fatal("Failed to purge version 0 keys, err:", err)
+ }
+ checkKeys(t, db, version0Keys, false)
+ checkKeys(t, db, collidedKeys, true)
+ checkKeys(t, db, otherKeys, true)
+}
+
+func TestOpenInitializeChainDbEmptyInit(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ stackConfig := testhelpers.CreateStackConfigForTest(t.TempDir())
+ stack, err := node.New(stackConfig)
+ defer stack.Close()
+ Require(t, err)
+
+ nodeConfig := NodeConfigDefault
+ nodeConfig.Execution.Caching.StateScheme = env.GetTestStateScheme()
+ nodeConfig.Chain.ID = 42161
+ nodeConfig.Node = *arbnode.ConfigDefaultL2Test()
+ nodeConfig.Init.Empty = true
+
+ l1Client := ethclient.NewClient(stack.Attach())
+
+ chainDb, blockchain, err := openInitializeChainDb(
+ ctx,
+ stack,
+ &nodeConfig,
+ new(big.Int).SetUint64(nodeConfig.Chain.ID),
+ gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching),
+ defaultStylusTargetConfigForTest(t),
+ &nodeConfig.Persistent,
+ l1Client,
+ chaininfo.RollupAddresses{},
+ )
+ Require(t, err)
+ blockchain.Stop()
+ err = chainDb.Close()
+ Require(t, err)
+}
+
+func TestExtractSnapshot(t *testing.T) {
+ testCases := []struct {
+ name string
+ archiveFiles []string
+ importWasm bool
+ wantFiles []string
+ }{
+ {
+ name: "extractAll",
+ importWasm: true,
+ archiveFiles: []string{
+ "arbitrumdata/000001.ldb",
+ "l2chaindata/000001.ldb",
+ "l2chaindata/ancients/000001.ldb",
+ "nodes/000001.ldb",
+ "wasm/000001.ldb",
+ },
+ wantFiles: []string{
+ "arbitrumdata/000001.ldb",
+ "l2chaindata/000001.ldb",
+ "l2chaindata/ancients/000001.ldb",
+ "nodes/000001.ldb",
+ "wasm/000001.ldb",
+ },
+ },
+ {
+ name: "extractAllButWasm",
+ importWasm: false,
+ archiveFiles: []string{
+ "arbitrumdata/000001.ldb",
+ "l2chaindata/000001.ldb",
+ "nodes/000001.ldb",
+ "wasm/000001.ldb",
+ },
+ wantFiles: []string{
+ "arbitrumdata/000001.ldb",
+ "l2chaindata/000001.ldb",
+ "nodes/000001.ldb",
+ },
+ },
+ {
+ name: "extractAllButWasmWithPrefixDot",
+ importWasm: false,
+ archiveFiles: []string{
+ "./arbitrumdata/000001.ldb",
+ "./l2chaindata/000001.ldb",
+ "./nodes/000001.ldb",
+ "./wasm/000001.ldb",
+ },
+ wantFiles: []string{
+ "arbitrumdata/000001.ldb",
+ "l2chaindata/000001.ldb",
+ "nodes/000001.ldb",
+ },
+ },
+ }
+ for _, testCase := range testCases {
+ t.Run(testCase.name, func(t *testing.T) {
+ // Create archive with dummy files
+ archiveDir := t.TempDir()
+ archivePath := path.Join(archiveDir, "archive.tar")
+ {
+ // Create context to close the file handlers
+ archiveFile, err := os.Create(archivePath)
+ Require(t, err)
+ defer archiveFile.Close()
+ tarWriter := tar.NewWriter(archiveFile)
+ defer tarWriter.Close()
+ for _, relativePath := range testCase.archiveFiles {
+ filePath := path.Join(archiveDir, relativePath)
+ dir := filepath.Dir(filePath)
+ const dirPerm = 0700
+ err := os.MkdirAll(dir, dirPerm)
+ Require(t, err)
+ const filePerm = 0600
+ err = os.WriteFile(filePath, []byte{0xbe, 0xef}, filePerm)
+ Require(t, err)
+ file, err := os.Open(filePath)
+ Require(t, err)
+ info, err := file.Stat()
+ Require(t, err)
+ header, err := tar.FileInfoHeader(info, "")
+ Require(t, err)
+ header.Name = relativePath
+ err = tarWriter.WriteHeader(header)
+ Require(t, err)
+ _, err = io.Copy(tarWriter, file)
+ Require(t, err)
+ }
+ }
+
+ // Extract archive and compare contents
+ targetDir := t.TempDir()
+ err := extractSnapshot(archivePath, targetDir, testCase.importWasm)
+ Require(t, err, "failed to extract snapshot")
+ gotFiles := []string{}
+ err = filepath.WalkDir(targetDir, func(path string, d os.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if !d.IsDir() {
+ gotFiles = append(gotFiles, path)
+ }
+ return nil
+ })
+ Require(t, err)
+ slices.Sort(gotFiles)
+ for i, f := range testCase.wantFiles {
+ testCase.wantFiles[i] = path.Join(targetDir, f)
+ }
+ if diff := cmp.Diff(gotFiles, testCase.wantFiles); diff != "" {
+ t.Fatal("extracted files don't match", diff)
+ }
+ })
+ }
+}
+
+func TestIsWasmDb(t *testing.T) {
+ testCases := []struct {
+ path string
+ want bool
+ }{
+ {"wasm", true},
+ {"wasm/", true},
+ {"wasm/something", true},
+ {"/wasm", true},
+ {"./wasm", true},
+ {"././wasm", true},
+ {"/./wasm", true},
+ {"WASM", true},
+ {"wAsM", true},
+ {"nitro/../wasm", true},
+ {"/nitro/../wasm", true},
+ {".//nitro/.//../wasm", true},
+ {"not-wasm", false},
+ {"l2chaindata/example@@", false},
+ {"somedir/wasm", false},
+ }
+ for _, testCase := range testCases {
+ name := fmt.Sprintf("%q", testCase.path)
+ t.Run(name, func(t *testing.T) {
+ got := isWasmDb(testCase.path)
+ if testCase.want != got {
+ t.Fatalf("want %v, but got %v", testCase.want, got)
+ }
+ })
+ }
+}
diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 2c7d07cf3b..bc2155a475 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -62,6 +62,7 @@ import (
"github.com/offchainlabs/nitro/staker"
"github.com/offchainlabs/nitro/staker/validatorwallet"
"github.com/offchainlabs/nitro/util/colors"
+ "github.com/offchainlabs/nitro/util/dbutil"
"github.com/offchainlabs/nitro/util/headerreader"
"github.com/offchainlabs/nitro/util/iostat"
"github.com/offchainlabs/nitro/util/rpcclient"
@@ -237,6 +238,10 @@ func mainImpl() int {
if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer {
log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer)
}
+ if nodeConfig.Node.SeqCoordinator.Enable && !nodeConfig.Node.ParentChainReader.Enable {
+ log.Error("Sequencer coordinator must be enabled with parent chain reader, try starting node with --parent-chain.connection.url")
+ return 1
+ }
var dataSigner signature.DataSignerFunc
var l1TransactionOptsValidator *bind.TransactOpts
@@ -244,7 +249,7 @@ func mainImpl() int {
// If sequencer and signing is enabled or batchposter is enabled without
// external signing sequencer will need a key.
sequencerNeedsKey := (nodeConfig.Node.Sequencer && !nodeConfig.Node.Feed.Output.DisableSigning) ||
- (nodeConfig.Node.BatchPoster.Enable && nodeConfig.Node.BatchPoster.DataPoster.ExternalSigner.URL == "")
+ (nodeConfig.Node.BatchPoster.Enable && (nodeConfig.Node.BatchPoster.DataPoster.ExternalSigner.URL == "" || nodeConfig.Node.DataAvailability.Enable))
validatorNeedsKey := nodeConfig.Node.Staker.OnlyCreateWalletContract ||
(nodeConfig.Node.Staker.Enable && !strings.EqualFold(nodeConfig.Node.Staker.Strategy, "watchtower") && nodeConfig.Node.Staker.DataPoster.ExternalSigner.URL == "")
@@ -366,7 +371,21 @@ func mainImpl() int {
if err != nil {
log.Crit("error getting rollup addresses config", "err", err)
}
- addr, err := validatorwallet.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1TransactionOptsValidator, l1Reader, true)
+
+ dataPoster, err := arbnode.DataposterOnlyUsedToCreateValidatorWalletContract(
+ ctx,
+ l1Reader,
+ l1TransactionOptsValidator,
+ &nodeConfig.Node.Staker.DataPoster,
+ new(big.Int).SetUint64(nodeConfig.ParentChain.ID),
+ )
+ if err != nil {
+ log.Crit("error creating data poster to create validator wallet contract", "err", err)
+ }
+ getExtraGas := func() uint64 { return nodeConfig.Node.Staker.ExtraGas }
+
+ // #nosec G115
+ addr, err := validatorwallet.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1Reader, true, dataPoster, getExtraGas)
if err != nil {
log.Crit("error creating validator wallet contract", "error", err, "address", l1TransactionOptsValidator.From.Hex())
}
@@ -418,65 +437,13 @@ func mainImpl() int {
// Check that node is compatible with on-chain WASM module root on startup and before any ArbOS upgrades take effect to prevent divergences
if nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Validation.Wasm.EnableWasmrootsCheck {
- // Fetch current on-chain WASM module root
- rollupUserLogic, err := rollupgen.NewRollupUserLogic(rollupAddrs.Rollup, l1Client)
+ err := checkWasmModuleRootCompatibility(ctx, nodeConfig.Validation.Wasm, l1Client, rollupAddrs)
if err != nil {
- log.Error("failed to create rollupUserLogic", "err", err)
- return 1
- }
- moduleRoot, err := rollupUserLogic.WasmModuleRoot(&bind.CallOpts{Context: ctx})
- if err != nil {
- log.Error("failed to get on-chain WASM module root", "err", err)
- return 1
- }
- if (moduleRoot == common.Hash{}) {
- log.Error("on-chain WASM module root is zero")
- return 1
- }
- // Check if the on-chain WASM module root belongs to the set of allowed module roots
- allowedWasmModuleRoots := nodeConfig.Validation.Wasm.AllowedWasmModuleRoots
- if len(allowedWasmModuleRoots) > 0 {
- moduleRootMatched := false
- for _, root := range allowedWasmModuleRoots {
- bytes, err := hex.DecodeString(strings.TrimPrefix(root, "0x"))
- if err == nil {
- if common.HexToHash(root) == common.BytesToHash(bytes) {
- moduleRootMatched = true
- break
- }
- continue
- }
- locator, locatorErr := server_common.NewMachineLocator(root)
- if locatorErr != nil {
- log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err)
- continue
- }
- path := locator.GetMachinePath(moduleRoot)
- if _, err := os.Stat(path); err == nil {
- moduleRootMatched = true
- break
- }
- }
- if !moduleRootMatched {
- log.Error("on-chain WASM module root did not match with any of the allowed WASM module roots")
- return 1
- }
- } else {
- // If no allowed module roots were provided in config, check if we have a validator machine directory for the on-chain WASM module root
- locator, err := server_common.NewMachineLocator(nodeConfig.Validation.Wasm.RootPath)
- if err != nil {
- log.Warn("failed to create machine locator. Skipping the check for compatibility with on-chain WASM module root", "err", err)
- } else {
- path := locator.GetMachinePath(moduleRoot)
- if _, err := os.Stat(path); err != nil {
- log.Error("unable to find validator machine directory for the on-chain WASM module root", "err", err)
- return 1
- }
- }
+ log.Warn("failed to check if node is compatible with on-chain WASM module root", "err", err)
}
}
- chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), &nodeConfig.Persistent, l1Client, rollupAddrs)
+ chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), &nodeConfig.Execution.StylusTarget, &nodeConfig.Persistent, l1Client, rollupAddrs)
if l2BlockChain != nil {
deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() })
}
@@ -494,6 +461,10 @@ func mainImpl() int {
log.Error("database is corrupt; delete it and try again", "database-directory", stack.InstanceDir())
return 1
}
+ if err := dbutil.UnfinishedConversionCheck(arbDb); err != nil {
+ log.Error("arbitrumdata unfinished conversion check error", "err", err)
+ return 1
+ }
fatalErrChan := make(chan error, 10)
@@ -501,6 +472,10 @@ func mainImpl() int {
if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil {
blocksReExecutor = blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan)
if nodeConfig.Init.ThenQuit {
+ if err := gethexec.PopulateStylusTargetCache(&nodeConfig.Execution.StylusTarget); err != nil {
+ log.Error("error populating stylus target cache", "err", err)
+ return 1
+ }
success := make(chan struct{})
blocksReExecutor.Start(ctx, success)
deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() })
@@ -573,7 +548,7 @@ func mainImpl() int {
l1TransactionOptsBatchPoster,
dataSigner,
fatalErrChan,
- big.NewInt(int64(nodeConfig.ParentChain.ID)),
+ new(big.Int).SetUint64(nodeConfig.ParentChain.ID),
blobReader,
)
if err != nil {
@@ -869,6 +844,7 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
l2ChainInfoIpfsDownloadPath := k.String("chain.info-ipfs-download-path")
l2ChainInfoFiles := k.Strings("chain.info-files")
l2ChainInfoJson := k.String("chain.info-json")
+ // #nosec G115
err = applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
if err != nil {
return nil, nil, err
@@ -891,10 +867,12 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
// Don't print wallet passwords
if nodeConfig.Conf.Dump {
err = confighelpers.DumpConfig(k, map[string]interface{}{
- "parent-chain.wallet.password": "",
- "parent-chain.wallet.private-key": "",
- "chain.dev-wallet.password": "",
- "chain.dev-wallet.private-key": "",
+ "node.batch-poster.parent-chain-wallet.password": "",
+ "node.batch-poster.parent-chain-wallet.private-key": "",
+ "node.staker.parent-chain-wallet.password": "",
+ "node.staker.parent-chain-wallet.private-key": "",
+ "chain.dev-wallet.password": "",
+ "chain.dev-wallet.private-key": "",
})
if err != nil {
return nil, nil, err
@@ -1012,13 +990,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
func initReorg(initConfig conf.InitConfig, chainConfig *params.ChainConfig, inboxTracker *arbnode.InboxTracker) error {
var batchCount uint64
if initConfig.ReorgToBatch >= 0 {
+ // #nosec G115
batchCount = uint64(initConfig.ReorgToBatch) + 1
} else {
var messageIndex arbutil.MessageIndex
if initConfig.ReorgToMessageBatch >= 0 {
+ // #nosec G115
messageIndex = arbutil.MessageIndex(initConfig.ReorgToMessageBatch)
} else if initConfig.ReorgToBlockBatch > 0 {
genesis := chainConfig.ArbitrumChainParams.GenesisBlockNum
+ // #nosec G115
blockNum := uint64(initConfig.ReorgToBlockBatch)
if blockNum < genesis {
return fmt.Errorf("ReorgToBlockBatch %d before genesis %d", blockNum, genesis)
@@ -1029,14 +1010,15 @@ func initReorg(initConfig conf.InitConfig, chainConfig *params.ChainConfig, inbo
return nil
}
// Reorg out the batch containing the next message
- var missing bool
+ var found bool
var err error
- batchCount, missing, err = inboxTracker.FindInboxBatchContainingMessage(messageIndex + 1)
+ batchCount, found, err = inboxTracker.FindInboxBatchContainingMessage(messageIndex + 1)
if err != nil {
return err
}
- if missing {
- return fmt.Errorf("cannot reorg to unknown message index %v", messageIndex)
+ if !found {
+ log.Warn("init-reorg: no need to reorg, because message ahead of chain", "messageIndex", messageIndex)
+ return nil
}
}
return inboxTracker.ReorgBatchesTo(batchCount)
@@ -1049,3 +1031,57 @@ type NodeConfigFetcher struct {
func (f *NodeConfigFetcher) Get() *arbnode.Config {
return &f.LiveConfig.Get().Node
}
+
+func checkWasmModuleRootCompatibility(ctx context.Context, wasmConfig valnode.WasmConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses) error {
+ // Fetch current on-chain WASM module root
+ rollupUserLogic, err := rollupgen.NewRollupUserLogic(rollupAddrs.Rollup, l1Client)
+ if err != nil {
+ return fmt.Errorf("failed to create RollupUserLogic: %w", err)
+ }
+ moduleRoot, err := rollupUserLogic.WasmModuleRoot(&bind.CallOpts{Context: ctx})
+ if err != nil {
+ return fmt.Errorf("failed to get on-chain WASM module root: %w", err)
+ }
+ if (moduleRoot == common.Hash{}) {
+ return errors.New("on-chain WASM module root is zero")
+ }
+ // Check if the on-chain WASM module root belongs to the set of allowed module roots
+ allowedWasmModuleRoots := wasmConfig.AllowedWasmModuleRoots
+ if len(allowedWasmModuleRoots) > 0 {
+ moduleRootMatched := false
+ for _, root := range allowedWasmModuleRoots {
+ bytes, err := hex.DecodeString(strings.TrimPrefix(root, "0x"))
+ if err == nil {
+ if common.HexToHash(root) == common.BytesToHash(bytes) {
+ moduleRootMatched = true
+ break
+ }
+ continue
+ }
+ locator, locatorErr := server_common.NewMachineLocator(root)
+ if locatorErr != nil {
+ log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err)
+ continue
+ }
+ path := locator.GetMachinePath(moduleRoot)
+ if _, err := os.Stat(path); err == nil {
+ moduleRootMatched = true
+ break
+ }
+ }
+ if !moduleRootMatched {
+ return errors.New("on-chain WASM module root did not match with any of the allowed WASM module roots")
+ }
+ } else {
+ // If no allowed module roots were provided in config, check if we have a validator machine directory for the on-chain WASM module root
+ locator, err := server_common.NewMachineLocator(wasmConfig.RootPath)
+ if err != nil {
+ return fmt.Errorf("failed to create machine locator: %w", err)
+ }
+ path := locator.GetMachinePath(moduleRoot)
+ if _, err := os.Stat(path); err != nil {
+ return fmt.Errorf("unable to find validator machine directory for the on-chain WASM module root: %w", err)
+ }
+ }
+ return nil
+}
diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go
index 096bb4b1ae..0755f5ff9e 100644
--- a/cmd/pruning/pruning.go
+++ b/cmd/pruning/pruning.go
@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -80,7 +81,7 @@ func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error {
var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$")
// Finds important roots to retain while proving
-func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) {
+func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) {
chainConfig := gethexec.TryReadStoredChainConfig(chainDb)
if chainConfig == nil {
return nil, errors.New("database doesn't have a chain config (was this node initialized?)")
@@ -212,6 +213,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node
}
if meta.ParentChainBlock <= l1BlockNum {
signedBlockNum := arbutil.MessageCountToBlockNumber(meta.MessageCount, genesisNum)
+ // #nosec G115
blockNum := uint64(signedBlockNum)
l2Hash := rawdb.ReadCanonicalHash(chainDb, blockNum)
l2Header := rawdb.ReadHeader(chainDb, l2Hash, blockNum)
@@ -232,7 +234,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node
return roots.roots, nil
}
-func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error {
+func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client *ethclient.Client, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error {
if cacheConfig.StateScheme == rawdb.PathScheme {
return nil
}
diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go
index bb01477414..5486ba3726 100644
--- a/cmd/staterecovery/staterecovery.go
+++ b/cmd/staterecovery/staterecovery.go
@@ -60,6 +60,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon
break
}
if time.Since(logged) > 1*time.Minute {
+ // #nosec G115
log.Info("Recreating missing states", "block", current, "target", target, "remaining", int64(target)-int64(current), "elapsed", time.Since(start), "recreated", recreated)
logged = time.Now()
}
diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go
index 55c9ec330f..19b5b1a24c 100644
--- a/cmd/util/confighelpers/configuration.go
+++ b/cmd/util/confighelpers/configuration.go
@@ -13,7 +13,6 @@ import (
"github.com/knadh/koanf"
"github.com/knadh/koanf/parsers/json"
- koanfjson "github.com/knadh/koanf/parsers/json"
"github.com/knadh/koanf/providers/confmap"
"github.com/knadh/koanf/providers/env"
"github.com/knadh/koanf/providers/file"
@@ -215,15 +214,17 @@ func devFlagArgs() []string {
}
func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) {
+ var expandedArgs []string
for _, arg := range args {
if arg == "--version" || arg == "-v" {
return nil, ErrVersion
} else if arg == "--dev" {
- args = devFlagArgs()
- break
+ expandedArgs = append(expandedArgs, devFlagArgs()...)
+ } else {
+ expandedArgs = append(expandedArgs, arg)
}
}
- if err := f.Parse(args); err != nil {
+ if err := f.Parse(expandedArgs); err != nil {
return nil, err
}
@@ -305,7 +306,7 @@ func DumpConfig(k *koanf.Koanf, extraOverrideFields map[string]interface{}) erro
return fmt.Errorf("error removing extra parameters before dump: %w", err)
}
- c, err := k.Marshal(koanfjson.Parser())
+ c, err := k.Marshal(json.Parser())
if err != nil {
return fmt.Errorf("unable to marshal config file to JSON: %w", err)
}
diff --git a/contracts b/contracts
index f7894d3a6d..b140ed63ac 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit f7894d3a6d4035ba60f51a7f1334f0f2d4f02dce
+Subproject commit b140ed63acdb53cb906ffd1fa3c36fdbd474364e
diff --git a/das/aggregator.go b/das/aggregator.go
index d944f8d48a..372e448e76 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -15,11 +15,11 @@ import (
flag "github.com/spf13/pflag"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/offchainlabs/nitro/arbstate/daprovider"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/blsSignatures"
"github.com/offchainlabs/nitro/das/dastree"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
@@ -114,7 +114,7 @@ func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services
func NewAggregatorWithL1Info(
config DataAvailabilityConfig,
services []ServiceDetails,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
seqInboxAddress common.Address,
) (*Aggregator, error) {
seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client)
@@ -130,6 +130,7 @@ func NewAggregatorWithSeqInboxCaller(
seqInboxCaller *bridgegen.SequencerInboxCaller,
) (*Aggregator, error) {
+ // #nosec G115
keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest))
if err != nil {
return nil, err
@@ -166,6 +167,7 @@ type storeResponse struct {
// If Store gets not enough successful responses by the time its context is canceled
// (eg via TimeoutWrapper) then it also returns an error.
func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
+ // #nosec G115
log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0))
allBackendsSucceeded := false
diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go
index 465b54f400..4de6c981cf 100644
--- a/das/chain_fetch_das.go
+++ b/das/chain_fetch_das.go
@@ -12,8 +12,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/das/dastree"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
)
@@ -42,7 +42,7 @@ type KeysetFetcher struct {
keysetCache syncedKeysetCache
}
-func NewKeysetFetcher(l1client arbutil.L1Interface, seqInboxAddr common.Address) (*KeysetFetcher, error) {
+func NewKeysetFetcher(l1client *ethclient.Client, seqInboxAddr common.Address) (*KeysetFetcher, error) {
seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client)
if err != nil {
return nil, err
diff --git a/das/das.go b/das/das.go
index 6bd02fbc75..0b03c05ad6 100644
--- a/das/das.go
+++ b/das/das.go
@@ -41,9 +41,10 @@ type DataAvailabilityConfig struct {
LocalCache CacheConfig `koanf:"local-cache"`
RedisCache RedisConfig `koanf:"redis-cache"`
- LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"`
- LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"`
- S3Storage S3StorageServiceConfig `koanf:"s3-storage"`
+ LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"`
+ LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"`
+ S3Storage S3StorageServiceConfig `koanf:"s3-storage"`
+ GoogleCloudStorage GoogleCloudStorageServiceConfig `koanf:"google-cloud-storage"`
MigrateLocalDBToFileStorage bool `koanf:"migrate-local-db-to-file-storage"`
@@ -114,6 +115,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) {
LocalDBStorageConfigAddOptions(prefix+".local-db-storage", f)
LocalFileStorageConfigAddOptions(prefix+".local-file-storage", f)
S3ConfigAddOptions(prefix+".s3-storage", f)
+ GoogleCloudConfigAddOptions(prefix+".google-cloud-storage", f)
f.Bool(prefix+".migrate-local-db-to-file-storage", DefaultDataAvailabilityConfig.MigrateLocalDBToFileStorage, "daserver will migrate all data on startup from local-db-storage to local-file-storage, then mark local-db-storage as unusable")
// Key config for storage
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index ca2ee8e7d4..241f2196b1 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -12,6 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
"golang.org/x/sync/errgroup"
"github.com/ethereum/go-ethereum/rpc"
@@ -21,6 +22,17 @@ import (
"github.com/offchainlabs/nitro/util/signature"
)
+var (
+ rpcClientStoreRequestGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/requests", nil)
+ rpcClientStoreSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/success", nil)
+ rpcClientStoreFailureGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/failure", nil)
+ rpcClientStoreStoredBytesGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/store/bytes", nil)
+ rpcClientStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpcclient/store/duration", nil, metrics.NewBoundedHistogramSample())
+
+ rpcClientSendChunkSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/sendchunk/success", nil)
+ rpcClientSendChunkFailureGauge = metrics.NewRegisteredGauge("arb/das/rpcclient/sendchunk/failure", nil)
+)
+
type DASRPCClient struct { // implements DataAvailabilityService
clnt *rpc.Client
url string
@@ -58,7 +70,20 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
}
func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
- timestamp := uint64(time.Now().Unix())
+ rpcClientStoreRequestGauge.Inc(1)
+ start := time.Now()
+ success := false
+ defer func() {
+ if success {
+ rpcClientStoreSuccessGauge.Inc(1)
+ } else {
+ rpcClientStoreFailureGauge.Inc(1)
+ }
+ rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
+ }()
+
+ // #nosec G115
+ timestamp := uint64(start.Unix())
nChunks := uint64(len(message)) / c.chunkSize
lastChunkSize := uint64(len(message)) % c.chunkSize
if lastChunkSize > 0 {
@@ -115,6 +140,9 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
return nil, err
}
+ rpcClientStoreStoredBytesGauge.Inc(int64(len(message)))
+ success = true
+
return &daprovider.DataAvailabilityCertificate{
DataHash: common.BytesToHash(storeResult.DataHash),
Timeout: uint64(storeResult.Timeout),
@@ -132,12 +160,15 @@ func (c *DASRPCClient) sendChunk(ctx context.Context, batchId, i uint64, chunk [
}
if err := c.clnt.CallContext(ctx, nil, "das_sendChunk", hexutil.Uint64(batchId), hexutil.Uint64(i), hexutil.Bytes(chunk), hexutil.Bytes(chunkReqSig)); err != nil {
+ rpcClientSendChunkFailureGauge.Inc(1)
return err
}
+ rpcClientSendChunkSuccessGauge.Inc(1)
return nil
}
func (c *DASRPCClient) legacyStore(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
+ // #nosec G115
log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", *c)
reqSig, err := applyDasSigner(c.signer, message, timeout)
diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go
index 9e6228ca5d..bb1be0384e 100644
--- a/das/dasRpcServer.go
+++ b/das/dasRpcServer.go
@@ -108,6 +108,7 @@ type StoreResult struct {
}
func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) {
+ // #nosec G115
log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s)
rpcStoreRequestGauge.Inc(1)
start := time.Now()
@@ -152,7 +153,7 @@ type SendChunkResult struct {
type batch struct {
chunks [][]byte
expectedChunks uint64
- seenChunks atomic.Int64
+ seenChunks atomic.Uint64
expectedChunkSize, expectedSize uint64
timeout uint64
startTime time.Time
@@ -247,7 +248,7 @@ func (b *batchBuilder) close(id uint64) ([]byte, uint64, time.Time, error) {
return nil, 0, time.Time{}, fmt.Errorf("unknown batch(%d)", id)
}
- if batch.expectedChunks != uint64(batch.seenChunks.Load()) {
+ if batch.expectedChunks != batch.seenChunks.Load() {
return nil, 0, time.Time{}, fmt.Errorf("incomplete batch(%d): got %d/%d chunks", id, batch.seenChunks.Load(), batch.expectedChunks)
}
@@ -277,6 +278,7 @@ func (s *DASRPCServer) StartChunkedStore(ctx context.Context, timestamp, nChunks
}
// Prevent replay of old messages
+ // #nosec G115
if time.Since(time.Unix(int64(timestamp), 0)).Abs() > time.Minute {
return nil, errors.New("too much time has elapsed since request was signed")
}
diff --git a/das/das_test.go b/das/das_test.go
index 179734c8b1..4971d454e5 100644
--- a/das/das_test.go
+++ b/das/das_test.go
@@ -55,6 +55,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) {
Require(t, err, "no das")
var daReader DataAvailabilityServiceReader = storageService
+ // #nosec G115
timeout := uint64(time.Now().Add(time.Hour * 24).Unix())
messageSaved := []byte("hello world")
cert, err := daWriter.Store(firstCtx, messageSaved, timeout)
@@ -146,6 +147,7 @@ func testDASMissingMessage(t *testing.T, storageType string) {
var daReader DataAvailabilityServiceReader = storageService
messageSaved := []byte("hello world")
+ // #nosec G115
timeout := uint64(time.Now().Add(time.Hour * 24).Unix())
cert, err := daWriter.Store(ctx, messageSaved, timeout)
Require(t, err, "Error storing message")
diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index d873f0568d..2bcbccaae3 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -61,12 +61,13 @@ func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...
return arbmath.FlipBit(keccord(prepend(LeafByte, keccord([]byte{}).Bytes())), 0)
}
- length := uint32(len(unrolled))
+ length := len(unrolled)
leaves := []node{}
- for bin := uint32(0); bin < length; bin += BinSize {
+ for bin := 0; bin < length; bin += BinSize {
end := arbmath.MinInt(bin+BinSize, length)
hash := keccord(prepend(LeafByte, keccord(unrolled[bin:end]).Bytes()))
- leaves = append(leaves, node{hash, end - bin})
+ // #nosec G115
+ leaves = append(leaves, node{hash, uint32(end - bin)})
}
layer := leaves
@@ -186,7 +187,9 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error)
leaves = append(leaves, leaf)
case NodeByte:
count := binary.BigEndian.Uint32(data[64:])
- power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count)))
+ power := arbmath.NextOrCurrentPowerOf2(uint64(count))
+ // #nosec G115
+ halfPower := uint32(power / 2)
if place.size != count {
return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, data)
@@ -194,11 +197,11 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error)
prior := node{
hash: common.BytesToHash(data[:32]),
- size: power / 2,
+ size: halfPower,
}
after := node{
hash: common.BytesToHash(data[32:64]),
- size: count - power/2,
+ size: count - halfPower,
}
// we want to expand leftward so we reverse their order
diff --git a/das/db_storage_service.go b/das/db_storage_service.go
index e3b6183c37..74bf12b927 100644
--- a/das/db_storage_service.go
+++ b/das/db_storage_service.go
@@ -8,6 +8,7 @@ import (
"context"
"errors"
"fmt"
+ "math"
"os"
"path/filepath"
"time"
@@ -172,7 +173,8 @@ func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint6
return dbs.db.Update(func(txn *badger.Txn) error {
e := badger.NewEntry(dastree.HashBytes(data), data)
- if dbs.discardAfterTimeout {
+ if dbs.discardAfterTimeout && timeout <= math.MaxInt64 {
+ // #nosec G115
e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0)))
}
return txn.SetEntry(e)
@@ -265,6 +267,7 @@ func (dbs *DBStorageService) String() string {
func (dbs *DBStorageService) HealthCheck(ctx context.Context) error {
testData := []byte("Test-Data")
+ // #nosec G115
err := dbs.Put(ctx, testData, uint64(time.Now().Add(time.Minute).Unix()))
if err != nil {
return err
diff --git a/das/factory.go b/das/factory.go
index 5742a39479..3e9771f932 100644
--- a/das/factory.go
+++ b/das/factory.go
@@ -7,11 +7,10 @@ import (
"context"
"errors"
"fmt"
- "math"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/util/headerreader"
"github.com/offchainlabs/nitro/util/signature"
@@ -66,6 +65,15 @@ func CreatePersistentStorageService(
storageServices = append(storageServices, s)
}
+ if config.GoogleCloudStorage.Enable {
+ s, err := NewGoogleCloudStorageService(config.GoogleCloudStorage)
+ if err != nil {
+ return nil, nil, err
+ }
+ lifecycleManager.Register(s)
+ storageServices = append(storageServices, s)
+ }
+
if len(storageServices) > 1 {
s, err := NewRedundantStorageService(ctx, storageServices)
if err != nil {
@@ -113,7 +121,7 @@ func CreateBatchPosterDAS(
ctx context.Context,
config *DataAvailabilityConfig,
dataSigner signature.DataSignerFunc,
- l1Reader arbutil.L1Interface,
+ l1Reader *ethclient.Client,
sequencerInboxAddr common.Address,
) (DataAvailabilityServiceWriter, DataAvailabilityServiceReader, *KeysetFetcher, *LifecycleManager, error) {
if !config.Enable {
@@ -187,12 +195,7 @@ func CreateDAComponentsForDaserver(
dasLifecycleManager.Register(restAgg)
syncConf := &config.RestAggregator.SyncToStorage
- var retentionPeriodSeconds uint64
- if uint64(syncConf.RetentionPeriod) == math.MaxUint64 {
- retentionPeriodSeconds = math.MaxUint64
- } else {
- retentionPeriodSeconds = uint64(syncConf.RetentionPeriod.Seconds())
- }
+ retentionPeriodSeconds := uint64(syncConf.RetentionPeriod.Seconds())
if syncConf.Eager {
if l1Reader == nil || seqInboxAddress == nil {
diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go
index 49f961da60..0a451678d0 100644
--- a/das/fallback_storage_service.go
+++ b/das/fallback_storage_service.go
@@ -85,6 +85,7 @@ func (f *FallbackStorageService) GetByHash(ctx context.Context, key common.Hash)
}
if dastree.ValidHash(key, data) {
putErr := f.StorageService.Put(
+ // #nosec G115
ctx, data, arbmath.SaturatingUAdd(uint64(time.Now().Unix()), f.backupRetentionSeconds),
)
if putErr != nil && !f.ignoreRetentionWriteErrors {
diff --git a/das/google_cloud_storage_service.go b/das/google_cloud_storage_service.go
new file mode 100644
index 0000000000..2c490f346c
--- /dev/null
+++ b/das/google_cloud_storage_service.go
@@ -0,0 +1,202 @@
+package das
+
+import (
+ googlestorage "cloud.google.com/go/storage"
+ "context"
+ "fmt"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/google/go-cmp/cmp"
+ "github.com/offchainlabs/nitro/arbstate/daprovider"
+ "github.com/offchainlabs/nitro/das/dastree"
+ "github.com/offchainlabs/nitro/util/pretty"
+ flag "github.com/spf13/pflag"
+ "google.golang.org/api/option"
+ "io"
+ "math"
+ "sort"
+ "time"
+)
+
+type GoogleCloudStorageOperator interface {
+ Bucket(name string) *googlestorage.BucketHandle
+ Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error
+ Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error)
+ Close(ctx context.Context) error
+}
+
+type GoogleCloudStorageClient struct {
+ client *googlestorage.Client
+}
+
+func (g *GoogleCloudStorageClient) Bucket(name string) *googlestorage.BucketHandle {
+ return g.client.Bucket(name)
+}
+
+func (g *GoogleCloudStorageClient) Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error {
+ obj := g.client.Bucket(bucket).Object(objectPrefix + EncodeStorageServiceKey(dastree.Hash(value)))
+ w := obj.NewWriter(ctx)
+
+ if _, err := fmt.Fprintln(w, value); err != nil {
+ return err
+ }
+ return w.Close()
+
+}
+
+func (g *GoogleCloudStorageClient) Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error) {
+ obj := g.client.Bucket(bucket).Object(objectPrefix + EncodeStorageServiceKey(key))
+ reader, err := obj.NewReader(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return io.ReadAll(reader)
+}
+
+func (g *GoogleCloudStorageClient) Close(ctx context.Context) error {
+ return g.client.Close()
+}
+
+type GoogleCloudStorageServiceConfig struct {
+ Enable bool `koanf:"enable"`
+ AccessToken string `koanf:"access-token"`
+ Bucket string `koanf:"bucket"`
+ ObjectPrefix string `koanf:"object-prefix"`
+ EnableExpiry bool `koanf:"enable-expiry"`
+ MaxRetention time.Duration `koanf:"max-retention"`
+}
+
+var DefaultGoogleCloudStorageServiceConfig = GoogleCloudStorageServiceConfig{}
+
+func GoogleCloudConfigAddOptions(prefix string, f *flag.FlagSet) {
+ f.Bool(prefix+".enable", DefaultGoogleCloudStorageServiceConfig.Enable, "enable storage/retrieval of sequencer batch data from an Google Cloud Storage bucket")
+ f.String(prefix+".access-token", DefaultGoogleCloudStorageServiceConfig.AccessToken, "Google Cloud Storage access token")
+ f.String(prefix+".bucket", DefaultGoogleCloudStorageServiceConfig.Bucket, "Google Cloud Storage bucket")
+ f.String(prefix+".object-prefix", DefaultGoogleCloudStorageServiceConfig.ObjectPrefix, "prefix to add to Google Cloud Storage objects")
+ f.Bool(prefix+".enable-expiry", DefaultLocalFileStorageConfig.EnableExpiry, "enable expiry of batches")
+ f.Duration(prefix+".max-retention", DefaultLocalFileStorageConfig.MaxRetention, "store requests with expiry times farther in the future than max-retention will be rejected")
+
+}
+
+type GoogleCloudStorageService struct {
+ operator GoogleCloudStorageOperator
+ bucket string
+ objectPrefix string
+ enableExpiry bool
+ maxRetention time.Duration
+}
+
+func NewGoogleCloudStorageService(config GoogleCloudStorageServiceConfig) (StorageService, error) {
+ var client *googlestorage.Client
+ var err error
+ // Note that if the credentials are not specified, the client library will find credentials using ADC(Application Default Credentials)
+ // https://cloud.google.com/docs/authentication/provide-credentials-adc.
+ if config.AccessToken == "" {
+ client, err = googlestorage.NewClient(context.Background())
+ } else {
+ client, err = googlestorage.NewClient(context.Background(), option.WithCredentialsJSON([]byte(config.AccessToken)))
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error creating Google Cloud Storage client: %w", err)
+ }
+ service := &GoogleCloudStorageService{
+ operator: &GoogleCloudStorageClient{client: client},
+ bucket: config.Bucket,
+ objectPrefix: config.ObjectPrefix,
+ enableExpiry: config.EnableExpiry,
+ maxRetention: config.MaxRetention,
+ }
+ if config.EnableExpiry {
+ lifecycleRule := googlestorage.LifecycleRule{
+ Action: googlestorage.LifecycleAction{Type: "Delete"},
+ Condition: googlestorage.LifecycleCondition{AgeInDays: int64(config.MaxRetention.Hours() / 24)}, // Objects older than 30 days
+ }
+ ctx := context.Background()
+ bucket := service.operator.Bucket(service.bucket)
+ // check if bucket exists (and others), and update expiration policy if enabled
+ attrs, err := bucket.Attrs(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error getting bucket attributes: %w", err)
+ }
+ attrs.Lifecycle.Rules = append(attrs.Lifecycle.Rules, lifecycleRule)
+
+ bucketAttrsToUpdate := googlestorage.BucketAttrsToUpdate{
+ Lifecycle: &attrs.Lifecycle,
+ }
+ if _, err := bucket.Update(ctx, bucketAttrsToUpdate); err != nil {
+ return nil, fmt.Errorf("failed to update bucket lifecycle: %w", err)
+ }
+ }
+ return service, nil
+}
+
+func (gcs *GoogleCloudStorageService) Put(ctx context.Context, data []byte, expiry uint64) error {
+ logPut("das.GoogleCloudStorageService.Store", data, expiry, gcs)
+ if expiry > math.MaxInt64 {
+ return fmt.Errorf("request expiry time (%v) exceeds max int64", expiry)
+ }
+ // #nosec G115
+ expiryTime := time.Unix(int64(expiry), 0)
+ currentTimePlusRetention := time.Now().Add(gcs.maxRetention)
+ if expiryTime.After(currentTimePlusRetention) {
+ return fmt.Errorf("requested expiry time (%v) exceeds current time plus maximum allowed retention period(%v)", expiryTime, currentTimePlusRetention)
+ }
+ if err := gcs.operator.Upload(ctx, gcs.bucket, gcs.objectPrefix, data); err != nil {
+ log.Error("das.GoogleCloudStorageService.Store", "err", err)
+ return err
+ }
+ return nil
+}
+
+func (gcs *GoogleCloudStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
+ log.Trace("das.GoogleCloudStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", gcs)
+ buf, err := gcs.operator.Download(ctx, gcs.bucket, gcs.objectPrefix, key)
+ if err != nil {
+ log.Error("das.GoogleCloudStorageService.GetByHash", "err", err)
+ return nil, err
+ }
+ return buf, nil
+}
+
+func (gcs *GoogleCloudStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) {
+ if gcs.enableExpiry {
+ return daprovider.KeepForever, nil
+ }
+ return daprovider.DiscardAfterDataTimeout, nil
+}
+
+func (gcs *GoogleCloudStorageService) Sync(ctx context.Context) error {
+ return nil
+}
+
+func (gcs *GoogleCloudStorageService) Close(ctx context.Context) error {
+ return gcs.operator.Close(ctx)
+}
+
+func (gcs *GoogleCloudStorageService) String() string {
+ return fmt.Sprintf("GoogleCloudStorageService(:%s)", gcs.bucket)
+}
+
+func (gcs *GoogleCloudStorageService) HealthCheck(ctx context.Context) error {
+ bucket := gcs.operator.Bucket(gcs.bucket)
+ // check if we have bucket permissions
+ permissions := []string{
+ "storage.buckets.get",
+ "storage.buckets.list",
+ "storage.objects.create",
+ "storage.objects.delete",
+ "storage.objects.list",
+ "storage.objects.get",
+ }
+ perms, err := bucket.IAM().TestPermissions(ctx, permissions)
+ if err != nil {
+ return fmt.Errorf("could not check permissions: %w", err)
+ }
+ sort.Strings(permissions)
+ sort.Strings(perms)
+ if !cmp.Equal(perms, permissions) {
+ return fmt.Errorf("permissions mismatch (-want +got):\n%s", cmp.Diff(permissions, perms))
+ }
+
+ return nil
+}
diff --git a/das/google_cloud_storage_service_test.go b/das/google_cloud_storage_service_test.go
new file mode 100644
index 0000000000..799d999bad
--- /dev/null
+++ b/das/google_cloud_storage_service_test.go
@@ -0,0 +1,84 @@
+package das
+
+import (
+ "bytes"
+ googlestorage "cloud.google.com/go/storage"
+ "context"
+ "errors"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/offchainlabs/nitro/das/dastree"
+ "testing"
+ "time"
+)
+
+type mockGCSClient struct {
+ storage map[string][]byte
+}
+
+func (c *mockGCSClient) Bucket(name string) *googlestorage.BucketHandle {
+ return nil
+}
+
+func (c *mockGCSClient) Download(ctx context.Context, bucket, objectPrefix string, key common.Hash) ([]byte, error) {
+ value, ok := c.storage[objectPrefix+EncodeStorageServiceKey(key)]
+ if !ok {
+ return nil, ErrNotFound
+ }
+ return value, nil
+}
+
+func (c *mockGCSClient) Close(ctx context.Context) error {
+ return nil
+}
+
+func (c *mockGCSClient) Upload(ctx context.Context, bucket, objectPrefix string, value []byte) error {
+ key := objectPrefix + EncodeStorageServiceKey(dastree.Hash(value))
+ c.storage[key] = value
+ return nil
+}
+
+func NewTestGoogleCloudStorageService(ctx context.Context, googleCloudStorageConfig GoogleCloudStorageServiceConfig) (StorageService, error) {
+ return &GoogleCloudStorageService{
+ bucket: googleCloudStorageConfig.Bucket,
+ objectPrefix: googleCloudStorageConfig.ObjectPrefix,
+ operator: &mockGCSClient{
+ storage: make(map[string][]byte),
+ },
+ maxRetention: googleCloudStorageConfig.MaxRetention,
+ }, nil
+}
+
+func TestNewGoogleCloudStorageService(t *testing.T) {
+ ctx := context.Background()
+ // #nosec G115
+ expiry := uint64(time.Now().Add(time.Hour).Unix())
+ googleCloudStorageServiceConfig := DefaultGoogleCloudStorageServiceConfig
+ googleCloudStorageServiceConfig.Enable = true
+ googleCloudStorageServiceConfig.MaxRetention = time.Hour * 24
+ googleCloudService, err := NewTestGoogleCloudStorageService(ctx, googleCloudStorageServiceConfig)
+ Require(t, err)
+
+ val1 := []byte("The first value")
+ val1CorrectKey := dastree.Hash(val1)
+ val2IncorrectKey := dastree.Hash(append(val1, 0))
+
+ _, err = googleCloudService.GetByHash(ctx, val1CorrectKey)
+ if !errors.Is(err, ErrNotFound) {
+ t.Fatal(err)
+ }
+
+ err = googleCloudService.Put(ctx, val1, expiry)
+ Require(t, err)
+
+ _, err = googleCloudService.GetByHash(ctx, val2IncorrectKey)
+ if !errors.Is(err, ErrNotFound) {
+ t.Fatal(err)
+ }
+
+ val, err := googleCloudService.GetByHash(ctx, val1CorrectKey)
+ Require(t, err)
+ if !bytes.Equal(val, val1) {
+ t.Fatal(val, val1)
+ }
+
+}
diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go
index 65ca6fe15c..5e64c34b10 100644
--- a/das/local_file_storage_service.go
+++ b/das/local_file_storage_service.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"os"
"path"
"path/filepath"
@@ -133,6 +134,10 @@ func (s *LocalFileStorageService) GetByHash(ctx context.Context, key common.Hash
func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry uint64) error {
logPut("das.LocalFileStorageService.Store", data, expiry, s)
+ if expiry > math.MaxInt64 {
+ return fmt.Errorf("request expiry time (%v) exceeds max int64", expiry)
+ }
+ // #nosec G115
expiryTime := time.Unix(int64(expiry), 0)
currentTimePlusRetention := time.Now().Add(s.config.MaxRetention)
if expiryTime.After(currentTimePlusRetention) {
@@ -182,6 +187,7 @@ func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry u
// new flat layout files, set their modification time accordingly.
if s.enableLegacyLayout {
tv := syscall.Timeval{
+ // #nosec G115
Sec: int64(expiry - uint64(s.legacyLayout.retention.Seconds())),
Usec: 0,
}
@@ -371,6 +377,7 @@ func migrate(fl *flatLayout, tl *trieLayout) error {
return err
}
+ // #nosec G115
expiryPath := tl.expiryPath(batch.key, uint64(batch.expiry.Unix()))
if err = createHardLink(newPath, expiryPath); err != nil {
return err
diff --git a/das/local_file_storage_service_test.go b/das/local_file_storage_service_test.go
index cc27e293e3..8a36664670 100644
--- a/das/local_file_storage_service_test.go
+++ b/das/local_file_storage_service_test.go
@@ -78,6 +78,7 @@ func TestMigrationNoExpiry(t *testing.T) {
Require(t, err)
s.enableLegacyLayout = true
+ // #nosec G115
now := uint64(time.Now().Unix())
err = s.Put(ctx, []byte("a"), now+1)
@@ -99,6 +100,7 @@ func TestMigrationNoExpiry(t *testing.T) {
getByHashAndCheck(t, s, "a", "b", "c", "d")
// Can still iterate by timestamp even if expiry disabled
+ // #nosec G115
countTimestampEntries(t, &s.layout, time.Unix(int64(now+11), 0), 4)
}
@@ -120,14 +122,19 @@ func TestMigrationExpiry(t *testing.T) {
now := time.Now()
// Use increments of expiry divisor in order to span multiple by-expiry-timestamp dirs
+ // #nosec G115
err = s.Put(ctx, []byte("a"), uint64(now.Add(-2*time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("b"), uint64(now.Add(-1*time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("c"), uint64(now.Add(time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("d"), uint64(now.Add(time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("e"), uint64(now.Add(2*time.Second*expiryDivisor).Unix()))
Require(t, err)
@@ -170,19 +177,26 @@ func TestExpiryDuplicates(t *testing.T) {
now := time.Now()
// Use increments of expiry divisor in order to span multiple by-expiry-timestamp dirs
+ // #nosec G115
err = s.Put(ctx, []byte("a"), uint64(now.Add(-2*time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("a"), uint64(now.Add(-1*time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("a"), uint64(now.Add(time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("d"), uint64(now.Add(time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("e"), uint64(now.Add(2*time.Second*expiryDivisor).Unix()))
Require(t, err)
+ // #nosec G115
err = s.Put(ctx, []byte("f"), uint64(now.Add(3*time.Second*expiryDivisor).Unix()))
Require(t, err)
// Put the same entry and expiry again, should have no effect
+ // #nosec G115
err = s.Put(ctx, []byte("f"), uint64(now.Add(3*time.Second*expiryDivisor).Unix()))
Require(t, err)
diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go
index 55f3ecd82c..77d3e8cd0f 100644
--- a/das/redis_storage_service_test.go
+++ b/das/redis_storage_service_test.go
@@ -16,6 +16,7 @@ import (
func TestRedisStorageService(t *testing.T) {
ctx := context.Background()
+ // #nosec G115
timeout := uint64(time.Now().Add(time.Hour).Unix())
baseStorageService := NewMemoryBackedStorageService(ctx)
server, err := miniredis.Run()
diff --git a/das/redundant_storage_test.go b/das/redundant_storage_test.go
index b56f62ee24..11d3b58264 100644
--- a/das/redundant_storage_test.go
+++ b/das/redundant_storage_test.go
@@ -17,6 +17,7 @@ const NumServices = 3
func TestRedundantStorageService(t *testing.T) {
ctx := context.Background()
+ // #nosec G115
timeout := uint64(time.Now().Add(time.Hour).Unix())
services := []StorageService{}
for i := 0; i < NumServices; i++ {
diff --git a/das/restful_server_test.go b/das/restful_server_test.go
index 1d3675749a..e6982f9db5 100644
--- a/das/restful_server_test.go
+++ b/das/restful_server_test.go
@@ -48,6 +48,7 @@ func TestRestfulClientServer(t *testing.T) {
server, port, err := NewRestfulDasServerOnRandomPort(LocalServerAddressForTest, storage)
Require(t, err)
+ // #nosec G115
err = storage.Put(ctx, data, uint64(time.Now().Add(time.Hour).Unix()))
Require(t, err)
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 24a470be5b..9cf481e015 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -21,7 +21,7 @@ import (
"github.com/offchainlabs/nitro/util/signature"
"github.com/ethereum/go-ethereum/common"
- "github.com/offchainlabs/nitro/arbutil"
+ "github.com/ethereum/go-ethereum/ethclient"
)
type BackendConfig struct {
@@ -83,7 +83,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig, signer
return NewAggregator(ctx, config, services)
}
-func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address, signer signature.DataSignerFunc) (*Aggregator, error) {
+func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client *ethclient.Client, seqInboxAddress common.Address, signer signature.DataSignerFunc) (*Aggregator, error) {
services, err := ParseServices(config.RPCAggregator, signer)
if err != nil {
return nil, err
@@ -119,7 +119,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
return nil, err
}
- d, err := NewServiceDetails(service, *pubKey, 1< bound {
@@ -88,7 +89,9 @@ func (api *ArbDebugAPI) evenlySpaceBlocks(start, end rpc.BlockNumber) (uint64, u
return 0, 0, 0, 0, fmt.Errorf("invalid block range: %v to %v", start.Int64(), end.Int64())
}
+ // #nosec G115
first := uint64(end.Int64() - step*(blocks-1)) // minus 1 to include the fact that we start from the last
+ // #nosec G115
return first, uint64(step), uint64(end), uint64(blocks), nil
}
@@ -222,11 +225,13 @@ func (api *ArbDebugAPI) TimeoutQueue(ctx context.Context, blockNum rpc.BlockNumb
blockNum, _ = api.blockchain.ClipToPostNitroGenesis(blockNum)
queue := TimeoutQueue{
+ // #nosec G115
BlockNumber: uint64(blockNum),
Tickets: []common.Hash{},
Timeouts: []uint64{},
}
+ // #nosec G115
state, _, err := stateAndHeader(api.blockchain, uint64(blockNum))
if err != nil {
return queue, err
diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go
index 8879c90702..a31b6b3736 100644
--- a/execution/gethexec/block_recorder.go
+++ b/execution/gethexec/block_recorder.go
@@ -16,6 +16,7 @@ import (
"github.com/offchainlabs/nitro/arbos/arbostypes"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/execution"
+ flag "github.com/spf13/pflag"
)
// BlockRecorder uses a separate statedatabase from the blockchain.
@@ -25,6 +26,8 @@ import (
// Most recent/advanced header we ever computed (lastHdr)
// Hopefully - some recent valid block. For that we always keep one candidate block until it becomes validated.
type BlockRecorder struct {
+ config *BlockRecorderConfig
+
recordingDatabase *arbitrum.RecordingDatabase
execEngine *ExecutionEngine
@@ -39,10 +42,33 @@ type BlockRecorder struct {
preparedLock sync.Mutex
}
-func NewBlockRecorder(config *arbitrum.RecordingDatabaseConfig, execEngine *ExecutionEngine, ethDb ethdb.Database) *BlockRecorder {
+type BlockRecorderConfig struct {
+ TrieDirtyCache int `koanf:"trie-dirty-cache"`
+ TrieCleanCache int `koanf:"trie-clean-cache"`
+ MaxPrepared int `koanf:"max-prepared"`
+}
+
+var DefaultBlockRecorderConfig = BlockRecorderConfig{
+ TrieDirtyCache: 1024,
+ TrieCleanCache: 16,
+ MaxPrepared: 1000,
+}
+
+func BlockRecorderConfigAddOptions(prefix string, f *flag.FlagSet) {
+ f.Int(prefix+".trie-dirty-cache", DefaultBlockRecorderConfig.TrieDirtyCache, "like trie-dirty-cache for the separate, recording database (used for validation)")
+ f.Int(prefix+".trie-clean-cache", DefaultBlockRecorderConfig.TrieCleanCache, "like trie-clean-cache for the separate, recording database (used for validation)")
+ f.Int(prefix+".max-prepared", DefaultBlockRecorderConfig.MaxPrepared, "max references to store in the recording database")
+}
+
+func NewBlockRecorder(config *BlockRecorderConfig, execEngine *ExecutionEngine, ethDb ethdb.Database) *BlockRecorder {
+ dbConfig := arbitrum.RecordingDatabaseConfig{
+ TrieDirtyCache: config.TrieDirtyCache,
+ TrieCleanCache: config.TrieCleanCache,
+ }
recorder := &BlockRecorder{
+ config: config,
execEngine: execEngine,
- recordingDatabase: arbitrum.NewRecordingDatabase(config, ethDb, execEngine.bc),
+ recordingDatabase: arbitrum.NewRecordingDatabase(&dbConfig, ethDb, execEngine.bc),
}
execEngine.SetRecorder(recorder)
return recorder
@@ -303,7 +329,7 @@ func (r *BlockRecorder) PrepareForRecord(ctx context.Context, start, end arbutil
r.updateLastHdr(header)
hdrNum++
}
- r.preparedAddTrim(references, 1000)
+ r.preparedAddTrim(references, r.config.MaxPrepared)
return nil
}
diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go
index 996b87a9e6..9b0c1a6f2f 100644
--- a/execution/gethexec/blockchain.go
+++ b/execution/gethexec/blockchain.go
@@ -37,7 +37,7 @@ type CachingConfig struct {
SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"`
MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"`
MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"`
- StylusLRUCache uint32 `koanf:"stylus-lru-cache"`
+ StylusLRUCacheCapacity uint32 `koanf:"stylus-lru-cache-capacity"`
StateScheme string `koanf:"state-scheme"`
StateHistory uint64 `koanf:"state-history"`
}
@@ -54,12 +54,13 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot")
f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues")
f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues")
- f.Uint32(prefix+".stylus-lru-cache", DefaultCachingConfig.StylusLRUCache, "initialized stylus programs to keep in LRU cache")
+ f.Uint32(prefix+".stylus-lru-cache-capacity", DefaultCachingConfig.StylusLRUCacheCapacity, "capacity, in megabytes, of the LRU cache that keeps initialized stylus programs")
f.String(prefix+".state-scheme", DefaultCachingConfig.StateScheme, "scheme to use for state trie storage (hash, path)")
f.Uint64(prefix+".state-history", DefaultCachingConfig.StateHistory, "number of recent blocks to retain state history for (path state-scheme only)")
}
func getStateHistory(maxBlockSpeed time.Duration) uint64 {
+ // #nosec G115
return uint64(24 * time.Hour / maxBlockSpeed)
}
@@ -75,7 +76,7 @@ var DefaultCachingConfig = CachingConfig{
SnapshotRestoreGasLimit: 300_000_000_000,
MaxNumberOfBlocksToSkipStateSaving: 0,
MaxAmountOfGasToSkipStateSaving: 0,
- StylusLRUCache: 256,
+ StylusLRUCacheCapacity: 256,
StateScheme: rawdb.HashScheme,
StateHistory: getStateHistory(DefaultSequencerConfig.MaxBlockSpeed),
}
diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go
index d8a592736c..a0f3a2f59a 100644
--- a/execution/gethexec/executionengine.go
+++ b/execution/gethexec/executionengine.go
@@ -7,7 +7,7 @@
package gethexec
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#cgo LDFLAGS: ${SRCDIR}/../../target/lib/libstylus.a -ldl -lm
#include "arbitrator.h"
*/
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
@@ -137,7 +138,7 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
defer s.cachedL1PriceData.mutex.Unlock()
if to < s.cachedL1PriceData.startOfL1PriceDataCache {
- log.Info("trying to trim older cache which doesnt exist anymore")
+ log.Debug("trying to trim older L1 price data cache which doesnt exist anymore")
} else if to >= s.cachedL1PriceData.endOfL1PriceDataCache {
s.cachedL1PriceData.startOfL1PriceDataCache = 0
s.cachedL1PriceData.endOfL1PriceDataCache = 0
@@ -149,10 +150,46 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
}
}
-func (s *ExecutionEngine) Initialize(rustCacheSize uint32) {
- if rustCacheSize != 0 {
- programs.ResizeWasmLruCache(rustCacheSize)
+func PopulateStylusTargetCache(targetConfig *StylusTargetConfig) error {
+ localTarget := rawdb.LocalTarget()
+ targets := targetConfig.WasmTargets()
+ var nativeSet bool
+ for _, target := range targets {
+ var effectiveStylusTarget string
+ switch target {
+ case rawdb.TargetWavm:
+ // skip wavm target
+ continue
+ case rawdb.TargetArm64:
+ effectiveStylusTarget = targetConfig.Arm64
+ case rawdb.TargetAmd64:
+ effectiveStylusTarget = targetConfig.Amd64
+ case rawdb.TargetHost:
+ effectiveStylusTarget = targetConfig.Host
+ default:
+ return fmt.Errorf("unsupported stylus target: %v", target)
+ }
+ isNative := target == localTarget
+ err := programs.SetTarget(target, effectiveStylusTarget, isNative)
+ if err != nil {
+ return fmt.Errorf("failed to set stylus target: %w", err)
+ }
+ nativeSet = nativeSet || isNative
+ }
+ if !nativeSet {
+ return fmt.Errorf("local target %v missing in list of archs %v", localTarget, targets)
}
+ return nil
+}
+
+func (s *ExecutionEngine) Initialize(rustCacheCapacityMB uint32, targetConfig *StylusTargetConfig) error {
+ if rustCacheCapacityMB != 0 {
+ programs.SetWasmLruCacheCapacity(arbmath.SaturatingUMul(uint64(rustCacheCapacityMB), 1024*1024))
+ }
+ if err := PopulateStylusTargetCache(targetConfig); err != nil {
+ return fmt.Errorf("error populating stylus target cache: %w", err)
+ }
+ return nil
}
func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) {
@@ -926,4 +963,15 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) {
}
}
})
+ // periodically update stylus lru cache metrics
+ s.LaunchThread(func(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(time.Minute):
+ programs.GetWasmLruCacheMetrics()
+ }
+ }
+ })
}
diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go
index 6624188cbd..cb06a58e74 100644
--- a/execution/gethexec/node.go
+++ b/execution/gethexec/node.go
@@ -5,40 +5,99 @@ import (
"errors"
"fmt"
"reflect"
+ "sort"
"sync/atomic"
"testing"
"github.com/ethereum/go-ethereum/arbitrum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/filters"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbos/arbostypes"
+ "github.com/offchainlabs/nitro/arbos/programs"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
+ "github.com/offchainlabs/nitro/util/dbutil"
"github.com/offchainlabs/nitro/util/headerreader"
flag "github.com/spf13/pflag"
)
+type StylusTargetConfig struct {
+ Arm64 string `koanf:"arm64"`
+ Amd64 string `koanf:"amd64"`
+ Host string `koanf:"host"`
+ ExtraArchs []string `koanf:"extra-archs"`
+
+ wasmTargets []ethdb.WasmTarget
+}
+
+func (c *StylusTargetConfig) WasmTargets() []ethdb.WasmTarget {
+ return c.wasmTargets
+}
+
+func (c *StylusTargetConfig) Validate() error {
+ targetsSet := make(map[ethdb.WasmTarget]bool, len(c.ExtraArchs))
+ for _, arch := range c.ExtraArchs {
+ target := ethdb.WasmTarget(arch)
+ if !rawdb.IsSupportedWasmTarget(target) {
+ return fmt.Errorf("unsupported architecture: %v, possible values: %s, %s, %s, %s", arch, rawdb.TargetWavm, rawdb.TargetArm64, rawdb.TargetAmd64, rawdb.TargetHost)
+ }
+ targetsSet[target] = true
+ }
+ if !targetsSet[rawdb.TargetWavm] {
+ return fmt.Errorf("%s target not found in archs list, archs: %v", rawdb.TargetWavm, c.ExtraArchs)
+ }
+ targetsSet[rawdb.LocalTarget()] = true
+ targets := make([]ethdb.WasmTarget, 0, len(c.ExtraArchs)+1)
+ for target := range targetsSet {
+ targets = append(targets, target)
+ }
+ sort.Slice(
+ targets,
+ func(i, j int) bool {
+ return targets[i] < targets[j]
+ })
+ c.wasmTargets = targets
+ return nil
+}
+
+var DefaultStylusTargetConfig = StylusTargetConfig{
+ Arm64: programs.DefaultTargetDescriptionArm,
+ Amd64: programs.DefaultTargetDescriptionX86,
+ Host: "",
+ ExtraArchs: []string{string(rawdb.TargetWavm)},
+}
+
+func StylusTargetConfigAddOptions(prefix string, f *flag.FlagSet) {
+ f.String(prefix+".arm64", DefaultStylusTargetConfig.Arm64, "stylus programs compilation target for arm64 linux")
+ f.String(prefix+".amd64", DefaultStylusTargetConfig.Amd64, "stylus programs compilation target for amd64 linux")
+ f.String(prefix+".host", DefaultStylusTargetConfig.Host, "stylus programs compilation target for system other than 64-bit ARM or 64-bit x86")
+ f.StringSlice(prefix+".extra-archs", DefaultStylusTargetConfig.ExtraArchs, fmt.Sprintf("Comma separated list of extra architectures to cross-compile stylus program to and cache in wasm store (additionally to local target). Currently must include at least %s. (supported targets: %s, %s, %s, %s)", rawdb.TargetWavm, rawdb.TargetWavm, rawdb.TargetArm64, rawdb.TargetAmd64, rawdb.TargetHost))
+}
+
type Config struct {
- ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"`
- Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"`
- RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"`
- TxPreChecker TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"`
- Forwarder ForwarderConfig `koanf:"forwarder"`
- ForwardingTarget string `koanf:"forwarding-target"`
- SecondaryForwardingTarget []string `koanf:"secondary-forwarding-target"`
- Caching CachingConfig `koanf:"caching"`
- RPC arbitrum.Config `koanf:"rpc"`
- TxLookupLimit uint64 `koanf:"tx-lookup-limit"`
- EnablePrefetchBlock bool `koanf:"enable-prefetch-block"`
- SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"`
+ ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"`
+ Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"`
+ RecordingDatabase BlockRecorderConfig `koanf:"recording-database"`
+ TxPreChecker TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"`
+ Forwarder ForwarderConfig `koanf:"forwarder"`
+ ForwardingTarget string `koanf:"forwarding-target"`
+ SecondaryForwardingTarget []string `koanf:"secondary-forwarding-target"`
+ Caching CachingConfig `koanf:"caching"`
+ RPC arbitrum.Config `koanf:"rpc"`
+ TxLookupLimit uint64 `koanf:"tx-lookup-limit"`
+ EnablePrefetchBlock bool `koanf:"enable-prefetch-block"`
+ SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"`
+ StylusTarget StylusTargetConfig `koanf:"stylus-target"`
forwardingTarget string
}
@@ -61,6 +120,9 @@ func (c *Config) Validate() error {
if c.forwardingTarget != "" && c.Sequencer.Enable {
return errors.New("ForwardingTarget set and sequencer enabled")
}
+ if err := c.StylusTarget.Validate(); err != nil {
+ return err
+ }
return nil
}
@@ -68,7 +130,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) {
arbitrum.ConfigAddOptions(prefix+".rpc", f)
SequencerConfigAddOptions(prefix+".sequencer", f)
headerreader.AddOptions(prefix+".parent-chain-reader", f)
- arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f)
+ BlockRecorderConfigAddOptions(prefix+".recording-database", f)
f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)")
f.StringSlice(prefix+".secondary-forwarding-target", ConfigDefault.SecondaryForwardingTarget, "secondary transaction forwarding target URL")
AddOptionsForNodeForwarderConfig(prefix+".forwarder", f)
@@ -77,13 +139,14 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) {
SyncMonitorConfigAddOptions(prefix+".sync-monitor", f)
f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)")
f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks")
+ StylusTargetConfigAddOptions(prefix+".stylus-target", f)
}
var ConfigDefault = Config{
RPC: arbitrum.DefaultConfig,
Sequencer: DefaultSequencerConfig,
ParentChainReader: headerreader.DefaultConfig,
- RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig,
+ RecordingDatabase: DefaultBlockRecorderConfig,
ForwardingTarget: "",
SecondaryForwardingTarget: []string{},
TxPreChecker: DefaultTxPreCheckerConfig,
@@ -91,6 +154,7 @@ var ConfigDefault = Config{
Caching: DefaultCachingConfig,
Forwarder: DefaultNodeForwarderConfig,
EnablePrefetchBlock: true,
+ StylusTarget: DefaultStylusTargetConfig,
}
type ConfigFetcher func() *Config
@@ -116,7 +180,7 @@ func CreateExecutionNode(
stack *node.Node,
chainDB ethdb.Database,
l2BlockChain *core.BlockChain,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
configFetcher ConfigFetcher,
) (*ExecutionNode, error) {
config := configFetcher()
@@ -181,11 +245,16 @@ func CreateExecutionNode(
var classicOutbox *ClassicOutboxRetriever
if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 {
- classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true) // TODO can we skip using ExtraOptions here?
- if err != nil {
+ classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true)
+ if dbutil.IsNotExistError(err) {
log.Warn("Classic Msg Database not found", "err", err)
classicOutbox = nil
+ } else if err != nil {
+ return nil, fmt.Errorf("Failed to open classic-msg database: %w", err)
} else {
+ if err := dbutil.UnfinishedConversionCheck(classicMsgDb); err != nil {
+ return nil, fmt.Errorf("classic-msg unfinished database conversion check error: %w", err)
+ }
classicOutbox = NewClassicOutboxRetriever(classicMsgDb)
}
}
@@ -245,9 +314,13 @@ func (n *ExecutionNode) MarkFeedStart(to arbutil.MessageIndex) {
}
func (n *ExecutionNode) Initialize(ctx context.Context) error {
- n.ExecEngine.Initialize(n.ConfigFetcher().Caching.StylusLRUCache)
+ config := n.ConfigFetcher()
+ err := n.ExecEngine.Initialize(config.Caching.StylusLRUCacheCapacity, &config.StylusTarget)
+ if err != nil {
+ return fmt.Errorf("error initializing execution engine: %w", err)
+ }
n.ArbInterface.Initialize(n)
- err := n.Backend.Start()
+ err = n.Backend.Start()
if err != nil {
return fmt.Errorf("error starting geth backend: %w", err)
}
diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go
index 90e3082062..cc98c7930f 100644
--- a/execution/gethexec/sequencer.go
+++ b/execution/gethexec/sequencer.go
@@ -887,11 +887,12 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) {
for _, queueItem := range queueItems {
s.txRetryQueue.Push(queueItem)
}
+ // #nosec G115
log.Error(
"cannot sequence: unknown L1 block or L1 timestamp too far from local clock time",
"l1Block", l1Block,
"l1Timestamp", time.Unix(int64(l1Timestamp), 0),
- "localTimestamp", time.Unix(int64(timestamp), 0),
+ "localTimestamp", time.Unix(timestamp, 0),
)
return true
}
@@ -900,7 +901,7 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) {
Kind: arbostypes.L1MessageType_L2Message,
Poster: l1pricing.BatchPosterAddress,
BlockNumber: l1Block,
- Timestamp: uint64(timestamp),
+ Timestamp: arbmath.SaturatingUCast[uint64](timestamp),
RequestId: nil,
L1BaseFee: nil,
}
@@ -1037,10 +1038,14 @@ func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) {
if err != nil {
return 0, fmt.Errorf("error encountered getting l1 pricing surplus while updating expectedSurplus: %w", err)
}
+ // #nosec G115
backlogL1GasCharged := int64(s.execEngine.backlogL1GasCharged())
+ // #nosec G115
backlogCallDataUnits := int64(s.execEngine.backlogCallDataUnits())
+ // #nosec G115
expectedSurplus := int64(surplus) + backlogL1GasCharged - backlogCallDataUnits*int64(l1GasPrice)
// update metrics
+ // #nosec G115
l1GasPriceGauge.Update(int64(l1GasPrice))
callDataUnitsBacklogGauge.Update(backlogCallDataUnits)
unusedL1GasChargeGauge.Update(backlogL1GasCharged)
diff --git a/execution/gethexec/stylus_tracer.go b/execution/gethexec/stylus_tracer.go
new file mode 100644
index 0000000000..4c18bb2ebe
--- /dev/null
+++ b/execution/gethexec/stylus_tracer.go
@@ -0,0 +1,199 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package gethexec
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync/atomic"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/eth/tracers"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/offchainlabs/nitro/util/containers"
+)
+
+func init() {
+ tracers.DefaultDirectory.Register("stylusTracer", newStylusTracer, false)
+}
+
+// stylusTracer captures Stylus HostIOs and returns them in a structured format to be used in Cargo
+// Stylus Replay.
+type stylusTracer struct {
+ open *containers.Stack[HostioTraceInfo]
+ stack *containers.Stack[*containers.Stack[HostioTraceInfo]]
+ interrupt atomic.Bool
+ reason error
+}
+
+// HostioTraceInfo contains the captured HostIO log returned by stylusTracer.
+type HostioTraceInfo struct {
+ // Name of the HostIO.
+ Name string `json:"name"`
+
+ // Arguments of the HostIO encoded as binary.
+ // For details about the encoding check the HostIO implemenation on
+ // arbitrator/wasm-libraries/user-host-trait.
+ Args hexutil.Bytes `json:"args"`
+
+ // Outputs of the HostIO encoded as binary.
+ // For details about the encoding check the HostIO implemenation on
+ // arbitrator/wasm-libraries/user-host-trait.
+ Outs hexutil.Bytes `json:"outs"`
+
+ // Amount of Ink before executing the HostIO.
+ StartInk uint64 `json:"startInk"`
+
+ // Amount of Ink after executing the HostIO.
+ EndInk uint64 `json:"endInk"`
+
+ // For *call HostIOs, the address of the called contract.
+ Address *common.Address `json:"address,omitempty"`
+
+ // For *call HostIOs, the steps performed by the called contract.
+ Steps *containers.Stack[HostioTraceInfo] `json:"steps,omitempty"`
+}
+
+// nestsHostios contains the hostios with nested calls.
+var nestsHostios = map[string]bool{
+ "call_contract": true,
+ "delegate_call_contract": true,
+ "static_call_contract": true,
+}
+
+func newStylusTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
+ return &stylusTracer{
+ open: containers.NewStack[HostioTraceInfo](),
+ stack: containers.NewStack[*containers.Stack[HostioTraceInfo]](),
+ }, nil
+}
+
+func (t *stylusTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
+ if t.interrupt.Load() {
+ return
+ }
+ info := HostioTraceInfo{
+ Name: name,
+ Args: args,
+ Outs: outs,
+ StartInk: startInk,
+ EndInk: endInk,
+ }
+ if nestsHostios[name] {
+ last, err := t.open.Pop()
+ if err != nil {
+ t.Stop(err)
+ return
+ }
+ if !strings.HasPrefix(last.Name, "evm_") || last.Name[4:] != info.Name {
+ t.Stop(fmt.Errorf("trace inconsistency for %v: last opcode is %v", info.Name, last.Name))
+ return
+ }
+ if last.Steps == nil {
+ t.Stop(fmt.Errorf("trace inconsistency for %v: nil steps", info.Name))
+ return
+ }
+ info.Address = last.Address
+ info.Steps = last.Steps
+ }
+ t.open.Push(info)
+}
+
+func (t *stylusTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+ if t.interrupt.Load() {
+ return
+ }
+
+ // This function adds the prefix evm_ because it assumes the opcode came from the EVM.
+ // If the opcode comes from WASM, the CaptureStylusHostio function will remove the evm prefix.
+ var name string
+ switch typ {
+ case vm.CALL:
+ name = "evm_call_contract"
+ case vm.DELEGATECALL:
+ name = "evm_delegate_call_contract"
+ case vm.STATICCALL:
+ name = "evm_static_call_contract"
+ case vm.CREATE:
+ name = "evm_create1"
+ case vm.CREATE2:
+ name = "evm_create2"
+ case vm.SELFDESTRUCT:
+ name = "evm_self_destruct"
+ }
+
+ inner := containers.NewStack[HostioTraceInfo]()
+ info := HostioTraceInfo{
+ Name: name,
+ Address: &to,
+ Steps: inner,
+ }
+ t.open.Push(info)
+ t.stack.Push(t.open)
+ t.open = inner
+}
+
+func (t *stylusTracer) CaptureExit(output []byte, gasUsed uint64, _ error) {
+ if t.interrupt.Load() {
+ return
+ }
+ var err error
+ t.open, err = t.stack.Pop()
+ if err != nil {
+ t.Stop(err)
+ }
+}
+
+func (t *stylusTracer) GetResult() (json.RawMessage, error) {
+ if t.reason != nil {
+ return nil, t.reason
+ }
+
+ var internalErr error
+ if t.open == nil {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.open is nil"))
+ }
+ if t.stack == nil {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack is nil"))
+ }
+ if !t.stack.Empty() {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack should be empty, but has %d values", t.stack.Len()))
+ }
+ if internalErr != nil {
+ log.Error("stylusTracer: internal error when generating a trace", "error", internalErr)
+ return nil, fmt.Errorf("internal error: %w", internalErr)
+ }
+
+ msg, err := json.Marshal(t.open)
+ if err != nil {
+ return nil, err
+ }
+ return msg, nil
+}
+
+func (t *stylusTracer) Stop(err error) {
+ t.reason = err
+ t.interrupt.Store(true)
+}
+
+// Unimplemented EVMLogger interface methods
+
+func (t *stylusTracer) CaptureArbitrumTransfer(env *vm.EVM, from, to *common.Address, value *big.Int, before bool, purpose string) {
+}
+func (t *stylusTracer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool) {}
+func (t *stylusTracer) CaptureArbitrumStorageSet(key, value common.Hash, depth int, before bool) {}
+func (t *stylusTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+}
+func (t *stylusTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {}
+func (t *stylusTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+}
+func (t *stylusTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
+}
+func (t *stylusTracer) CaptureTxStart(gasLimit uint64) {}
+func (t *stylusTracer) CaptureTxEnd(restGas uint64) {}
diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go
index dacfd32e81..e0ae330148 100644
--- a/execution/gethexec/tx_pre_checker.go
+++ b/execution/gethexec/tx_pre_checker.go
@@ -43,7 +43,7 @@ type TxPreCheckerConfig struct {
type TxPreCheckerConfigFetcher func() *TxPreCheckerConfig
var DefaultTxPreCheckerConfig = TxPreCheckerConfig{
- Strictness: TxPreCheckerStrictnessNone,
+ Strictness: TxPreCheckerStrictnessLikelyCompatible,
RequiredStateAge: 2,
RequiredStateMaxBlocks: 4,
}
@@ -161,6 +161,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
oldHeader := header
blocksTraversed := uint(0)
// find a block that's old enough
+ // #nosec G115
for now-int64(oldHeader.Time) < config.RequiredStateAge &&
(config.RequiredStateMaxBlocks <= 0 || blocksTraversed < config.RequiredStateMaxBlocks) &&
oldHeader.Number.Uint64() > 0 {
diff --git a/execution/gethexec/wasmstorerebuilder.go b/execution/gethexec/wasmstorerebuilder.go
index dcbee45a3f..e3eb8e9268 100644
--- a/execution/gethexec/wasmstorerebuilder.go
+++ b/execution/gethexec/wasmstorerebuilder.go
@@ -59,9 +59,14 @@ func WriteToKeyValueStore[T any](store ethdb.KeyValueStore, key []byte, val T) e
// It also stores a special value that is only set once when rebuilding commenced in RebuildingStartBlockHashKey as the block
// time of the latest block when rebuilding was first called, this is used to avoid recomputing of assembly and module of
// contracts that were created after rebuilding commenced since they would anyway already be added during sync.
-func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainDb ethdb.Database, maxRecreateStateDepth int64, l2Blockchain *core.BlockChain, position, rebuildingStartBlockHash common.Hash) error {
+func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainDb ethdb.Database, maxRecreateStateDepth int64, targetConfig *StylusTargetConfig, l2Blockchain *core.BlockChain, position, rebuildingStartBlockHash common.Hash) error {
var err error
var stateDb *state.StateDB
+
+ if err := PopulateStylusTargetCache(targetConfig); err != nil {
+ return fmt.Errorf("error populating stylus target cache: %w", err)
+ }
+
latestHeader := l2Blockchain.CurrentBlock()
// Attempt to get state at the start block when rebuilding commenced, if not available (in case of non-archival nodes) use latest state
rebuildingStartHeader := l2Blockchain.GetHeaderByHash(rebuildingStartBlockHash)
diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go
index 9179a52718..71ebbcce80 100644
--- a/execution/nodeInterface/NodeInterface.go
+++ b/execution/nodeInterface/NodeInterface.go
@@ -7,6 +7,7 @@ import (
"context"
"errors"
"fmt"
+ "math"
"math/big"
"sort"
@@ -234,6 +235,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
}
balanced := size == arbmath.NextPowerOf2(size)/2
+ // #nosec G115
treeLevels := int(arbmath.Log2ceil(size)) // the # of levels in the tree
proofLevels := treeLevels - 1 // the # of levels where a hash is needed (all but root)
walkLevels := treeLevels // the # of levels we need to consider when building walks
@@ -249,6 +251,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
place := leaf // where we are in the tree
for level := 0; level < walkLevels; level++ {
sibling := place ^ which
+ // #nosec G115
position := merkletree.NewLevelAndLeaf(uint64(level), sibling)
if sibling < size {
@@ -272,6 +275,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
total += power // The leaf for a given partial is the sum of the powers
leaf := total - 1 // of 2 preceding it. It's 1 less since we count from 0
+ // #nosec G115
partial := merkletree.NewLevelAndLeaf(uint64(level), leaf)
query = append(query, partial)
@@ -297,6 +301,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
mid := (lo + hi) / 2
+ // #nosec G115
block, err := n.backend.BlockByNumber(n.context, rpc.BlockNumber(mid))
if err != nil {
searchErr = err
@@ -405,6 +410,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
step.Leaf += 1 << step.Level // we start on the min partial's zero-hash sibling
known[step] = hash0
+ // #nosec G115
for step.Level < uint64(treeLevels) {
curr, ok := known[step]
@@ -643,6 +649,10 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h
// L2BlockRangeForL1 fetches the L1 block number of a given l2 block number.
// c ctx and evm mech arguments are not used but supplied to match the precompile function type in NodeInterface contract
func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) {
+ if l2BlockNum > math.MaxInt64 {
+ return 0, fmt.Errorf("requested l2 block number %d out of range for int64", l2BlockNum)
+ }
+ // #nosec G115
blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum))
if err != nil {
return 0, err
diff --git a/go-ethereum b/go-ethereum
index a1fc200e5b..b068464bf5 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit a1fc200e5b85a7737a9834ec28fb768fb7bde7bd
+Subproject commit b068464bf59ab5414f72c2d4aba855b8af5edc17
diff --git a/go.mod b/go.mod
index 6649973725..18e3a8b02a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,21 +1,22 @@
module github.com/offchainlabs/nitro
-go 1.21
+go 1.23
replace github.com/VictoriaMetrics/fastcache => ./fastcache
replace github.com/ethereum/go-ethereum => ./go-ethereum
require (
+ cloud.google.com/go/storage v1.43.0
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible
github.com/Shopify/toxiproxy v2.1.4+incompatible
github.com/alicebob/miniredis/v2 v2.32.1
github.com/andybalholm/brotli v1.0.4
- github.com/aws/aws-sdk-go-v2 v1.21.2
- github.com/aws/aws-sdk-go-v2/config v1.18.45
- github.com/aws/aws-sdk-go-v2/credentials v1.13.43
- github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10
- github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9
+ github.com/aws/aws-sdk-go-v2 v1.31.0
+ github.com/aws/aws-sdk-go-v2/config v1.27.40
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.38
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593
github.com/codeclysm/extract/v3 v3.0.2
@@ -30,7 +31,7 @@ require (
github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484
github.com/google/btree v1.1.2
github.com/google/go-cmp v0.6.0
- github.com/google/uuid v1.3.0
+ github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/holiman/uint256 v1.2.4
github.com/knadh/koanf v1.4.0
@@ -41,17 +42,39 @@ require (
github.com/rivo/tview v0.0.0-20240307173318-e804876934a1
github.com/spf13/pflag v1.0.5
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
- github.com/wasmerio/wasmer-go v1.0.4
github.com/wealdtech/go-merkletree v1.0.0
- golang.org/x/crypto v0.21.0
+ golang.org/x/crypto v0.24.0
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
- golang.org/x/sys v0.18.0
- golang.org/x/term v0.18.0
- golang.org/x/tools v0.16.0
+ golang.org/x/sys v0.21.0
+ golang.org/x/term v0.21.0
+ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
+ google.golang.org/api v0.187.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
-require github.com/google/go-querystring v1.1.0 // indirect
+require (
+ cloud.google.com/go v0.115.0 // indirect
+ cloud.google.com/go/auth v0.6.1 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
+ cloud.google.com/go/iam v1.1.8 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/google/s2a-go v0.1.7 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+ go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.24.0 // indirect
+ google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
+ google.golang.org/grpc v1.64.0 // indirect
+)
require (
github.com/DataDog/zstd v1.4.5 // indirect
@@ -59,20 +82,20 @@ require (
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect
- github.com/aws/smithy-go v1.15.0 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 // indirect
+ github.com/aws/smithy-go v1.22.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.10.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
@@ -95,7 +118,6 @@ require (
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
- github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gammazero/deque v0.2.1 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
@@ -108,9 +130,9 @@ require (
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
- github.com/golang/glog v1.0.0 // indirect
- github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/glog v1.2.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/flatbuffers v1.12.1 // indirect
github.com/google/go-github/v62 v62.0.0
@@ -123,7 +145,6 @@ require (
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
- github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect
github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect
github.com/klauspost/compress v1.17.2 // indirect
@@ -160,13 +181,13 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yuin/gopher-lua v1.1.1 // indirect
- go.opencensus.io v0.22.5 // indirect
- golang.org/x/mod v0.14.0 // indirect
- golang.org/x/net v0.23.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.22.0
- golang.org/x/sync v0.5.0
- golang.org/x/text v0.14.0 // indirect
- golang.org/x/time v0.3.0 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ golang.org/x/sync v0.7.0
+ golang.org/x/text v0.16.0 // indirect
+ golang.org/x/time v0.5.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)
diff --git a/go.sum b/go.sum
index 8529b2497d..f848c2aa2d 100644
--- a/go.sum
+++ b/go.sum
@@ -13,14 +13,26 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
+cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38=
+cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
+cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
+cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
+cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -30,6 +42,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
+cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -70,64 +84,53 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
-github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA=
-github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
+github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U=
+github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5/go.mod h1:wYSv6iDS621sEFLfKvpPE2ugjTuGlAG7iROg0hLOkfc=
github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
-github.com/aws/aws-sdk-go-v2/config v1.15.5/go.mod h1:ZijHHh0xd/A+ZY53az0qzC5tT46kt4JVCePf2NX9Lk4=
-github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes=
-github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE=
+github.com/aws/aws-sdk-go-v2/config v1.27.40 h1:sie4mPBGFOO+Z27+yHzvyN31G20h/bf2xb5mCbpLv2Q=
+github.com/aws/aws-sdk-go-v2/config v1.27.40/go.mod h1:4KW7Aa5tNo+0VHnuLnnE1vPHtwMurlNZNS65IdcewHA=
github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.0/go.mod h1:9YWk7VW+eyKsoIL6/CljkTrNVWBSK9pkqOPUuijid4A=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.38 h1:iM90eRhCeZtlkzCNCG1JysOzJXGYf5rx80aD1lUgNDU=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.38/go.mod h1:TCVYPZeQuLaYNEkf/TVn6k5k/zdVZZ7xH9po548VNNg=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4/go.mod h1:u/s5/Z+ohUQOPXl00m2yJVyioWDECsbpXTQlaqSlufc=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 h1:JL7cY85hyjlgfA29MMyAlItX+JYIH9XsxgMBS7jtlqA=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10/go.mod h1:p+ul5bLZSDRRXCZ/vePvfmZBH9akozXBJA5oMshWa5U=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27 h1:1oLpQSTuqbizOUEYdxAwH+Eveg+FOCOkg84Yijba6Kc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.27/go.mod h1:afo0vF9P3pjy1ny+cb45lzBjtKeEb5t5MPRxeTXpujw=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11/go.mod h1:0MR+sS1b/yxsfAPvAESrw8NfwUoxMinDyw6EYR9BS2U=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1 h1:C21IDZCm9Yu5xqjb3fKmxDoYvJXtw1DNlOmLZEIlY1M=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.1/go.mod h1:l/BbcfqDCT3hePawhy4ZRtewjtdkl6GWtd9/U+1penQ=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18 h1:OWYvKL53l1rbsUmW7bQyJVsYU/Ii3bbAAQIIFNbM0Tk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.18/go.mod h1:CUx0G1v3wG6l01tUB+j7Y8kclA8NSqK4ef0YG79a4cg=
github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5 h1:9LSZqt4v1JiehyZTrQnRFf2mY/awmyYNNY/b7zqtduU=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.5/go.mod h1:S8TVP66AAkMMdYYCNZGvrdEq9YRm+qLXjio4FqRnrEE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20 h1:rTWjG6AvWekO2B1LHeM3ktU7MqyX9rzWQ7hgzneZW7E=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.20/go.mod h1:RGW2DDpVc8hu6Y6yG8G5CHVmVOAn1oV8rNKOHRJyswg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4 h1:RE/DlZLYrz1OOmq8F28IXHLksuuvlpzUbvJ+SESCZBI=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.4/go.mod h1:oudbsSdDtazNj47z1ut1n37re9hDsKpk2ZI3v7KSxq0=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 h1:LCQKnopq2t4oQS3VKivlYTzAHCTJZZoQICM9fny7KHY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9/go.mod h1:iMYipLPXlWpBJ0KFX7QJHZ84rBydHBY8as2aQICTPWk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18 h1:eb+tFOIl9ZsUe2259/BKPeniKuz4/02zZFH/i4Nf8Rg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.18/go.mod h1:GVCC2IJNJTmdlyEsSmofEy7EfJncP7DNnXDzRjJ5Keg=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1 h1:jjHf+M6vCp/WzbyFEroY4/Nx8dJac520A0EPwlYk0Do=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1/go.mod h1:NLTqRLe3pUNu3nTEHI6XlHLKYmc8fbHUdMxAB6+s41Q=
github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.4/go.mod h1:cPDwJwsP4Kff9mldCXAmddjJL6JGQqtA3Mzer2zyr88=
-github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k=
-github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.4 h1:ck/Y8XWNR1gHa4BFkwE3oSu7XDJGwl+8TI7E/RB2EcQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.23.4/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4 h1:4f2/JKYZHAZbQ7koBpZ012bKi32NHPY0m7TDuJgsbug=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.4/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E=
github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.4/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE=
-github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.4 h1:uK6dUUdJtqutK1XO/tmNaQMJiPLCJY/eAeOOmqQ6ygY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.31.4/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8=
-github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
+github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -233,8 +236,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
-github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -269,6 +272,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
@@ -303,12 +311,13 @@ github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzq
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
+github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -332,8 +341,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
@@ -353,11 +362,10 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4=
@@ -368,8 +376,11 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -381,12 +392,18 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0=
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -443,9 +460,7 @@ github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0Gqw
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
@@ -670,14 +685,19 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -704,8 +724,6 @@ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs=
-github.com/wasmerio/wasmer-go v1.0.4/go.mod h1:0gzVdSfg6pysA6QVp6iVRPTagC6Wq9pOE8J86WKb2Fk=
github.com/wealdtech/go-merkletree v1.0.0 h1:DsF1xMzj5rK3pSQM6mPv8jlyJyHXhFxpnA2bwEjMMBY=
github.com/wealdtech/go-merkletree v1.0.0/go.mod h1:cdil512d/8ZC7Kx3bfrDvGMQXB25NTKbsm0rFrmDax4=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -731,8 +749,20 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -745,8 +775,8 @@ golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -783,8 +813,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -821,6 +851,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -829,8 +860,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -853,8 +884,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -930,14 +961,14 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -949,14 +980,15 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1005,8 +1037,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
-golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1027,6 +1059,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo=
+google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1065,6 +1099,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls=
+google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M=
+google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc=
+google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1080,7 +1120,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1093,8 +1136,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/nitro-testnode b/nitro-testnode
index f328006579..72141dd495 160000
--- a/nitro-testnode
+++ b/nitro-testnode
@@ -1 +1 @@
-Subproject commit f328006579cbefe22c6c57de3d6b86397fde4438
+Subproject commit 72141dd495ad965aa2a23723ea3e755037903ad7
diff --git a/precompiles/ArbAddressTable.go b/precompiles/ArbAddressTable.go
index 05f2275fd7..102fd55c3b 100644
--- a/precompiles/ArbAddressTable.go
+++ b/precompiles/ArbAddressTable.go
@@ -33,7 +33,7 @@ func (con ArbAddressTable) Decompress(c ctx, evm mech, buf []uint8, offset huge)
return addr{}, nil, errors.New("invalid offset in ArbAddressTable.Decompress")
}
result, nbytes, err := c.State.AddressTable().Decompress(buf[ioffset:])
- return result, big.NewInt(int64(nbytes)), err
+ return result, new(big.Int).SetUint64(nbytes), err
}
// Lookup the index of an address in the table
@@ -45,7 +45,7 @@ func (con ArbAddressTable) Lookup(c ctx, evm mech, addr addr) (huge, error) {
if !exists {
return nil, errors.New("address does not exist in AddressTable")
}
- return big.NewInt(int64(result)), nil
+ return new(big.Int).SetUint64(result), nil
}
// LookupIndex for an address in the table by index
@@ -66,11 +66,11 @@ func (con ArbAddressTable) LookupIndex(c ctx, evm mech, index huge) (addr, error
// Register adds an account to the table, shrinking its compressed representation
func (con ArbAddressTable) Register(c ctx, evm mech, addr addr) (huge, error) {
slot, err := c.State.AddressTable().Register(addr)
- return big.NewInt(int64(slot)), err
+ return new(big.Int).SetUint64(slot), err
}
// Size gets the number of addresses in the table
func (con ArbAddressTable) Size(c ctx, evm mech) (huge, error) {
size, err := c.State.AddressTable().Size()
- return big.NewInt(int64(size)), err
+ return new(big.Int).SetUint64(size), err
}
diff --git a/precompiles/ArbOwner.go b/precompiles/ArbOwner.go
index 066fc0a4c4..8b87445e0e 100644
--- a/precompiles/ArbOwner.go
+++ b/precompiles/ArbOwner.go
@@ -69,6 +69,9 @@ func (con ArbOwner) SetL2BaseFee(c ctx, evm mech, priceInWei huge) error {
// SetMinimumL2BaseFee sets the minimum base fee needed for a transaction to succeed
func (con ArbOwner) SetMinimumL2BaseFee(c ctx, evm mech, priceInWei huge) error {
+ if c.txProcessor.MsgIsNonMutating() && priceInWei.Sign() == 0 {
+ return errors.New("minimum base fee must be nonzero")
+ }
return c.State.L2PricingState().SetMinBaseFeeWei(priceInWei)
}
diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go
index d508d75752..93e8023603 100644
--- a/precompiles/ArbRetryableTx.go
+++ b/precompiles/ArbRetryableTx.go
@@ -149,7 +149,7 @@ func (con ArbRetryableTx) GetTimeout(c ctx, evm mech, ticketId bytes32) (huge, e
if err != nil {
return nil, err
}
- return big.NewInt(int64(timeout)), nil
+ return new(big.Int).SetUint64(timeout), nil
}
// Keepalive adds one lifetime period to the ticket's expiry
@@ -176,8 +176,9 @@ func (con ArbRetryableTx) Keepalive(c ctx, evm mech, ticketId bytes32) (huge, er
return big.NewInt(0), err
}
- err = con.LifetimeExtended(c, evm, ticketId, big.NewInt(int64(newTimeout)))
- return big.NewInt(int64(newTimeout)), err
+ bigNewTimeout := new(big.Int).SetUint64(newTimeout)
+ err = con.LifetimeExtended(c, evm, ticketId, bigNewTimeout)
+ return bigNewTimeout, err
}
// GetBeneficiary gets the beneficiary of the ticket
diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go
index 13f56d3b8e..d55067a09c 100644
--- a/precompiles/ArbSys.go
+++ b/precompiles/ArbSys.go
@@ -162,7 +162,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal
}
}
- leafNum := big.NewInt(int64(size - 1))
+ leafNum := new(big.Int).SetUint64(size - 1)
var blockTime big.Int
blockTime.SetUint64(evm.Context.Time)
@@ -199,7 +199,7 @@ func (con ArbSys) SendMerkleTreeState(c ctx, evm mech) (huge, bytes32, []bytes32
for i, par := range rawPartials {
partials[i] = par
}
- return big.NewInt(int64(size)), rootHash, partials, nil
+ return new(big.Int).SetUint64(size), rootHash, partials, nil
}
// WithdrawEth send paid eth to the destination on L1
diff --git a/precompiles/ArbWasm.go b/precompiles/ArbWasm.go
index 9f42cacb5a..bc24c8a6e8 100644
--- a/precompiles/ArbWasm.go
+++ b/precompiles/ArbWasm.go
@@ -5,6 +5,8 @@ package precompiles
import (
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/vm"
+ gethparams "github.com/ethereum/go-ethereum/params"
"github.com/offchainlabs/nitro/arbos/programs"
"github.com/offchainlabs/nitro/arbos/util"
"github.com/offchainlabs/nitro/util/arbmath"
@@ -32,12 +34,13 @@ func (con ArbWasm) ActivateProgram(c ctx, evm mech, value huge, program addr) (u
debug := evm.ChainConfig().DebugMode()
runMode := c.txProcessor.RunMode()
programs := c.State.Programs()
+ arbosVersion := c.State.ArbOSVersion()
// charge a fixed cost up front to begin activation
if err := c.Burn(1659168); err != nil {
return 0, nil, err
}
- version, codeHash, moduleHash, dataFee, takeAllGas, err := programs.ActivateProgram(evm, program, runMode, debug)
+ version, codeHash, moduleHash, dataFee, takeAllGas, err := programs.ActivateProgram(evm, program, arbosVersion, runMode, debug)
if takeAllGas {
_ = c.BurnOut()
}
@@ -133,6 +136,9 @@ func (con ArbWasm) MinInitGas(c ctx, _ mech) (uint64, uint64, error) {
params, err := c.State.Programs().Params()
init := uint64(params.MinInitGas) * programs.MinInitGasUnits
cached := uint64(params.MinCachedInitGas) * programs.MinCachedGasUnits
+ if c.State.ArbOSVersion() < gethparams.ArbosVersion_StylusChargingFixes {
+ return 0, 0, vm.ErrExecutionReverted
+ }
return init, cached, err
}
diff --git a/precompiles/precompile.go b/precompiles/precompile.go
index 9a6d8885ad..9a356c5a8e 100644
--- a/precompiles/precompile.go
+++ b/precompiles/precompile.go
@@ -329,6 +329,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr
gascost := func(args []reflect.Value) []reflect.Value {
cost := params.LogGas
+ // #nosec G115
cost += params.LogTopicGas * uint64(1+len(topicInputs))
var dataValues []interface{}
@@ -712,6 +713,8 @@ func (p *Precompile) Call(
tracingInfo: util.NewTracingInfo(evm, caller, precompileAddress, util.TracingDuringEVM),
}
+ // len(input) must be at least 4 because of the check near the start of this function
+ // #nosec G115
argsCost := params.CopyGas * arbmath.WordsForBytes(uint64(len(input)-4))
if err := callerCtx.Burn(argsCost); err != nil {
// user cannot afford the argument data supplied
diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go
index ecce77088a..18b33714aa 100644
--- a/precompiles/precompile_test.go
+++ b/precompiles/precompile_test.go
@@ -91,6 +91,7 @@ func TestEvents(t *testing.T) {
if log.Address != debugContractAddr {
Fail(t, "address mismatch:", log.Address, "vs", debugContractAddr)
}
+ // #nosec G115
if log.BlockNumber != uint64(blockNumber) {
Fail(t, "block number mismatch:", log.BlockNumber, "vs", blockNumber)
}
@@ -170,6 +171,7 @@ func TestEventCosts(t *testing.T) {
offsetBytes := 32
storeBytes := sizeBytes + offsetBytes + len(bytes)
storeBytes = storeBytes + 31 - (storeBytes+31)%32 // round up to a multiple of 32
+ // #nosec G115
storeCost := uint64(storeBytes) * params.LogDataGas
expected[i] = baseCost + addrCost + hashCost + storeCost
diff --git a/pubsub/consumer.go b/pubsub/consumer.go
index df3695606d..bd73e729e7 100644
--- a/pubsub/consumer.go
+++ b/pubsub/consumer.go
@@ -77,6 +77,10 @@ func (c *Consumer[Request, Response]) Start(ctx context.Context) {
)
}
+func (c *Consumer[Request, Response]) Id() string {
+ return c.id
+}
+
func (c *Consumer[Request, Response]) StopAndWait() {
c.StopWaiter.StopAndWait()
c.deleteHeartBeat(c.GetParentContext())
@@ -164,10 +168,12 @@ func (c *Consumer[Request, Response]) SetResult(ctx context.Context, messageID s
if err != nil {
return fmt.Errorf("marshaling result: %w", err)
}
+ log.Debug("consumer: setting result", "cid", c.id, "messageId", messageID)
acquired, err := c.client.SetNX(ctx, messageID, resp, c.cfg.ResponseEntryTimeout).Result()
if err != nil || !acquired {
return fmt.Errorf("setting result for message: %v, error: %w", messageID, err)
}
+ log.Debug("consumer: xack", "cid", c.id, "messageId", messageID)
if _, err := c.client.XAck(ctx, c.redisStream, c.redisGroup, messageID).Result(); err != nil {
return fmt.Errorf("acking message: %v, error: %w", messageID, err)
}
diff --git a/pubsub/producer.go b/pubsub/producer.go
index 2b1cdb5e3f..5eec3a4b52 100644
--- a/pubsub/producer.go
+++ b/pubsub/producer.go
@@ -205,35 +205,39 @@ func setMinIdInt(min *[2]uint64, id string) error {
// checkResponses checks iteratively whether response for the promise is ready.
func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.Duration {
minIdInt := [2]uint64{math.MaxUint64, math.MaxUint64}
+ log.Debug("redis producer: check responses starting")
p.promisesLock.Lock()
defer p.promisesLock.Unlock()
responded := 0
errored := 0
+ checked := 0
for id, promise := range p.promises {
if ctx.Err() != nil {
return 0
}
+ checked++
res, err := p.client.Get(ctx, id).Result()
if err != nil {
errSetId := setMinIdInt(&minIdInt, id)
if errSetId != nil {
- log.Error("error setting minId", "err", err)
+ log.Error("redis producer: error setting minId", "err", err)
return p.cfg.CheckResultInterval
}
if !errors.Is(err, redis.Nil) {
- log.Error("Error reading value in redis", "key", id, "error", err)
+ log.Error("redis producer: Error reading value in redis", "key", id, "error", err)
}
continue
}
var resp Response
if err := json.Unmarshal([]byte(res), &resp); err != nil {
promise.ProduceError(fmt.Errorf("error unmarshalling: %w", err))
- log.Error("Error unmarshaling", "value", res, "error", err)
+ log.Error("redis producer: Error unmarshaling", "value", res, "error", err)
errored++
} else {
promise.Produce(resp)
responded++
}
+ p.client.Del(ctx, id)
delete(p.promises, id)
}
var trimmed int64
@@ -245,7 +249,7 @@ func (p *Producer[Request, Response]) checkResponses(ctx context.Context) time.D
} else {
trimmed, trimErr = p.client.XTrimMaxLen(ctx, p.redisStream, 0).Result()
}
- log.Trace("trimming", "id", minId, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr)
+ log.Debug("trimming", "id", minId, "trimmed", trimmed, "responded", responded, "errored", errored, "trim-err", trimErr, "checked", checked)
return p.cfg.CheckResultInterval
}
diff --git a/relay/relay_stress_test.go b/relay/relay_stress_test.go
index 9a8875a429..575a77ee6f 100644
--- a/relay/relay_stress_test.go
+++ b/relay/relay_stress_test.go
@@ -47,6 +47,7 @@ func (r *DummyUpStream) PopulateFeedBacklogByNumber(ctx context.Context, backlog
was := r.broadcaster.GetCachedMessageCount()
var seqNums []arbutil.MessageIndex
for i := was; i < was+backlogSize; i++ {
+ // #nosec G115
seqNums = append(seqNums, arbutil.MessageIndex(i))
}
@@ -160,7 +161,7 @@ func largeBacklogRelayTestImpl(t *testing.T, numClients, backlogSize, l2MsgSize
connected++
}
}
- if int32(connected) != int32(numClients) {
+ if connected != numClients {
t.Fail()
}
log.Info("number of clients connected", "expected", numClients, "got", connected)
diff --git a/scripts/build-brotli.sh b/scripts/build-brotli.sh
index 7160936baa..1a23a88ae0 100755
--- a/scripts/build-brotli.sh
+++ b/scripts/build-brotli.sh
@@ -2,7 +2,7 @@
set -e
-mydir=`dirname $0`
+mydir=$(dirname "$0")
cd "$mydir"
BUILD_WASM=false
@@ -35,7 +35,7 @@ usage(){
echo "all relative paths are relative to script location"
}
-while getopts "s:t:c:D:wldhf" option; do
+while getopts "n:s:t:c:D:wldhf" option; do
case $option in
h)
usage
@@ -62,6 +62,9 @@ while getopts "s:t:c:D:wldhf" option; do
s)
SOURCE_DIR="$OPTARG"
;;
+ *)
+ usage
+ ;;
esac
done
@@ -74,7 +77,7 @@ if [ ! -d "$TARGET_DIR" ]; then
mkdir -p "${TARGET_DIR}lib"
ln -s "lib" "${TARGET_DIR}lib64" # Fedora build
fi
-TARGET_DIR_ABS=`cd -P "$TARGET_DIR"; pwd`
+TARGET_DIR_ABS=$(cd -P "$TARGET_DIR"; pwd)
if $USE_DOCKER; then
@@ -94,9 +97,9 @@ cd "$SOURCE_DIR"
if $BUILD_WASM; then
mkdir -p buildfiles/build-wasm
mkdir -p buildfiles/install-wasm
- TEMP_INSTALL_DIR_ABS=`cd -P buildfiles/install-wasm; pwd`
+ TEMP_INSTALL_DIR_ABS=$(cd -P buildfiles/install-wasm; pwd)
cd buildfiles/build-wasm
- cmake ../../ -DCMAKE_C_COMPILER=emcc -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS=-fPIC -DCMAKE_INSTALL_PREFIX="$TEMP_INSTALL_DIR_ABS" -DCMAKE_AR=`which emar` -DCMAKE_RANLIB=`which touch`
+ cmake ../../ -DCMAKE_C_COMPILER=emcc -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS=-fPIC -DCMAKE_INSTALL_PREFIX="$TEMP_INSTALL_DIR_ABS" -DCMAKE_AR="$(which emar)" -DCMAKE_RANLIB="$(which touch)"
make -j
make install
cp -rv "$TEMP_INSTALL_DIR_ABS/lib" "$TARGET_DIR_ABS/lib-wasm"
diff --git a/scripts/convert-databases.bash b/scripts/convert-databases.bash
new file mode 100755
index 0000000000..baddcdcacd
--- /dev/null
+++ b/scripts/convert-databases.bash
@@ -0,0 +1,275 @@
+#!/usr/bin/env bash
+
+DEFAULT_DBCONV=/usr/local/bin/dbconv
+DEFAULT_SRC=/home/user/.arbitrum/arb1/nitro
+
+dbconv=$DEFAULT_DBCONV
+src=$DEFAULT_SRC
+dst=
+force=false
+skip_existing=false
+clean="failed"
+
+l2chaindata_status="not started"
+l2chaindata_ancient_status="not started"
+arbitrumdata_status="not started"
+wasm_status="not started"
+classicmsg_status="not started"
+
+checkMissingValue () {
+ if [[ $1 -eq 0 || $2 == -* ]]; then
+ echo "missing $3 argument value"
+ exit 1
+ fi
+}
+
+printStatus() {
+ echo "== Conversion status:"
+ echo " l2chaindata database: $l2chaindata_status"
+ echo " l2chaindata database freezer (ancient): $l2chaindata_ancient_status"
+ echo " arbitrumdata database: $arbitrumdata_status"
+ echo " wasm database: $wasm_status"
+ echo " classic-msg database: $classicmsg_status"
+}
+
+printUsage() {
+echo Usage: "$0" \[OPTIONS..\]
+ echo
+ echo OPTIONS:
+ echo "--dbconv dbconv binary path (default: \"$DEFAULT_DBCONV\")"
+ echo "--src directory containing source databases (default: \"$DEFAULT_SRC\")"
+ echo "--dst destination directory"
+ echo "--force remove destination directory if it exists"
+    echo "--skip-existing     skip conversion of databases whose directories already exist in the destination directory"
+ echo "--clean sets what should be removed in case of error, possible values:"
+ echo " \"failed\" - remove database which conversion failed (default)"
+ echo " \"none\" - remove nothing, leave unfinished and potentially corrupted databases"
+ echo " \"all\" - remove whole destination directory"
+}
+
+removeDir() {
+ cmd="rm -r \"$1\""
+ echo "$cmd"
+ eval "$cmd"
+ return $?
+}
+
+cleanup() {
+ case $clean in
+ all)
+ echo "== Removing destination directory"
+ removeDir "$dst"
+ ;;
+ failed)
+ echo "== Note: removing only failed destination directory"
+ dstdir=$(echo "$dst"/"$1" | tr -s /)
+ removeDir "$dstdir"
+ ;;
+ none)
+ echo "== Warning: not removing destination directories, the destination databases might be incomplete and/or corrupted!"
+ ;;
+ *)
+ # shouldn't happen
+ echo "Script error, invalid --clean flag value: $clean"
+ exit 1
+ ;;
+
+ esac
+}
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --dbconv)
+ shift
+ checkMissingValue $# "$1" "--dbconv"
+ dbconv=$1
+ shift
+ ;;
+ --src)
+ shift
+ checkMissingValue $# "$1" "--src"
+ src=$1
+ shift
+ ;;
+ --dst)
+ shift
+ checkMissingValue $# "$1" "--dst"
+ dst=$1
+ shift
+ ;;
+ --force)
+ force=true
+ shift
+ ;;
+ --skip-existing)
+ skip_existing=true
+ shift
+ ;;
+ --clean)
+ shift
+ checkMissingValue $# "$1" "--clean"
+ clean=$1
+ shift
+ ;;
+ --help)
+ printUsage
+ exit 0
+ ;;
+ *)
+ printUsage
+            exit 1
+ esac
+done
+
+if $force && $skip_existing; then
+    echo Error: Cannot use both --force and --skip-existing
+ printUsage
+ exit 1
+fi
+
+if [ "$clean" != "all" ] && [ "$clean" != "failed" ] && [ "$clean" != "none" ] ; then
+ echo Error: Invalid --clean value: "$clean"
+ printUsage
+ exit 1
+fi
+
+if ! [ -e "$dbconv" ]; then
+ echo Error: Invalid dbconv binary path: "$dbconv" does not exist
+ exit 1
+fi
+
+if [ -z "$dst" ]; then
+ echo "Error: Missing destination directory (--dst)"
+ printUsage
+ exit 1
+fi
+
+if ! [ -d "$src" ]; then
+ echo Error: Invalid source directory: \""$src"\" is missing
+ exit 1
+fi
+
+src=$(realpath "$src")
+
+if ! [ -d "$src"/l2chaindata ]; then
+ echo Error: Invalid source directory: \""$src"/l2chaindata\" is missing
+ exit 1
+fi
+
+if ! [ -d "$src"/l2chaindata/ancient ]; then
+ echo Error: Invalid source directory: \""$src"/l2chaindata/ancient\" is missing
+ exit 1
+fi
+
+if ! [ -d "$src"/arbitrumdata ]; then
+ echo Error: Invalid source directory: missing "$src/arbitrumdata" directory
+ exit 1
+fi
+
+if [ -e "$dst" ] && ! $skip_existing; then
+ if $force; then
+ echo "== Warning! Destination already exists, --force is set, removing all files under path: $dst"
+ if ! removeDir "$dst"; then
+ echo Error: failed to remove "$dst"
+ exit 1
+ fi
+ else
+ echo Error: invalid destination path: "$dst" already exists
+ exit 1
+ fi
+fi
+
+convert_result=
+convert () {
+ srcdir="$src"/$1
+ dstdir=$(echo "$dst"/"$1" | tr -s /)
+ if ! [ -e "$dstdir" ]; then
+ echo "== Converting $1 db"
+ cmd="$dbconv --src.db-engine=leveldb --src.data \"$srcdir\" --dst.db-engine=pebble --dst.data \"$dstdir\" --convert --compact"
+ echo "$cmd"
+ if ! eval "$cmd"; then
+ cleanup "$1"
+ convert_result="FAILED"
+ return 1
+ fi
+ convert_result="converted"
+ return 0
+ else
+ if $skip_existing; then
+ echo "== Note: $dstdir directory already exists, skipping conversion (--skip-existing flag is set)"
+ convert_result="skipped"
+ return 0
+ else
+ convert_result="FAILED ($dstdir already exists)"
+ return 1
+ fi
+ fi
+}
+
+convert "l2chaindata"
+res=$?
+l2chaindata_status=$convert_result
+if [ $res -ne 0 ]; then
+ printStatus
+ exit 1
+fi
+
+if ! [ -e "$dst"/l2chaindata/ancient ]; then
+ ancient_src=$(echo "$src"/l2chaindata/ancient | tr -s /)
+ ancient_dst=$(echo "$dst"/l2chaindata/ | tr -s /)
+ echo "== Copying l2chaindata ancients"
+ cmd="cp -r \"$ancient_src\" \"$ancient_dst\""
+ echo "$cmd"
+ if ! eval "$cmd"; then
+ l2chaindata_ancient_status="FAILED (failed to copy)"
+ cleanup "l2chaindata"
+ printStatus
+ exit 1
+ fi
+ l2chaindata_ancient_status="copied"
+else
+ if $skip_existing; then
+ echo "== Note: l2chaindata/ancient directory already exists, skipping copy (--skip-existing flag is set)"
+ l2chaindata_ancient_status="skipped"
+ else
+ # unreachable, we already had to remove root directory
+ echo script error, reached unreachable
+ exit 1
+ fi
+fi
+
+convert "arbitrumdata"
+res=$?
+arbitrumdata_status=$convert_result
+if [ $res -ne 0 ]; then
+ printStatus
+ exit 1
+fi
+
+if [ -e "$src"/wasm ]; then
+ convert "wasm"
+ res=$?
+ wasm_status=$convert_result
+ if [ $res -ne 0 ]; then
+ printStatus
+ exit 1
+ fi
+else
+ echo "== Note: Source directory does not contain wasm database."
+ wasm_status="not found in source directory"
+fi
+
+if [ -e "$src"/classic-msg ]; then
+ convert "classic-msg"
+ res=$?
+ classicmsg_status=$convert_result
+ if [ $res -ne 0 ]; then
+ printStatus
+ exit 1
+ fi
+else
+ echo "== Note: Source directory does not contain classic-msg database."
+ classicmsg_status="not found in source directory"
+fi
+
+printStatus
diff --git a/scripts/fuzz.bash b/scripts/fuzz.bash
index 6271b917b6..a73c208e88 100755
--- a/scripts/fuzz.bash
+++ b/scripts/fuzz.bash
@@ -2,12 +2,12 @@
set -e
-mydir=`dirname $0`
+mydir=$(dirname "$0")
cd "$mydir"
function printusage {
- echo Usage: $0 --build \[--binary-path PATH\]
- echo " " $0 \ \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] \[--duration DURATION\]
+ echo Usage: "$0" --build \[--binary-path PATH\]
+ echo " " "$0" \ \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] \[--duration DURATION\]
echo
echo fuzzer names:
echo " " FuzzPrecompiles
@@ -22,7 +22,6 @@ if [[ $# -eq 0 ]]; then
exit
fi
-fuzz_executable=../target/bin/system_test.fuzz
binpath=../target/bin/
fuzzcachepath=../target/var/fuzz-cache
nitropath=../
@@ -72,7 +71,7 @@ while [[ $# -gt 0 ]]; do
shift
;;
FuzzPrecompiles | FuzzStateTransition)
- if [[ ! -z "$test_name" ]]; then
+ if [[ -n "$test_name" ]]; then
echo can only run one fuzzer at a time
exit 1
fi
@@ -81,7 +80,7 @@ while [[ $# -gt 0 ]]; do
shift
;;
FuzzInboxMultiplexer)
- if [[ ! -z "$test_name" ]]; then
+ if [[ -n "$test_name" ]]; then
echo can only run one fuzzer at a time
exit 1
fi
@@ -102,17 +101,17 @@ fi
if $run_build; then
for build_group in system_tests arbstate; do
- go test -c ${nitropath}/${build_group} -fuzz Fuzz -o "$binpath"/${build_group}.fuzz
+ go test -c "${nitropath}"/${build_group} -fuzz Fuzz -o "$binpath"/${build_group}.fuzz
done
fi
-if [[ ! -z $test_group ]]; then
- timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name || exit_status=$?
+if [[ -n $test_group ]]; then
+ timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz "$test_name" || exit_status=$?
fi
-if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then
+if [ -n "$exit_status" ] && [ "$exit_status" -ne 0 ] && [ "$exit_status" -ne 124 ]; then
echo "Fuzzing failed."
- exit $exit_status
+ exit "$exit_status"
fi
echo "Fuzzing succeeded."
diff --git a/scripts/startup-testnode.bash b/scripts/startup-testnode.bash
index 701e7ff59a..5313a9ec5d 100755
--- a/scripts/startup-testnode.bash
+++ b/scripts/startup-testnode.bash
@@ -5,9 +5,9 @@
timeout 60 ./nitro-testnode/test-node.bash --init --dev || exit_status=$?
-if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then
+if [ -n "$exit_status" ] && [ "$exit_status" -ne 0 ] && [ "$exit_status" -ne 124 ]; then
echo "Startup failed."
- exit $exit_status
+ exit "$exit_status"
fi
echo "Startup succeeded."
diff --git a/staker/block_challenge_backend.go b/staker/block_challenge_backend.go
index 42351789ba..0dd89865bd 100644
--- a/staker/block_challenge_backend.go
+++ b/staker/block_challenge_backend.go
@@ -219,6 +219,6 @@ func (b *BlockChallengeBackend) IssueExecChallenge(
},
machineStatuses,
globalStateHashes,
- big.NewInt(int64(numsteps)),
+ new(big.Int).SetUint64(numsteps),
)
}
diff --git a/staker/block_validator.go b/staker/block_validator.go
index df465cc31f..5a1f123693 100644
--- a/staker/block_validator.go
+++ b/staker/block_validator.go
@@ -17,6 +17,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -28,6 +29,8 @@ import (
"github.com/offchainlabs/nitro/util/stopwaiter"
"github.com/offchainlabs/nitro/validator"
"github.com/offchainlabs/nitro/validator/client/redis"
+ "github.com/offchainlabs/nitro/validator/inputs"
+ "github.com/offchainlabs/nitro/validator/server_api"
"github.com/spf13/pflag"
)
@@ -55,12 +58,12 @@ type BlockValidator struct {
chainCaughtUp bool
// can only be accessed from creation thread or if holding reorg-write
- nextCreateBatch []byte
- nextCreateBatchBlockHash common.Hash
- nextCreateBatchMsgCount arbutil.MessageIndex
- nextCreateBatchReread bool
- nextCreateStartGS validator.GoGlobalState
- nextCreatePrevDelayed uint64
+ nextCreateBatch *FullBatchInfo
+ nextCreateBatchReread bool
+ prevBatchCache map[uint64][]byte
+
+ nextCreateStartGS validator.GoGlobalState
+ nextCreatePrevDelayed uint64
// can only be accessed from from validation thread or if holding reorg-write
lastValidGS validator.GoGlobalState
@@ -93,6 +96,9 @@ type BlockValidator struct {
// for testing only
testingProgressMadeChan chan struct{}
+ // For troubleshooting failed validations
+ validationInputsWriter *inputs.Writer
+
fatalErr chan<- error
MemoryFreeLimitChecker resourcemanager.LimitChecker
@@ -105,13 +111,18 @@ type BlockValidatorConfig struct {
ValidationServerConfigs []rpcclient.ClientConfig `koanf:"validation-server-configs"`
ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"`
PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"`
+ RecordingIterLimit uint64 `koanf:"recording-iter-limit"`
ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"`
+ BatchCacheLimit uint32 `koanf:"batch-cache-limit"`
CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload
PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload
FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"`
Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"`
MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"`
ValidationServerConfigsList string `koanf:"validation-server-configs-list"`
+ // The directory to which the BlockValidator will write the
+ // block_inputs_.json files when WriteToFile() is called.
+ BlockInputsFilePath string `koanf:"block-inputs-file-path"`
memoryFreeLimit int
}
@@ -126,6 +137,9 @@ func (c *BlockValidatorConfig) Validate() error {
}
c.memoryFreeLimit = limit
}
+ if err := c.RedisValidationClientConfig.Validate(); err != nil {
+ return fmt.Errorf("failed to validate redis validation client config: %w", err)
+ }
streamsEnabled := c.RedisValidationClientConfig.Enabled()
if len(c.ValidationServerConfigs) == 0 {
c.ValidationServerConfigs = []rpcclient.ClientConfig{c.ValidationServer}
@@ -167,13 +181,16 @@ func BlockValidatorConfigAddOptions(prefix string, f *pflag.FlagSet) {
redis.ValidationClientConfigAddOptions(prefix+".redis-validation-client-config", f)
f.String(prefix+".validation-server-configs-list", DefaultBlockValidatorConfig.ValidationServerConfigsList, "array of execution rpc configs given as a json string. time duration should be supplied in number indicating nanoseconds")
f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations")
- f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)")
+ f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (stores batch-copy per block)")
f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)")
+ f.Uint32(prefix+".batch-cache-limit", DefaultBlockValidatorConfig.BatchCacheLimit, "limit number of old batches to keep in block-validator")
f.String(prefix+".current-module-root", DefaultBlockValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash)")
+ f.Uint64(prefix+".recording-iter-limit", DefaultBlockValidatorConfig.RecordingIterLimit, "limit on block recordings sent per iteration")
f.String(prefix+".pending-upgrade-module-root", DefaultBlockValidatorConfig.PendingUpgradeModuleRoot, "pending upgrade wasm module root to additionally validate (hash, 'latest' or empty)")
f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error")
BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f)
f.String(prefix+".memory-free-limit", DefaultBlockValidatorConfig.MemoryFreeLimit, "minimum free-memory limit after reaching which the blockvalidator pauses validation. Enabled by default as 1GB, to disable provide empty string")
+ f.String(prefix+".block-inputs-file-path", DefaultBlockValidatorConfig.BlockInputsFilePath, "directory to write block validation inputs files")
}
func BlockValidatorDangerousConfigAddOptions(prefix string, f *pflag.FlagSet) {
@@ -186,13 +203,16 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{
ValidationServer: rpcclient.DefaultClientConfig,
RedisValidationClientConfig: redis.DefaultValidationClientConfig,
ValidationPoll: time.Second,
- ForwardBlocks: 1024,
+ ForwardBlocks: 128,
PrerecordedBlocks: uint64(2 * runtime.NumCPU()),
+ BatchCacheLimit: 20,
CurrentModuleRoot: "current",
PendingUpgradeModuleRoot: "latest",
FailureIsFatal: true,
Dangerous: DefaultBlockValidatorDangerousConfig,
+ BlockInputsFilePath: "./target/validation_inputs",
MemoryFreeLimit: "default",
+ RecordingIterLimit: 20,
}
var TestBlockValidatorConfig = BlockValidatorConfig{
@@ -202,11 +222,14 @@ var TestBlockValidatorConfig = BlockValidatorConfig{
RedisValidationClientConfig: redis.TestValidationClientConfig,
ValidationPoll: 100 * time.Millisecond,
ForwardBlocks: 128,
+ BatchCacheLimit: 20,
PrerecordedBlocks: uint64(2 * runtime.NumCPU()),
+ RecordingIterLimit: 20,
CurrentModuleRoot: "latest",
PendingUpgradeModuleRoot: "latest",
FailureIsFatal: true,
Dangerous: DefaultBlockValidatorDangerousConfig,
+ BlockInputsFilePath: "./target/validation_inputs",
MemoryFreeLimit: "default",
}
@@ -263,7 +286,15 @@ func NewBlockValidator(
progressValidationsChan: make(chan struct{}, 1),
config: config,
fatalErr: fatalErr,
+ prevBatchCache: make(map[uint64][]byte),
+ }
+ valInputsWriter, err := inputs.NewWriter(
+ inputs.WithBaseDir(ret.stack.InstanceDir()),
+ inputs.WithSlug("BlockValidator"))
+ if err != nil {
+ return nil, err
}
+ ret.validationInputsWriter = valInputsWriter
if !config().Dangerous.ResetBlockValidation {
validated, err := ret.ReadLastValidatedInfo()
if err != nil {
@@ -311,6 +342,7 @@ func NewBlockValidator(
func atomicStorePos(addr *atomic.Uint64, val arbutil.MessageIndex, metr metrics.Gauge) {
addr.Store(uint64(val))
+ // #nosec G115
metr.Update(int64(val))
}
@@ -494,18 +526,16 @@ func (v *BlockValidator) sendRecord(s *validationStatus) error {
}
//nolint:gosec
-func (v *BlockValidator) writeToFile(validationEntry *validationEntry, moduleRoot common.Hash) error {
- input, err := validationEntry.ToInput([]string{"wavm"})
+func (v *BlockValidator) writeToFile(validationEntry *validationEntry) error {
+ input, err := validationEntry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm})
if err != nil {
return err
}
- for _, spawner := range v.execSpawners {
- if validator.SpawnerSupportsModule(spawner, moduleRoot) {
- _, err = spawner.WriteToFile(input, validationEntry.End, moduleRoot).Await(v.GetContext())
- return err
- }
+ inputJson := server_api.ValidationInputToJson(input)
+ if err := v.validationInputsWriter.Write(inputJson); err != nil {
+ return err
}
- return errors.New("did not find exec spawner for wasmModuleRoot")
+ return nil
}
func (v *BlockValidator) SetCurrentWasmModuleRoot(hash common.Hash) error {
@@ -562,32 +592,63 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e
}
if v.nextCreateStartGS.PosInBatch == 0 || v.nextCreateBatchReread {
// new batch
- found, batch, batchBlockHash, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch)
+ found, fullBatchInfo, err := v.readFullBatch(ctx, v.nextCreateStartGS.Batch)
if !found {
return false, err
}
- v.nextCreateBatch = batch
- v.nextCreateBatchBlockHash = batchBlockHash
- v.nextCreateBatchMsgCount = count
- validatorMsgCountCurrentBatch.Update(int64(count))
+ if v.nextCreateBatch != nil {
+ v.prevBatchCache[v.nextCreateBatch.Number] = v.nextCreateBatch.PostedData
+ }
+ v.nextCreateBatch = fullBatchInfo
+ // #nosec G115
+ validatorMsgCountCurrentBatch.Update(int64(fullBatchInfo.MsgCount))
+ batchCacheLimit := v.config().BatchCacheLimit
+ if len(v.prevBatchCache) > int(batchCacheLimit) {
+ for num := range v.prevBatchCache {
+ if num+uint64(batchCacheLimit) < v.nextCreateStartGS.Batch {
+ delete(v.prevBatchCache, num)
+ }
+ }
+ }
v.nextCreateBatchReread = false
}
endGS := validator.GoGlobalState{
BlockHash: endRes.BlockHash,
SendRoot: endRes.SendRoot,
}
- if pos+1 < v.nextCreateBatchMsgCount {
+ if pos+1 < v.nextCreateBatch.MsgCount {
endGS.Batch = v.nextCreateStartGS.Batch
endGS.PosInBatch = v.nextCreateStartGS.PosInBatch + 1
- } else if pos+1 == v.nextCreateBatchMsgCount {
+ } else if pos+1 == v.nextCreateBatch.MsgCount {
endGS.Batch = v.nextCreateStartGS.Batch + 1
endGS.PosInBatch = 0
} else {
- return false, fmt.Errorf("illegal batch msg count %d pos %d batch %d", v.nextCreateBatchMsgCount, pos, endGS.Batch)
+ return false, fmt.Errorf("illegal batch msg count %d pos %d batch %d", v.nextCreateBatch.MsgCount, pos, endGS.Batch)
}
chainConfig := v.streamer.ChainConfig()
+ prevBatchNums, err := msg.Message.PastBatchesRequired()
+ if err != nil {
+ return false, err
+ }
+ prevBatches := make([]validator.BatchInfo, 0, len(prevBatchNums))
+ // prevBatchNums are only used for batch reports, each is only used once
+ for _, batchNum := range prevBatchNums {
+ data, found := v.prevBatchCache[batchNum]
+ if found {
+ delete(v.prevBatchCache, batchNum)
+ } else {
+ data, err = v.readPostedBatch(ctx, batchNum)
+ if err != nil {
+ return false, err
+ }
+ }
+ prevBatches = append(prevBatches, validator.BatchInfo{
+ Number: batchNum,
+ Data: data,
+ })
+ }
entry, err := newValidationEntry(
- pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreateBatchBlockHash, v.nextCreatePrevDelayed, chainConfig,
+ pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, prevBatches, v.nextCreatePrevDelayed, chainConfig,
)
if err != nil {
return false, err
@@ -646,6 +707,10 @@ func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, erro
if recordUntil < pos {
return false, nil
}
+ recordUntilLimit := pos + arbutil.MessageIndex(v.config().RecordingIterLimit)
+ if recordUntil > recordUntilLimit {
+ recordUntil = recordUntilLimit
+ }
log.Trace("preparing to record", "pos", pos, "until", recordUntil)
// prepare could take a long time so we do it without a lock
err := v.recorder.PrepareForRecord(ctx, pos, recordUntil)
@@ -719,6 +784,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura
if err != nil {
printedCount = -1
} else {
+ // #nosec G115
printedCount = int64(batchMsgs) + int64(validated.GlobalState.PosInBatch)
}
log.Info("validated execution", "messageCount", printedCount, "globalstate", validated.GlobalState, "WasmRoots", validated.WasmRoots)
@@ -773,7 +839,7 @@ validationsLoop:
runEnd, err := run.Current()
if err == nil && runEnd != validationStatus.Entry.End {
err = fmt.Errorf("validation failed: expected %v got %v", validationStatus.Entry.End, runEnd)
- writeErr := v.writeToFile(validationStatus.Entry, run.WasmModuleRoot())
+ writeErr := v.writeToFile(validationStatus.Entry)
if writeErr != nil {
log.Warn("failed to write debug results file", "err", writeErr)
}
@@ -982,14 +1048,19 @@ func (v *BlockValidator) UpdateLatestStaked(count arbutil.MessageIndex, globalSt
v.nextCreateStartGS = globalState
v.nextCreatePrevDelayed = msg.DelayedMessagesRead
v.nextCreateBatchReread = true
+ if v.nextCreateBatch != nil {
+ v.prevBatchCache[v.nextCreateBatch.Number] = v.nextCreateBatch.PostedData
+ }
v.createdA.Store(countUint64)
}
// under the reorg mutex we don't need atomic access
if v.recordSentA.Load() < countUint64 {
v.recordSentA.Store(countUint64)
}
+ // #nosec G115
v.validatedA.Store(countUint64)
v.valLoopPos = count
+ // #nosec G115
validatorMsgCountValidatedGauge.Update(int64(countUint64))
err = v.writeLastValidated(globalState, nil) // we don't know which wasm roots were validated
if err != nil {
@@ -1006,6 +1077,7 @@ func (v *BlockValidator) ReorgToBatchCount(count uint64) {
defer v.reorgMutex.Unlock()
if v.nextCreateStartGS.Batch >= count {
v.nextCreateBatchReread = true
+ v.prevBatchCache = make(map[uint64][]byte)
}
}
@@ -1046,6 +1118,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex)
v.nextCreateStartGS = buildGlobalState(*res, endPosition)
v.nextCreatePrevDelayed = msg.DelayedMessagesRead
v.nextCreateBatchReread = true
+ v.prevBatchCache = make(map[uint64][]byte)
countUint64 := uint64(count)
v.createdA.Store(countUint64)
// under the reorg mutex we don't need atomic access
@@ -1054,6 +1127,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex)
}
if v.validatedA.Load() > countUint64 {
v.validatedA.Store(countUint64)
+ // #nosec G115
validatorMsgCountValidatedGauge.Update(int64(countUint64))
err := v.writeLastValidated(v.nextCreateStartGS, nil) // we don't know which wasm roots were validated
if err != nil {
@@ -1245,6 +1319,7 @@ func (v *BlockValidator) checkValidatedGSCaughtUp() (bool, error) {
atomicStorePos(&v.createdA, count, validatorMsgCountCreatedGauge)
atomicStorePos(&v.recordSentA, count, validatorMsgCountRecordSentGauge)
atomicStorePos(&v.validatedA, count, validatorMsgCountValidatedGauge)
+ // #nosec G115
validatorMsgCountValidatedGauge.Update(int64(count))
v.chainCaughtUp = true
return true, nil
diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go
index ed4fad6450..5dca2764e8 100644
--- a/staker/challenge-cache/cache.go
+++ b/staker/challenge-cache/cache.go
@@ -187,12 +187,12 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error {
if info.IsDir() {
matches := pattern.FindStringSubmatch(info.Name())
if len(matches) > 1 {
- dirNameMessageNum, err := strconv.Atoi(matches[1])
+ dirNameMessageNum, err := strconv.ParseUint(matches[1], 10, 64)
if err != nil {
return err
}
// Collect the directory path if the message number is <= the specified value.
- if dirNameMessageNum <= int(messageNumber) {
+ if dirNameMessageNum <= messageNumber {
pathsToDelete = append(pathsToDelete, path)
}
}
diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go
index af0a058f78..40be627b7a 100644
--- a/staker/challenge-cache/cache_test.go
+++ b/staker/challenge-cache/cache_test.go
@@ -166,8 +166,9 @@ func TestPrune(t *testing.T) {
}
key = &Key{
WavmModuleRoot: root,
- MessageHeight: uint64(i),
- StepHeights: []uint64{0},
+ // #nosec G115
+ MessageHeight: uint64(i),
+ StepHeights: []uint64{0},
}
if err = cache.Put(key, hashes); err != nil {
t.Fatal(err)
@@ -182,8 +183,9 @@ func TestPrune(t *testing.T) {
for i := 0; i <= 5; i++ {
key = &Key{
WavmModuleRoot: root,
- MessageHeight: uint64(i),
- StepHeights: []uint64{0},
+ // #nosec G115
+ MessageHeight: uint64(i),
+ StepHeights: []uint64{0},
}
if _, err = cache.Get(key, 3); !errors.Is(err, ErrNotFoundInCache) {
t.Error(err)
@@ -193,8 +195,9 @@ func TestPrune(t *testing.T) {
for i := 6; i < totalMessages; i++ {
key = &Key{
WavmModuleRoot: root,
- MessageHeight: uint64(i),
- StepHeights: []uint64{0},
+ // #nosec G115
+ MessageHeight: uint64(i),
+ StepHeights: []uint64{0},
}
items, err := cache.Get(key, 3)
if err != nil {
diff --git a/staker/challenge_manager.go b/staker/challenge_manager.go
index 80cafccced..27cb92a5c7 100644
--- a/staker/challenge_manager.go
+++ b/staker/challenge_manager.go
@@ -14,7 +14,9 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbutil"
@@ -293,7 +295,7 @@ func (m *ChallengeManager) bisect(ctx context.Context, backend ChallengeBackend,
if newChallengeLength < bisectionDegree {
bisectionDegree = newChallengeLength
}
- newSegments := make([][32]byte, int(bisectionDegree+1))
+ newSegments := make([][32]byte, bisectionDegree+1)
position := startSegmentPosition
normalSegmentLength := newChallengeLength / bisectionDegree
for i := range newSegments {
@@ -467,7 +469,7 @@ func (m *ChallengeManager) createExecutionBackend(ctx context.Context, step uint
if err != nil {
return fmt.Errorf("error creating validation entry for challenge %v msg %v for execution challenge: %w", m.challengeIndex, initialCount, err)
}
- input, err := entry.ToInput([]string{"wavm"})
+ input, err := entry.ToInput([]ethdb.WasmTarget{rawdb.TargetWavm})
if err != nil {
return fmt.Errorf("error getting validation entry input of challenge %v msg %v: %w", m.challengeIndex, initialCount, err)
}
@@ -564,6 +566,7 @@ func (m *ChallengeManager) Act(ctx context.Context) (*types.Transaction, error)
nextMovePos,
)
}
+ // #nosec G115
err = m.createExecutionBackend(ctx, uint64(nextMovePos))
if err != nil {
return nil, fmt.Errorf("error creating execution backend: %w", err)
diff --git a/staker/challenge_test.go b/staker/challenge_test.go
index 4534b04a25..33f1644c63 100644
--- a/staker/challenge_test.go
+++ b/staker/challenge_test.go
@@ -77,7 +77,7 @@ func CreateChallenge(
resultReceiverAddr,
maxInboxMessage,
[2][32]byte{startHashBytes, endHashBytes},
- big.NewInt(int64(endMachineSteps)),
+ new(big.Int).SetUint64(endMachineSteps),
asserter,
challenger,
big.NewInt(100),
diff --git a/staker/fast_confirm.go b/staker/fast_confirm.go
index 88f457f528..5dc7f01205 100644
--- a/staker/fast_confirm.go
+++ b/staker/fast_confirm.go
@@ -121,10 +121,12 @@ func (f *FastConfirmSafe) tryFastConfirmation(ctx context.Context, blockHash com
return err
}
if alreadyApproved.Cmp(common.Big1) == 0 {
+ log.Info("Already approved Safe tx hash for fast confirmation, checking if we can execute the Safe tx", "safeHash", safeTxHash, "nodeHash", nodeHash)
_, err = f.checkApprovedHashAndExecTransaction(ctx, fastConfirmCallData, safeTxHash)
return err
}
+ log.Info("Approving Safe tx hash to fast confirm", "safeHash", safeTxHash, "nodeHash", nodeHash)
auth, err := f.builder.Auth(ctx)
if err != nil {
return err
@@ -231,6 +233,7 @@ func (f *FastConfirmSafe) checkApprovedHashAndExecTransaction(ctx context.Contex
if err != nil {
return false, err
}
+ log.Info("Executing Safe tx to fast confirm", "safeHash", safeTxHash)
_, err = f.safe.ExecTransaction(
auth,
f.wallet.RollupAddress(),
@@ -249,5 +252,6 @@ func (f *FastConfirmSafe) checkApprovedHashAndExecTransaction(ctx context.Contex
}
return true, nil
}
+ log.Info("Not enough Safe tx approvals yet to fast confirm", "safeHash", safeTxHash)
return false, nil
}
diff --git a/staker/l1_validator.go b/staker/l1_validator.go
index dd9673ee0b..5b0c211324 100644
--- a/staker/l1_validator.go
+++ b/staker/l1_validator.go
@@ -19,6 +19,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/rollupgen"
@@ -45,7 +46,7 @@ type L1Validator struct {
rollup *RollupWatcher
rollupAddress common.Address
validatorUtils *rollupgen.ValidatorUtils
- client arbutil.L1Interface
+ client *ethclient.Client
builder *txbuilder.Builder
wallet ValidatorWalletInterface
callOpts bind.CallOpts
@@ -57,7 +58,7 @@ type L1Validator struct {
}
func NewL1Validator(
- client arbutil.L1Interface,
+ client *ethclient.Client,
wallet ValidatorWalletInterface,
validatorUtilsAddress common.Address,
callOpts bind.CallOpts,
@@ -247,6 +248,7 @@ func (v *L1Validator) generateNodeAction(
startStateProposedParentChain, err,
)
}
+ // #nosec G115
startStateProposedTime := time.Unix(int64(startStateProposedHeader.Time), 0)
v.txStreamer.PauseReorgs()
@@ -375,6 +377,7 @@ func (v *L1Validator) generateNodeAction(
return nil, false, fmt.Errorf("error getting rollup minimum assertion period: %w", err)
}
+ // #nosec G115
timeSinceProposed := big.NewInt(int64(l1BlockNumber) - int64(startStateProposedL1))
if timeSinceProposed.Cmp(minAssertionPeriod) < 0 {
// Too soon to assert
diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go
index b35bebd1c6..4d7db52322 100644
--- a/staker/rollup_watcher.go
+++ b/staker/rollup_watcher.go
@@ -4,16 +4,19 @@
package staker
import (
+ "bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"math/big"
+ "strings"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/rollupgen"
"github.com/offchainlabs/nitro/util/headerreader"
@@ -48,12 +51,19 @@ type RollupWatcher struct {
*rollupgen.RollupUserLogic
address common.Address
fromBlock *big.Int
- client arbutil.L1Interface
+ client RollupWatcherL1Interface
baseCallOpts bind.CallOpts
unSupportedL3Method atomic.Bool
+ supportedL3Method atomic.Bool
}
-func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) {
+type RollupWatcherL1Interface interface {
+ bind.ContractBackend
+ HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
+ FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error)
+}
+
+func NewRollupWatcher(address common.Address, client RollupWatcherL1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) {
con, err := rollupgen.NewRollupUserLogic(address, client)
if err != nil {
return nil, err
@@ -73,15 +83,41 @@ func (r *RollupWatcher) getCallOpts(ctx context.Context) *bind.CallOpts {
return &opts
}
+const noNodeErr string = "NO_NODE"
+
+func looksLikeNoNodeError(err error) bool {
+ if err == nil {
+ return false
+ }
+ if strings.Contains(err.Error(), noNodeErr) {
+ return true
+ }
+ var errWithData rpc.DataError
+ ok := errors.As(err, &errWithData)
+ if !ok {
+ return false
+ }
+ dataString, ok := errWithData.ErrorData().(string)
+ if !ok {
+ return false
+ }
+ data := common.FromHex(dataString)
+ return bytes.Contains(data, []byte(noNodeErr))
+}
+
func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64) (*big.Int, error) {
callOpts := r.getCallOpts(ctx)
if !r.unSupportedL3Method.Load() {
createdAtBlock, err := r.GetNodeCreationBlockForLogLookup(callOpts, nodeNum)
if err == nil {
+ r.supportedL3Method.Store(true)
return createdAtBlock, nil
}
- log.Trace("failed to call getNodeCreationBlockForLogLookup, falling back on node CreatedAtBlock field", "err", err)
- if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) {
+ if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) && !looksLikeNoNodeError(err) {
+ if r.supportedL3Method.Load() {
+ return nil, fmt.Errorf("getNodeCreationBlockForLogLookup failed despite previously succeeding: %w", err)
+ }
+ log.Info("getNodeCreationBlockForLogLookup does not seem to exist, falling back on node CreatedAtBlock field", "err", err)
r.unSupportedL3Method.Store(true)
} else {
return nil, err
@@ -196,7 +232,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64,
if logQueryRangeSize == 0 {
query.ToBlock = toBlock
} else {
- query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize)))
+ query.ToBlock = new(big.Int).Add(fromBlock, new(big.Int).SetUint64(logQueryRangeSize))
}
if query.ToBlock.Cmp(toBlock) > 0 {
query.ToBlock = toBlock
diff --git a/staker/staker.go b/staker/staker.go
index 9d917d1853..45e6f6f551 100644
--- a/staker/staker.go
+++ b/staker/staker.go
@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
@@ -142,6 +143,8 @@ func (c *L1ValidatorConfig) Validate() error {
return nil
}
+type L1ValidatorConfigFetcher func() *L1ValidatorConfig
+
var DefaultL1ValidatorConfig = L1ValidatorConfig{
Enable: true,
Strategy: "Watchtower",
@@ -257,7 +260,7 @@ type Staker struct {
confirmedNotifiers []LatestConfirmedNotifier
activeChallenge *ChallengeManager
baseCallOpts bind.CallOpts
- config L1ValidatorConfig
+ config L1ValidatorConfigFetcher
highGasBlocksBuffer *big.Int
lastActCalledBlock *big.Int
inactiveLastCheckedNode *nodeAndHash
@@ -266,7 +269,6 @@ type Staker struct {
inboxReader InboxReaderInterface
statelessBlockValidator *StatelessBlockValidator
fatalErr chan<- error
- enableFastConfirmation bool
fastConfirmSafe *FastConfirmSafe
}
@@ -279,7 +281,7 @@ type ValidatorWalletInterface interface {
TxSenderAddress() *common.Address
RollupAddress() common.Address
ChallengeManagerAddress() common.Address
- L1Client() arbutil.L1Interface
+ L1Client() *ethclient.Client
TestTransactions(context.Context, []*types.Transaction) error
ExecuteTransactions(context.Context, *txbuilder.Builder, common.Address) (*types.Transaction, error)
TimeoutChallenges(context.Context, []uint64) (*types.Transaction, error)
@@ -295,7 +297,7 @@ func NewStaker(
l1Reader *headerreader.HeaderReader,
wallet ValidatorWalletInterface,
callOpts bind.CallOpts,
- config L1ValidatorConfig,
+ config L1ValidatorConfigFetcher,
blockValidator *BlockValidator,
statelessBlockValidator *StatelessBlockValidator,
stakedNotifiers []LatestStakedNotifier,
@@ -303,8 +305,7 @@ func NewStaker(
validatorUtilsAddress common.Address,
fatalErr chan<- error,
) (*Staker, error) {
-
- if err := config.Validate(); err != nil {
+ if err := config().Validate(); err != nil {
return nil, err
}
client := l1Reader.Client()
@@ -314,7 +315,7 @@ func NewStaker(
return nil, err
}
stakerLastSuccessfulActionGauge.Update(time.Now().Unix())
- if config.StartValidationFromStaked && blockValidator != nil {
+ if config().StartValidationFromStaked && blockValidator != nil {
stakedNotifiers = append(stakedNotifiers, blockValidator)
}
inactiveValidatedNodes := btree.NewG(2, func(a, b validatedNode) bool {
@@ -327,7 +328,7 @@ func NewStaker(
confirmedNotifiers: confirmedNotifiers,
baseCallOpts: callOpts,
config: config,
- highGasBlocksBuffer: big.NewInt(config.PostingStrategy.HighGasDelayBlocks),
+ highGasBlocksBuffer: big.NewInt(config().PostingStrategy.HighGasDelayBlocks),
lastActCalledBlock: nil,
inboxReader: statelessBlockValidator.inboxReader,
statelessBlockValidator: statelessBlockValidator,
@@ -345,11 +346,12 @@ func (s *Staker) Initialize(ctx context.Context) error {
if walletAddressOrZero != (common.Address{}) {
s.updateStakerBalanceMetric(ctx)
}
- if s.blockValidator != nil && s.config.StartValidationFromStaked {
+ if s.blockValidator != nil && s.config().StartValidationFromStaked {
latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero)
if err != nil {
return err
}
+ // #nosec G115
stakerLatestStakedNodeGauge.Update(int64(latestStaked))
if latestStaked == 0 {
return nil
@@ -360,7 +362,10 @@ func (s *Staker) Initialize(ctx context.Context) error {
return err
}
- return s.blockValidator.InitAssumeValid(stakedInfo.AfterState().GlobalState)
+ err = s.blockValidator.InitAssumeValid(stakedInfo.AfterState().GlobalState)
+ if err != nil {
+ return err
+ }
}
return s.setupFastConfirmation(ctx)
}
@@ -369,7 +374,8 @@ func (s *Staker) Initialize(ctx context.Context) error {
// based on the config, the wallet address, and the on-chain rollup designated fast confirmer.
// Before this function, both variables should be their default (i.e. fast confirmation is disabled).
func (s *Staker) setupFastConfirmation(ctx context.Context) error {
- if !s.config.EnableFastConfirmation {
+ cfg := s.config()
+ if !cfg.EnableFastConfirmation {
return nil
}
if s.wallet.Address() == nil {
@@ -386,9 +392,9 @@ func (s *Staker) setupFastConfirmation(ctx context.Context) error {
if err != nil {
return fmt.Errorf("getting rollup fast confirmer address: %w", err)
}
+ log.Info("Setting up fast confirmation", "wallet", walletAddress, "fastConfirmer", fastConfirmer)
if fastConfirmer == walletAddress {
// We can directly fast confirm nodes
- s.enableFastConfirmation = true
return nil
} else if fastConfirmer == (common.Address{}) {
// No fast confirmer enabled
@@ -400,7 +406,7 @@ func (s *Staker) setupFastConfirmation(ctx context.Context) error {
fastConfirmer,
s.builder,
s.wallet,
- s.config.gasRefunder,
+ cfg.gasRefunder,
s.l1Reader,
)
if err != nil {
@@ -415,13 +421,12 @@ func (s *Staker) setupFastConfirmation(ctx context.Context) error {
if !isOwner {
return fmt.Errorf("staker wallet address %v is not an owner of the fast confirm safe %v", walletAddress, fastConfirmer)
}
- s.enableFastConfirmation = true
s.fastConfirmSafe = fastConfirmSafe
return nil
}
func (s *Staker) tryFastConfirmationNodeNumber(ctx context.Context, number uint64, hash common.Hash) error {
- if !s.enableFastConfirmation {
+ if !s.config().EnableFastConfirmation {
return nil
}
nodeInfo, err := s.rollup.LookupNode(ctx, number)
@@ -432,7 +437,7 @@ func (s *Staker) tryFastConfirmationNodeNumber(ctx context.Context, number uint6
}
func (s *Staker) tryFastConfirmation(ctx context.Context, blockHash common.Hash, sendRoot common.Hash, nodeHash common.Hash) error {
- if !s.enableFastConfirmation {
+ if !s.config().EnableFastConfirmation {
return nil
}
if s.fastConfirmSafe != nil {
@@ -442,6 +447,7 @@ func (s *Staker) tryFastConfirmation(ctx context.Context, blockHash common.Hash,
if err != nil {
return err
}
+ log.Info("Fast confirming node with wallet", "wallet", auth.From, "nodeHash", nodeHash)
_, err = s.rollup.FastConfirmNextNode(auth, blockHash, sendRoot, nodeHash)
return err
}
@@ -505,7 +511,9 @@ func (s *Staker) Start(ctxIn context.Context) {
}
s.StopWaiter.Start(ctxIn, s)
backoff := time.Second
- ephemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0)
+ isAheadOfOnChainNonceEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "is ahead of on-chain nonce", 0)
+ exceedsMaxMempoolSizeEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, dataposter.ErrExceedsMaxMempoolSize.Error(), 0)
+ blockValidationPendingEphemeralErrorHandler := util.NewEphemeralErrorHandler(10*time.Minute, "block validation is still pending", 0)
s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) {
defer func() {
panicErr := recover()
@@ -516,8 +524,9 @@ func (s *Staker) Start(ctxIn context.Context) {
}
}()
var err error
- if common.HexToAddress(s.config.GasRefunderAddress) != (common.Address{}) {
- gasRefunderBalance, err := s.client.BalanceAt(ctx, common.HexToAddress(s.config.GasRefunderAddress), nil)
+ cfg := s.config()
+ if common.HexToAddress(cfg.GasRefunderAddress) != (common.Address{}) {
+ gasRefunderBalance, err := s.client.BalanceAt(ctx, common.HexToAddress(cfg.GasRefunderAddress), nil)
if err != nil {
log.Warn("error fetching validator gas refunder balance", "err", err)
} else {
@@ -538,7 +547,9 @@ func (s *Staker) Start(ctxIn context.Context) {
}
}
if err == nil {
- ephemeralErrorHandler.Reset()
+ isAheadOfOnChainNonceEphemeralErrorHandler.Reset()
+ exceedsMaxMempoolSizeEphemeralErrorHandler.Reset()
+ blockValidationPendingEphemeralErrorHandler.Reset()
backoff = time.Second
stakerLastSuccessfulActionGauge.Update(time.Now().Unix())
stakerActionSuccessCounter.Inc(1)
@@ -546,7 +557,7 @@ func (s *Staker) Start(ctxIn context.Context) {
// Try to create another tx
return 0
}
- return s.config.StakerInterval
+ return cfg.StakerInterval
}
stakerActionFailureCounter.Inc(1)
backoff *= 2
@@ -556,7 +567,9 @@ func (s *Staker) Start(ctxIn context.Context) {
} else {
logLevel = log.Warn
}
- logLevel = ephemeralErrorHandler.LogLevel(err, logLevel)
+ logLevel = isAheadOfOnChainNonceEphemeralErrorHandler.LogLevel(err, logLevel)
+ logLevel = exceedsMaxMempoolSizeEphemeralErrorHandler.LogLevel(err, logLevel)
+ logLevel = blockValidationPendingEphemeralErrorHandler.LogLevel(err, logLevel)
logLevel("error acting as staker", "err", err)
return backoff
})
@@ -566,6 +579,7 @@ func (s *Staker) Start(ctxIn context.Context) {
if err != nil && ctx.Err() == nil {
log.Error("staker: error checking latest staked", "err", err)
}
+ // #nosec G115
stakerLatestStakedNodeGauge.Update(int64(staked))
if stakedGlobalState != nil {
for _, notifier := range s.stakedNotifiers {
@@ -581,13 +595,14 @@ func (s *Staker) Start(ctxIn context.Context) {
log.Error("staker: error checking latest confirmed", "err", err)
}
}
+ // #nosec G115
stakerLatestConfirmedNodeGauge.Update(int64(confirmed))
if confirmedGlobalState != nil {
for _, notifier := range s.confirmedNotifiers {
notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState)
}
}
- return s.config.StakerInterval
+ return s.config().StakerInterval
})
}
@@ -608,6 +623,7 @@ func (s *Staker) IsWhitelisted(ctx context.Context) (bool, error) {
}
func (s *Staker) shouldAct(ctx context.Context) bool {
+ cfg := s.config()
var gasPriceHigh = false
var gasPriceFloat float64
gasPrice, err := s.client.SuggestGasPrice(ctx)
@@ -615,7 +631,7 @@ func (s *Staker) shouldAct(ctx context.Context) bool {
log.Warn("error getting gas price", "err", err)
} else {
gasPriceFloat = float64(gasPrice.Int64()) / 1e9
- if gasPriceFloat >= s.config.PostingStrategy.HighGasThreshold {
+ if gasPriceFloat >= cfg.PostingStrategy.HighGasThreshold {
gasPriceHigh = true
}
}
@@ -640,14 +656,14 @@ func (s *Staker) shouldAct(ctx context.Context) bool {
// Clamp `s.highGasBlocksBuffer` to between 0 and HighGasDelayBlocks
if s.highGasBlocksBuffer.Sign() < 0 {
s.highGasBlocksBuffer.SetInt64(0)
- } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.PostingStrategy.HighGasDelayBlocks)) > 0 {
- s.highGasBlocksBuffer.SetInt64(s.config.PostingStrategy.HighGasDelayBlocks)
+ } else if s.highGasBlocksBuffer.Cmp(big.NewInt(cfg.PostingStrategy.HighGasDelayBlocks)) > 0 {
+ s.highGasBlocksBuffer.SetInt64(cfg.PostingStrategy.HighGasDelayBlocks)
}
if gasPriceHigh && s.highGasBlocksBuffer.Sign() > 0 {
log.Warn(
"not acting yet as gas price is high",
"gasPrice", gasPriceFloat,
- "highGasPriceConfig", s.config.PostingStrategy.HighGasThreshold,
+ "highGasPriceConfig", cfg.PostingStrategy.HighGasThreshold,
"highGasBuffer", s.highGasBlocksBuffer,
)
return false
@@ -678,7 +694,8 @@ func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error {
}
func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
- if s.config.strategy != WatchtowerStrategy {
+ cfg := s.config()
+ if cfg.strategy != WatchtowerStrategy {
err := s.confirmDataPosterIsReady(ctx)
if err != nil {
return nil, err
@@ -720,6 +737,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
if err != nil {
return nil, fmt.Errorf("error getting latest staked node of own wallet %v: %w", walletAddressOrZero, err)
}
+ // #nosec G115
stakerLatestStakedNodeGauge.Update(int64(latestStakedNodeNum))
if rawInfo != nil {
rawInfo.LatestStakedNode = latestStakedNodeNum
@@ -732,7 +750,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
StakeExists: rawInfo != nil,
}
- effectiveStrategy := s.config.strategy
+ effectiveStrategy := cfg.strategy
nodesLinear, err := s.validatorUtils.AreUnresolvedNodesLinear(callOpts, s.rollupAddress)
if err != nil {
return nil, fmt.Errorf("error checking for rollup assertion fork: %w", err)
@@ -760,7 +778,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
info.LatestStakedNodeHash = s.inactiveLastCheckedNode.hash
}
- if s.config.EnableFastConfirmation {
+ if cfg.EnableFastConfirmation {
firstUnresolvedNode, err := s.rollup.FirstUnresolvedNode(callOpts)
if err != nil {
return nil, err
@@ -792,14 +810,14 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
confirmedCorrect = stakedOnNode
}
if confirmedCorrect {
+ log.Info("trying to fast confirm previous node", "node", firstUnresolvedNode, "nodeHash", nodeInfo.NodeHash)
err = s.tryFastConfirmationNodeNumber(ctx, firstUnresolvedNode, nodeInfo.NodeHash)
if err != nil {
return nil, err
}
if s.builder.BuildingTransactionCount() > 0 {
// Try to fast confirm previous nodes before working on new ones
- log.Info("fast confirming previous node", "node", firstUnresolvedNode)
- return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder)
+ return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder)
}
}
}
@@ -886,7 +904,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
return nil, fmt.Errorf("error withdrawing staker funds from our staker %v: %w", walletAddressOrZero, err)
}
log.Info("removing old stake and withdrawing funds")
- return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder)
+ return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder)
}
}
@@ -940,7 +958,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
if info.StakerInfo == nil && info.StakeExists {
log.Info("staking to execute transactions")
}
- return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder)
+ return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder)
}
func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error {
@@ -966,7 +984,7 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error {
*info.CurrentChallenge,
s.statelessBlockValidator,
latestConfirmedCreated,
- s.config.ConfirmationBlocks,
+ s.config().ConfirmationBlocks,
)
if err != nil {
return fmt.Errorf("error creating challenge manager: %w", err)
@@ -980,8 +998,9 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error {
}
func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiveStrategy StakerStrategy) error {
+ cfg := s.config()
active := effectiveStrategy >= StakeLatestStrategy
- action, wrongNodesExist, err := s.generateNodeAction(ctx, info, effectiveStrategy, &s.config)
+ action, wrongNodesExist, err := s.generateNodeAction(ctx, info, effectiveStrategy, cfg)
if err != nil {
return fmt.Errorf("error generating node action: %w", err)
}
@@ -995,7 +1014,7 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv
switch action := action.(type) {
case createNodeAction:
- if wrongNodesExist && s.config.DisableChallenge {
+ if wrongNodesExist && cfg.DisableChallenge {
log.Error("refusing to challenge assertion as config disables challenges")
info.CanProgress = false
return nil
@@ -1192,7 +1211,7 @@ func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error {
}
func (s *Staker) Strategy() StakerStrategy {
- return s.config.strategy
+ return s.config().strategy
}
func (s *Staker) Rollup() *RollupWatcher {
@@ -1207,7 +1226,7 @@ func (s *Staker) updateStakerBalanceMetric(ctx context.Context) {
}
balance, err := s.client.BalanceAt(ctx, *txSenderAddress, nil)
if err != nil {
- log.Error("error getting staker balance", "txSenderAddress", *txSenderAddress, "err", err)
+ log.Warn("error getting staker balance", "txSenderAddress", *txSenderAddress, "err", err)
return
}
stakerBalanceGauge.Update(arbmath.BalancePerEther(balance))
diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go
index e8232264fe..9257c5582a 100644
--- a/staker/stateless_block_validator.go
+++ b/staker/stateless_block_validator.go
@@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
- "runtime"
"testing"
"github.com/offchainlabs/nitro/arbstate/daprovider"
@@ -24,6 +23,7 @@ import (
"github.com/offchainlabs/nitro/util/rpcclient"
"github.com/offchainlabs/nitro/validator"
"github.com/offchainlabs/nitro/validator/client/redis"
+ "github.com/offchainlabs/nitro/validator/server_api"
validatorclient "github.com/offchainlabs/nitro/validator/client"
)
@@ -41,6 +41,7 @@ type StatelessBlockValidator struct {
streamer TransactionStreamerInterface
db ethdb.Database
dapReaders []daprovider.Reader
+ stack *node.Node
}
type BlockValidatorRegistrer interface {
@@ -115,6 +116,13 @@ const (
Ready
)
+type FullBatchInfo struct {
+ Number uint64
+ PostedData []byte
+ MsgCount arbutil.MessageIndex
+ Preimages map[arbutil.PreimageType]map[common.Hash][]byte
+}
+
type validationEntry struct {
Stage ValidationEntryStage
// Valid since ReadyforRecord:
@@ -134,7 +142,7 @@ type validationEntry struct {
DelayedMsg []byte
}
-func (e *validationEntry) ToInput(stylusArchs []string) (*validator.ValidationInput, error) {
+func (e *validationEntry) ToInput(stylusArchs []ethdb.WasmTarget) (*validator.ValidationInput, error) {
if e.Stage != Ready {
return nil, errors.New("cannot create input from non-ready entry")
}
@@ -143,21 +151,22 @@ func (e *validationEntry) ToInput(stylusArchs []string) (*validator.ValidationIn
HasDelayedMsg: e.HasDelayedMsg,
DelayedMsgNr: e.DelayedMsgNr,
Preimages: e.Preimages,
- UserWasms: make(map[string]map[common.Hash][]byte, len(e.UserWasms)),
+ UserWasms: make(map[ethdb.WasmTarget]map[common.Hash][]byte, len(e.UserWasms)),
BatchInfo: e.BatchInfo,
DelayedMsg: e.DelayedMsg,
StartState: e.Start,
DebugChain: e.ChainConfig.DebugMode(),
}
+ if len(stylusArchs) == 0 && len(e.UserWasms) > 0 {
+ return nil, fmt.Errorf("stylus support is required")
+ }
for _, stylusArch := range stylusArchs {
res.UserWasms[stylusArch] = make(map[common.Hash][]byte)
}
- for hash, info := range e.UserWasms {
+ for hash, asmMap := range e.UserWasms {
for _, stylusArch := range stylusArchs {
- if stylusArch == "wavm" {
- res.UserWasms[stylusArch][hash] = info.Module
- } else if stylusArch == runtime.GOARCH {
- res.UserWasms[stylusArch][hash] = info.Asm
+ if asm, exists := asmMap[stylusArch]; exists {
+ res.UserWasms[stylusArch][hash] = asm
} else {
return nil, fmt.Errorf("stylusArch not supported by block validator: %v", stylusArch)
}
@@ -171,16 +180,28 @@ func newValidationEntry(
start validator.GoGlobalState,
end validator.GoGlobalState,
msg *arbostypes.MessageWithMetadata,
- batch []byte,
- batchBlockHash common.Hash,
+ fullBatchInfo *FullBatchInfo,
+ prevBatches []validator.BatchInfo,
prevDelayed uint64,
chainConfig *params.ChainConfig,
) (*validationEntry, error) {
- batchInfo := validator.BatchInfo{
- Number: start.Batch,
- BlockHash: batchBlockHash,
- Data: batch,
+ preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
+ if fullBatchInfo == nil {
+ return nil, fmt.Errorf("fullbatchInfo cannot be nil")
+ }
+ if fullBatchInfo.Number != start.Batch {
+ return nil, fmt.Errorf("got wrong batch expected: %d got: %d", start.Batch, fullBatchInfo.Number)
+ }
+ valBatches := []validator.BatchInfo{
+ {
+ Number: fullBatchInfo.Number,
+ Data: fullBatchInfo.PostedData,
+ },
}
+ valBatches = append(valBatches, prevBatches...)
+
+ copyPreimagesInto(preimages, fullBatchInfo.Preimages)
+
hasDelayed := false
var delayedNum uint64
if msg.DelayedMessagesRead == prevDelayed+1 {
@@ -189,6 +210,7 @@ func newValidationEntry(
} else if msg.DelayedMessagesRead != prevDelayed {
return nil, fmt.Errorf("illegal validation entry delayedMessage %d, previous %d", msg.DelayedMessagesRead, prevDelayed)
}
+
return &validationEntry{
Stage: ReadyForRecord,
Pos: pos,
@@ -197,8 +219,9 @@ func newValidationEntry(
HasDelayedMsg: hasDelayed,
DelayedMsgNr: delayedNum,
msg: msg,
- BatchInfo: []validator.BatchInfo{batchInfo},
+ BatchInfo: valBatches,
ChainConfig: chainConfig,
+ Preimages: preimages,
}, nil
}
@@ -243,33 +266,88 @@ func NewStatelessBlockValidator(
db: arbdb,
dapReaders: dapReaders,
execSpawners: executionSpawners,
+ stack: stack,
}, nil
}
-func (v *StatelessBlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, common.Hash, arbutil.MessageIndex, error) {
+func (v *StatelessBlockValidator) readPostedBatch(ctx context.Context, batchNum uint64) ([]byte, error) {
batchCount, err := v.inboxTracker.GetBatchCount()
if err != nil {
- return false, nil, common.Hash{}, 0, err
+ return nil, err
}
if batchCount <= batchNum {
- return false, nil, common.Hash{}, 0, nil
+ return nil, fmt.Errorf("batch not found: %d", batchNum)
+ }
+ postedData, _, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum)
+ return postedData, err
+}
+
+func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum uint64) (bool, *FullBatchInfo, error) {
+ batchCount, err := v.inboxTracker.GetBatchCount()
+ if err != nil {
+ return false, nil, err
+ }
+ if batchCount <= batchNum {
+ return false, nil, nil
}
batchMsgCount, err := v.inboxTracker.GetBatchMessageCount(batchNum)
if err != nil {
- return false, nil, common.Hash{}, 0, err
+ return false, nil, err
}
- batch, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum)
+ postedData, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum)
if err != nil {
- return false, nil, common.Hash{}, 0, err
+ return false, nil, err
+ }
+ preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
+ if len(postedData) > 40 {
+ foundDA := false
+ for _, dapReader := range v.dapReaders {
+ if dapReader != nil && dapReader.IsValidHeaderByte(postedData[40]) {
+ preimageRecorder := daprovider.RecordPreimagesTo(preimages)
+ _, err := dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, postedData, preimageRecorder, true)
+ if err != nil {
+ // Matches the way keyset validation was done inside DAS readers i.e logging the error
+ // But other daproviders might just want to return the error
+ if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(postedData[40]) {
+ log.Error(err.Error())
+ } else {
+ return false, nil, err
+ }
+ }
+ foundDA = true
+ break
+ }
+ }
+ if !foundDA {
+ if daprovider.IsDASMessageHeaderByte(postedData[40]) {
+ log.Error("No DAS Reader configured, but sequencer message found with DAS header")
+ }
+ }
+ }
+ fullInfo := FullBatchInfo{
+ Number: batchNum,
+ PostedData: postedData,
+ MsgCount: batchMsgCount,
+ Preimages: preimages,
+ }
+ return true, &fullInfo, nil
+}
+
+func copyPreimagesInto(dest, source map[arbutil.PreimageType]map[common.Hash][]byte) {
+ for piType, piMap := range source {
+ if dest[piType] == nil {
+ dest[piType] = make(map[common.Hash][]byte, len(piMap))
+ }
+ for hash, preimage := range piMap {
+ dest[piType][hash] = preimage
+ }
}
- return true, batch, batchBlockHash, batchMsgCount, nil
}
func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e *validationEntry) error {
if e.Stage != ReadyForRecord {
return fmt.Errorf("validation entry should be ReadyForRecord, is: %v", e.Stage)
}
- e.Preimages = make(map[arbutil.PreimageType]map[common.Hash][]byte)
if e.Pos != 0 {
recording, err := v.recorder.RecordBlockCreation(ctx, e.Pos, e.msg)
if err != nil {
@@ -278,30 +356,11 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e *
if recording.BlockHash != e.End.BlockHash {
return fmt.Errorf("recording failed: pos %d, hash expected %v, got %v", e.Pos, e.End.BlockHash, recording.BlockHash)
}
- // record any additional batch fetching
- batchFetcher := func(batchNum uint64) ([]byte, error) {
- found, data, hash, _, err := v.readBatch(ctx, batchNum)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, errors.New("batch not found")
- }
- e.BatchInfo = append(e.BatchInfo, validator.BatchInfo{
- Number: batchNum,
- BlockHash: hash,
- Data: data,
- })
- return data, nil
- }
- e.msg.Message.BatchGasCost = nil
- err = e.msg.Message.FillInBatchGasCost(batchFetcher)
- if err != nil {
- return err
- }
-
if recording.Preimages != nil {
- e.Preimages[arbutil.Keccak256PreimageType] = recording.Preimages
+ recordingPreimages := map[arbutil.PreimageType]map[common.Hash][]byte{
+ arbutil.Keccak256PreimageType: recording.Preimages,
+ }
+ copyPreimagesInto(e.Preimages, recordingPreimages)
}
e.UserWasms = recording.UserWasms
}
@@ -316,35 +375,6 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e *
}
e.DelayedMsg = delayedMsg
}
- for _, batch := range e.BatchInfo {
- if len(batch.Data) <= 40 {
- continue
- }
- foundDA := false
- for _, dapReader := range v.dapReaders {
- if dapReader != nil && dapReader.IsValidHeaderByte(batch.Data[40]) {
- preimageRecorder := daprovider.RecordPreimagesTo(e.Preimages)
- _, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, preimageRecorder, true)
- if err != nil {
- // Matches the way keyset validation was done inside DAS readers i.e logging the error
- // But other daproviders might just want to return the error
- if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(batch.Data[40]) {
- log.Error(err.Error())
- } else {
- return err
- }
- }
- foundDA = true
- break
- }
- }
- if !foundDA {
- if daprovider.IsDASMessageHeaderByte(batch.Data[40]) {
- log.Error("No DAS Reader configured, but sequencer message found with DAS header")
- }
- }
- }
-
e.msg = nil // no longer needed
e.Stage = Ready
return nil
@@ -404,11 +434,30 @@ func (v *StatelessBlockValidator) CreateReadyValidationEntry(ctx context.Context
}
start := buildGlobalState(*prevResult, startPos)
end := buildGlobalState(*result, endPos)
- seqMsg, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber)
+ found, fullBatchInfo, err := v.readFullBatch(ctx, start.Batch)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("batch %d not found", startPos.BatchNumber)
+ }
+
+ prevBatchNums, err := msg.Message.PastBatchesRequired()
if err != nil {
return nil, err
}
- entry, err := newValidationEntry(pos, start, end, msg, seqMsg, batchBlockHash, prevDelayed, v.streamer.ChainConfig())
+ prevBatches := make([]validator.BatchInfo, 0, len(prevBatchNums))
+ for _, batchNum := range prevBatchNums {
+ data, err := v.readPostedBatch(ctx, batchNum)
+ if err != nil {
+ return nil, err
+ }
+ prevBatches = append(prevBatches, validator.BatchInfo{
+ Number: batchNum,
+ Data: data,
+ })
+ }
+ entry, err := newValidationEntry(pos, start, end, msg, fullBatchInfo, prevBatches, prevDelayed, v.streamer.ChainConfig())
if err != nil {
return nil, err
}
@@ -462,6 +511,18 @@ func (v *StatelessBlockValidator) ValidateResult(
return true, &entry.End, nil
}
+func (v *StatelessBlockValidator) ValidationInputsAt(ctx context.Context, pos arbutil.MessageIndex, target ethdb.WasmTarget) (server_api.InputJSON, error) {
+ entry, err := v.CreateReadyValidationEntry(ctx, pos)
+ if err != nil {
+ return server_api.InputJSON{}, err
+ }
+ input, err := entry.ToInput([]ethdb.WasmTarget{target})
+ if err != nil {
+ return server_api.InputJSON{}, err
+ }
+ return *server_api.ValidationInputToJson(input), nil
+}
+
func (v *StatelessBlockValidator) OverrideRecorder(t *testing.T, recorder execution.ExecutionRecorder) {
v.recorder = recorder
}
diff --git a/staker/txbuilder/builder.go b/staker/txbuilder/builder.go
index 9a5e9df2b5..f52b03a781 100644
--- a/staker/txbuilder/builder.go
+++ b/staker/txbuilder/builder.go
@@ -12,13 +12,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/offchainlabs/nitro/arbutil"
+ "github.com/ethereum/go-ethereum/ethclient"
)
type ValidatorWalletInterface interface {
// Address must be able to be called concurrently with other functions
Address() *common.Address
- L1Client() arbutil.L1Interface
+ L1Client() *ethclient.Client
TestTransactions(context.Context, []*types.Transaction) error
ExecuteTransactions(context.Context, *Builder, common.Address) (*types.Transaction, error)
AuthIfEoa() *bind.TransactOpts
@@ -27,10 +27,10 @@ type ValidatorWalletInterface interface {
// Builder combines any transactions sent to it via SendTransaction into one batch,
// which is then sent to the validator wallet.
// This lets the validator make multiple atomic transactions.
-// This inherits from an eth client so it can be used as an L1Interface,
-// where it transparently intercepts calls to SendTransaction and queues them for the next batch.
+// This inherits from an ethclient.Client so it can be used to transparently
+// intercept calls to SendTransaction and queue them for the next batch.
type Builder struct {
- arbutil.L1Interface
+ *ethclient.Client
transactions []*types.Transaction
builderAuth *bind.TransactOpts
isAuthFake bool
@@ -55,7 +55,7 @@ func NewBuilder(wallet ValidatorWalletInterface) (*Builder, error) {
return &Builder{
builderAuth: builderAuth,
wallet: wallet,
- L1Interface: wallet.L1Client(),
+ Client: wallet.L1Client(),
isAuthFake: isAuthFake,
}, nil
}
@@ -70,7 +70,7 @@ func (b *Builder) ClearTransactions() {
func (b *Builder) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) {
if len(b.transactions) == 0 && !b.isAuthFake {
- return b.L1Interface.EstimateGas(ctx, call)
+ return b.Client.EstimateGas(ctx, call)
}
return 0, nil
}
diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go
index 77b403b669..3202d58569 100644
--- a/staker/validatorwallet/contract.go
+++ b/staker/validatorwallet/contract.go
@@ -16,18 +16,21 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/offchainlabs/nitro/arbnode/dataposter"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/rollupgen"
"github.com/offchainlabs/nitro/staker/txbuilder"
"github.com/offchainlabs/nitro/util/arbmath"
"github.com/offchainlabs/nitro/util/headerreader"
)
-var validatorABI abi.ABI
-var walletCreatedID common.Hash
+var (
+ validatorABI abi.ABI
+ validatorWalletCreatorABI abi.ABI
+ walletCreatedID common.Hash
+)
func init() {
parsedValidator, err := abi.JSON(strings.NewReader(rollupgen.ValidatorWalletABI))
@@ -40,6 +43,7 @@ func init() {
if err != nil {
panic(err)
}
+ validatorWalletCreatorABI = parsedValidatorWalletCreator
walletCreatedID = parsedValidatorWalletCreator.Events["WalletCreated"].ID
}
@@ -151,16 +155,19 @@ func (v *Contract) From() common.Address {
}
// nil value == 0 value
-func (v *Contract) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) {
- newAuth := *v.auth
- newAuth.Context = ctx
- newAuth.Value = value
- nonce, err := v.L1Client().NonceAt(ctx, v.auth.From, nil)
+func getAuthWithUpdatedNonceFromL1(ctx context.Context, l1Reader *headerreader.HeaderReader, auth bind.TransactOpts, value *big.Int) (*bind.TransactOpts, error) {
+ auth.Context = ctx
+ auth.Value = value
+ nonce, err := l1Reader.Client().NonceAt(ctx, auth.From, nil)
if err != nil {
return nil, err
}
- newAuth.Nonce = new(big.Int).SetUint64(nonce)
- return &newAuth, nil
+ auth.Nonce = new(big.Int).SetUint64(nonce)
+ return &auth, nil
+}
+
+func (v *Contract) getAuth(ctx context.Context, value *big.Int) (*bind.TransactOpts, error) {
+ return getAuthWithUpdatedNonceFromL1(ctx, v.l1Reader, *v.auth, value)
}
func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction, gasRefunder common.Address) (*types.Transaction, error) {
@@ -179,6 +186,35 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction
return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value)
}
+func createWalletContract(
+ ctx context.Context,
+ l1Reader *headerreader.HeaderReader,
+ auth *bind.TransactOpts,
+ dataPoster *dataposter.DataPoster,
+ getExtraGas func() uint64,
+ validatorWalletFactoryAddr common.Address,
+) (*types.Transaction, error) {
+ var initialExecutorAllowedDests []common.Address
+ txData, err := validatorWalletCreatorABI.Pack("createWallet", initialExecutorAllowedDests)
+ if err != nil {
+ return nil, err
+ }
+
+ gas, err := gasForTxData(
+ ctx,
+ l1Reader,
+ auth,
+ &validatorWalletFactoryAddr,
+ txData,
+ getExtraGas,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("getting gas for tx data when creating validator wallet, validatorWalletFactory=%v: %w", validatorWalletFactoryAddr, err)
+ }
+
+ return dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), validatorWalletFactoryAddr, txData, gas, common.Big0)
+}
+
func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error {
if v.con != nil {
return nil
@@ -190,11 +226,10 @@ func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) err
return nil
}
if v.address.Load() == nil {
- auth, err := v.getAuth(ctx, nil)
- if err != nil {
- return err
- }
- addr, err := GetValidatorWalletContract(ctx, v.walletFactoryAddr, v.rollupFromBlock, auth, v.l1Reader, createIfMissing)
+ // By passing v.dataPoster as a parameter to GetValidatorWalletContract we force to create a validator wallet through the Staker's DataPoster object.
+ // DataPoster keeps in its internal state information related to the transactions sent through it, which is used to infer the expected nonce in a transaction for example.
+ // If a transaction is sent using the Staker's DataPoster key, but not through the Staker's DataPoster object, DataPoster's internal state will be outdated, which can compromise the expected nonce inference.
+ addr, err := GetValidatorWalletContract(ctx, v.walletFactoryAddr, v.rollupFromBlock, v.l1Reader, createIfMissing, v.dataPoster, v.getExtraGas)
if err != nil {
return err
}
@@ -295,25 +330,29 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B
return arbTx, nil
}
-func (v *Contract) estimateGas(ctx context.Context, value *big.Int, data []byte) (uint64, error) {
- h, err := v.l1Reader.LastHeader(ctx)
+func gasForTxData(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, to *common.Address, data []byte, getExtraGas func() uint64) (uint64, error) {
+ if auth.GasLimit != 0 {
+ return auth.GasLimit, nil
+ }
+
+ h, err := l1Reader.LastHeader(ctx)
if err != nil {
return 0, fmt.Errorf("getting the last header: %w", err)
}
gasFeeCap := new(big.Int).Mul(h.BaseFee, big.NewInt(2))
gasFeeCap = arbmath.BigMax(gasFeeCap, arbmath.FloatToBig(params.GWei))
- gasTipCap, err := v.l1Reader.Client().SuggestGasTipCap(ctx)
+ gasTipCap, err := l1Reader.Client().SuggestGasTipCap(ctx)
if err != nil {
return 0, fmt.Errorf("getting suggested gas tip cap: %w", err)
}
gasFeeCap.Add(gasFeeCap, gasTipCap)
- g, err := v.l1Reader.Client().EstimateGas(
+ g, err := l1Reader.Client().EstimateGas(
ctx,
ethereum.CallMsg{
- From: v.auth.From,
- To: v.Address(),
- Value: value,
+ From: auth.From,
+ To: to,
+ Value: auth.Value,
Data: data,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
@@ -322,7 +361,11 @@ func (v *Contract) estimateGas(ctx context.Context, value *big.Int, data []byte)
if err != nil {
return 0, fmt.Errorf("estimating gas: %w", err)
}
- return g + v.getExtraGas(), nil
+ return g + getExtraGas(), nil
+}
+
+func (v *Contract) gasForTxData(ctx context.Context, auth *bind.TransactOpts, data []byte) (uint64, error) {
+ return gasForTxData(ctx, v.l1Reader, auth, v.Address(), data, v.getExtraGas)
}
func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) {
@@ -341,15 +384,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) (
return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value)
}
-// gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate.
-func (v *Contract) gasForTxData(ctx context.Context, auth *bind.TransactOpts, data []byte) (uint64, error) {
- if auth.GasLimit != 0 {
- return auth.GasLimit, nil
- }
- return v.estimateGas(ctx, auth.Value, data)
-}
-
-func (v *Contract) L1Client() arbutil.L1Interface {
+func (v *Contract) L1Client() *ethclient.Client {
return v.l1Reader.Client()
}
@@ -400,15 +435,22 @@ func (b *Contract) DataPoster() *dataposter.DataPoster {
return b.dataPoster
}
+// Exported for testing
+func (b *Contract) GetExtraGas() func() uint64 {
+ return b.getExtraGas
+}
+
func GetValidatorWalletContract(
ctx context.Context,
validatorWalletFactoryAddr common.Address,
fromBlock int64,
- transactAuth *bind.TransactOpts,
l1Reader *headerreader.HeaderReader,
createIfMissing bool,
+ dataPoster *dataposter.DataPoster,
+ getExtraGas func() uint64,
) (*common.Address, error) {
client := l1Reader.Client()
+ transactAuth := dataPoster.Auth()
// TODO: If we just save a mapping in the wallet creator we won't need log search
walletCreator, err := rollupgen.NewValidatorWalletCreator(validatorWalletFactoryAddr, client)
@@ -443,8 +485,12 @@ func GetValidatorWalletContract(
return nil, nil
}
- var initialExecutorAllowedDests []common.Address
- tx, err := walletCreator.CreateWallet(transactAuth, initialExecutorAllowedDests)
+ transactAuth, err = getAuthWithUpdatedNonceFromL1(ctx, l1Reader, *transactAuth, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ tx, err := createWalletContract(ctx, l1Reader, transactAuth, dataPoster, getExtraGas, validatorWalletFactoryAddr)
if err != nil {
return nil, err
}
diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go
index 3ae305b36c..7c7f472579 100644
--- a/staker/validatorwallet/eoa.go
+++ b/staker/validatorwallet/eoa.go
@@ -10,8 +10,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/nitro/arbnode/dataposter"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/challengegen"
"github.com/offchainlabs/nitro/solgen/go/rollupgen"
"github.com/offchainlabs/nitro/staker/txbuilder"
@@ -19,7 +19,7 @@ import (
type EOA struct {
auth *bind.TransactOpts
- client arbutil.L1Interface
+ client *ethclient.Client
rollupAddress common.Address
challengeManager *challengegen.ChallengeManager
challengeManagerAddress common.Address
@@ -27,7 +27,7 @@ type EOA struct {
getExtraGas func() uint64
}
-func NewEOA(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, getExtraGas func() uint64) (*EOA, error) {
+func NewEOA(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client *ethclient.Client, getExtraGas func() uint64) (*EOA, error) {
return &EOA{
auth: dataPoster.Auth(),
client: l1Client,
@@ -63,7 +63,7 @@ func (w *EOA) TxSenderAddress() *common.Address {
return &w.auth.From
}
-func (w *EOA) L1Client() arbutil.L1Interface {
+func (w *EOA) L1Client() *ethclient.Client {
return w.client
}
diff --git a/staker/validatorwallet/noop.go b/staker/validatorwallet/noop.go
index b050ebe861..fec39ac2b1 100644
--- a/staker/validatorwallet/noop.go
+++ b/staker/validatorwallet/noop.go
@@ -10,18 +10,18 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/nitro/arbnode/dataposter"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/staker/txbuilder"
)
// NoOp validator wallet is used for watchtower mode.
type NoOp struct {
- l1Client arbutil.L1Interface
+ l1Client *ethclient.Client
rollupAddress common.Address
}
-func NewNoOp(l1Client arbutil.L1Interface, rollupAddress common.Address) *NoOp {
+func NewNoOp(l1Client *ethclient.Client, rollupAddress common.Address) *NoOp {
return &NoOp{
l1Client: l1Client,
rollupAddress: rollupAddress,
@@ -46,7 +46,7 @@ func (*NoOp) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types
return nil, errors.New("no op validator wallet cannot timeout challenges")
}
-func (n *NoOp) L1Client() arbutil.L1Interface { return n.l1Client }
+func (n *NoOp) L1Client() *ethclient.Client { return n.l1Client }
func (n *NoOp) RollupAddress() common.Address { return n.rollupAddress }
diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go
index bd0a1f3336..9125c3921e 100644
--- a/system_tests/block_validator_test.go
+++ b/system_tests/block_validator_test.go
@@ -63,7 +63,6 @@ func testBlockValidatorSimple(t *testing.T, opts Options) {
var delayEvery int
if opts.workloadLoops > 1 {
- l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500
delayEvery = opts.workloadLoops / 3
}
@@ -259,6 +258,7 @@ func testBlockValidatorSimple(t *testing.T, opts Options) {
Require(t, err)
// up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader
largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3
+ // #nosec G115
if finalRefCount < 0 || finalRefCount > int64(largestRefCount) {
Fatal(t, "unexpected refcount:", finalRefCount)
}
@@ -284,6 +284,20 @@ func TestBlockValidatorSimpleOnchain(t *testing.T) {
testBlockValidatorSimple(t, opts)
}
+func TestBlockValidatorSimpleJITOnchainWithPublishedMachine(t *testing.T) {
+ cr, err := github.LatestConsensusRelease(context.Background())
+ Require(t, err)
+ machPath := populateMachineDir(t, cr)
+ opts := Options{
+ dasModeString: "onchain",
+ workloadLoops: 1,
+ workload: ethSend,
+ arbitrator: false,
+ wasmRootDir: machPath,
+ }
+ testBlockValidatorSimple(t, opts)
+}
+
func TestBlockValidatorSimpleOnchainWithPublishedMachine(t *testing.T) {
cr, err := github.LatestConsensusRelease(context.Background())
Require(t, err)
diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go
index a3cab748e2..68fb7c3add 100644
--- a/system_tests/bloom_test.go
+++ b/system_tests/bloom_test.go
@@ -48,11 +48,13 @@ func TestBloom(t *testing.T) {
nullEventCounts := make(map[uint64]struct{})
for i := 0; i < eventsNum; i++ {
+ // #nosec G115
count := uint64(rand.Int() % countsNum)
eventCounts[count] = struct{}{}
}
for i := 0; i < nullEventsNum; i++ {
+ // #nosec G115
count := uint64(rand.Int() % countsNum)
nullEventCounts[count] = struct{}{}
}
@@ -60,6 +62,7 @@ func TestBloom(t *testing.T) {
for i := 0; i <= countsNum; i++ {
var tx *types.Transaction
var err error
+ // #nosec G115
_, sendNullEvent := nullEventCounts[uint64(i)]
if sendNullEvent {
tx, err = simple.EmitNullEvent(&ownerTxOpts)
@@ -68,6 +71,7 @@ func TestBloom(t *testing.T) {
Require(t, err)
}
+ // #nosec G115
_, sendEvent := eventCounts[uint64(i)]
if sendEvent {
tx, err = simple.IncrementEmit(&ownerTxOpts)
@@ -86,7 +90,9 @@ func TestBloom(t *testing.T) {
if sectionSize != 256 {
Fatal(t, "unexpected section size: ", sectionSize)
}
+ // #nosec G115
t.Log("sections: ", sectionNum, "/", uint64(countsNum)/sectionSize)
+ // #nosec G115
if sectionSize*(sectionNum+1) > uint64(countsNum) && sectionNum > 1 {
break
}
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 8ad0832633..93c38b5eae 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -25,6 +25,7 @@ import (
"github.com/offchainlabs/nitro/arbos/arbostypes"
"github.com/offchainlabs/nitro/arbos/util"
"github.com/offchainlabs/nitro/arbstate/daprovider"
+ "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/blsSignatures"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/cmd/conf"
@@ -36,6 +37,7 @@ import (
"github.com/offchainlabs/nitro/util/headerreader"
"github.com/offchainlabs/nitro/util/redisutil"
"github.com/offchainlabs/nitro/util/signature"
+ "github.com/offchainlabs/nitro/validator/inputs"
"github.com/offchainlabs/nitro/validator/server_api"
"github.com/offchainlabs/nitro/validator/server_common"
"github.com/offchainlabs/nitro/validator/valnode"
@@ -69,7 +71,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbnode"
- "github.com/offchainlabs/nitro/arbutil"
_ "github.com/offchainlabs/nitro/execution/nodeInterface"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
"github.com/offchainlabs/nitro/solgen/go/mocksgen"
@@ -83,7 +84,6 @@ import (
)
type info = *BlockchainTestInfo
-type client = arbutil.L1Interface
type SecondNodeParams struct {
nodeConfig *arbnode.Config
@@ -138,8 +138,8 @@ func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int {
return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum)
}
-func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) {
- SendWaitTestTransactions(t, tc.ctx, tc.Client, txs)
+func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) []*types.Receipt {
+ return SendWaitTestTransactions(t, tc.ctx, tc.Client, txs)
}
func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common.Address, *mocksgen.Simple) {
@@ -166,7 +166,7 @@ var TestCachingConfig = gethexec.CachingConfig{
SnapshotRestoreGasLimit: 300_000_000_000,
MaxNumberOfBlocksToSkipStateSaving: 0,
MaxAmountOfGasToSkipStateSaving: 0,
- StylusLRUCache: 0,
+ StylusLRUCacheCapacity: 0,
StateScheme: env.GetTestStateScheme(),
}
@@ -197,27 +197,29 @@ var TestSequencerConfig = gethexec.SequencerConfig{
EnableProfiling: false,
}
-func ExecConfigDefaultNonSequencerTest() *gethexec.Config {
+func ExecConfigDefaultNonSequencerTest(t *testing.T) *gethexec.Config {
config := gethexec.ConfigDefault
config.Caching = TestCachingConfig
config.ParentChainReader = headerreader.TestConfig
config.Sequencer.Enable = false
config.Forwarder = DefaultTestForwarderConfig
config.ForwardingTarget = "null"
+ config.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessNone
- _ = config.Validate()
+ Require(t, config.Validate())
return &config
}
-func ExecConfigDefaultTest() *gethexec.Config {
+func ExecConfigDefaultTest(t *testing.T) *gethexec.Config {
config := gethexec.ConfigDefault
config.Caching = TestCachingConfig
config.Sequencer = TestSequencerConfig
config.ParentChainReader = headerreader.TestConfig
config.ForwardingTarget = "null"
+ config.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessNone
- _ = config.Validate()
+ Require(t, config.Validate())
return &config
}
@@ -231,21 +233,73 @@ type NodeBuilder struct {
l1StackConfig *node.Config
l2StackConfig *node.Config
valnodeConfig *valnode.Config
+ l3Config *NitroConfig
L1Info info
L2Info info
+ L3Info info
- // L1, L2 Node parameters
+ // L1, L2, L3 Node parameters
dataDir string
isSequencer bool
takeOwnership bool
withL1 bool
addresses *chaininfo.RollupAddresses
+ l3Addresses *chaininfo.RollupAddresses
initMessage *arbostypes.ParsedInitMessage
+ l3InitMessage *arbostypes.ParsedInitMessage
withProdConfirmPeriodBlocks bool
// Created nodes
L1 *TestClient
L2 *TestClient
+ L3 *TestClient
+}
+
+type NitroConfig struct {
+ chainConfig *params.ChainConfig
+ nodeConfig *arbnode.Config
+ execConfig *gethexec.Config
+ stackConfig *node.Config
+ valnodeConfig *valnode.Config
+
+ withProdConfirmPeriodBlocks bool
+ isSequencer bool
+}
+
+func L3NitroConfigDefaultTest(t *testing.T) *NitroConfig {
+ chainConfig := ¶ms.ChainConfig{
+ ChainID: big.NewInt(333333),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ DAOForkSupport: true,
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: params.ArbitrumDevTestParams(),
+ Clique: ¶ms.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+
+ valnodeConfig := valnode.TestValidationConfig
+ return &NitroConfig{
+ chainConfig: chainConfig,
+ nodeConfig: arbnode.ConfigDefaultL1Test(),
+ execConfig: ExecConfigDefaultTest(t),
+ stackConfig: testhelpers.CreateStackConfigForTest(t.TempDir()),
+ valnodeConfig: &valnodeConfig,
+
+ withProdConfirmPeriodBlocks: false,
+ isSequencer: true,
+ }
}
func NewNodeBuilder(ctx context.Context) *NodeBuilder {
@@ -270,7 +324,8 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder {
b.l2StackConfig = testhelpers.CreateStackConfigForTest(b.dataDir)
cp := valnode.TestValidationConfig
b.valnodeConfig = &cp
- b.execConfig = ExecConfigDefaultTest()
+ b.execConfig = ExecConfigDefaultTest(t)
+ b.l3Config = L3NitroConfigDefaultTest(t)
return b
}
@@ -291,6 +346,11 @@ func (b *NodeBuilder) WithWasmRootDir(wasmRootDir string) *NodeBuilder {
return b
}
+func (b *NodeBuilder) WithExtraArchs(targets []string) *NodeBuilder {
+ b.execConfig.StylusTarget.ExtraArchs = targets
+ return b
+}
+
func (b *NodeBuilder) Build(t *testing.T) func() {
b.CheckConfig(t)
if b.withL1 {
@@ -308,7 +368,7 @@ func (b *NodeBuilder) CheckConfig(t *testing.T) {
b.nodeConfig = arbnode.ConfigDefaultL1Test()
}
if b.execConfig == nil {
- b.execConfig = ExecConfigDefaultTest()
+ b.execConfig = ExecConfigDefaultTest(t)
}
if b.L1Info == nil {
b.L1Info = NewL1TestInfo(t)
@@ -330,64 +390,169 @@ func (b *NodeBuilder) BuildL1(t *testing.T) {
b.L1Info, b.L1.Client, b.L1.L1Backend, b.L1.Stack = createTestL1BlockChain(t, b.L1Info)
locator, err := server_common.NewMachineLocator(b.valnodeConfig.Wasm.RootPath)
Require(t, err)
- b.addresses, b.initMessage = DeployOnTestL1(t, b.ctx, b.L1Info, b.L1.Client, b.chainConfig, locator.LatestWasmModuleRoot(), b.withProdConfirmPeriodBlocks)
+ b.addresses, b.initMessage = deployOnParentChain(
+ t,
+ b.ctx,
+ b.L1Info,
+ b.L1.Client,
+ &headerreader.TestConfig,
+ b.chainConfig,
+ locator.LatestWasmModuleRoot(),
+ b.withProdConfirmPeriodBlocks,
+ true,
+ )
b.L1.cleanup = func() { requireClose(t, b.L1.Stack) }
}
-func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() {
- if b.L1 == nil {
- t.Fatal("must build L1 before building L2")
+func buildOnParentChain(
+ t *testing.T,
+ ctx context.Context,
+
+ dataDir string,
+
+ parentChainInfo info,
+ parentChainTestClient *TestClient,
+ parentChainId *big.Int,
+
+ chainConfig *params.ChainConfig,
+ stackConfig *node.Config,
+ execConfig *gethexec.Config,
+ nodeConfig *arbnode.Config,
+ valnodeConfig *valnode.Config,
+ isSequencer bool,
+ chainInfo info,
+
+ initMessage *arbostypes.ParsedInitMessage,
+ addresses *chaininfo.RollupAddresses,
+) *TestClient {
+ if parentChainTestClient == nil {
+ t.Fatal("must build parent chain before building chain")
}
- b.L2 = NewTestClient(b.ctx)
- var l2chainDb ethdb.Database
- var l2arbDb ethdb.Database
- var l2blockchain *core.BlockChain
- _, b.L2.Stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(
- t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, &b.execConfig.Caching)
+ chainTestClient := NewTestClient(ctx)
+
+ var chainDb ethdb.Database
+ var arbDb ethdb.Database
+ var blockchain *core.BlockChain
+ _, chainTestClient.Stack, chainDb, arbDb, blockchain = createNonL1BlockChainWithStackConfig(
+ t, chainInfo, dataDir, chainConfig, initMessage, stackConfig, execConfig)
var sequencerTxOptsPtr *bind.TransactOpts
var dataSigner signature.DataSignerFunc
- if b.isSequencer {
- sequencerTxOpts := b.L1Info.GetDefaultTransactOpts("Sequencer", b.ctx)
+ if isSequencer {
+ sequencerTxOpts := parentChainInfo.GetDefaultTransactOpts("Sequencer", ctx)
sequencerTxOptsPtr = &sequencerTxOpts
- dataSigner = signature.DataSignerFromPrivateKey(b.L1Info.GetInfoWithPrivKey("Sequencer").PrivateKey)
+ dataSigner = signature.DataSignerFromPrivateKey(parentChainInfo.GetInfoWithPrivKey("Sequencer").PrivateKey)
} else {
- b.nodeConfig.BatchPoster.Enable = false
- b.nodeConfig.Sequencer = false
- b.nodeConfig.DelayedSequencer.Enable = false
- b.execConfig.Sequencer.Enable = false
+ nodeConfig.BatchPoster.Enable = false
+ nodeConfig.Sequencer = false
+ nodeConfig.DelayedSequencer.Enable = false
+ execConfig.Sequencer.Enable = false
}
var validatorTxOptsPtr *bind.TransactOpts
- if b.nodeConfig.Staker.Enable {
- validatorTxOpts := b.L1Info.GetDefaultTransactOpts("Validator", b.ctx)
+ if nodeConfig.Staker.Enable {
+ validatorTxOpts := parentChainInfo.GetDefaultTransactOpts("Validator", ctx)
validatorTxOptsPtr = &validatorTxOpts
}
- AddValNodeIfNeeded(t, b.ctx, b.nodeConfig, true, "", b.valnodeConfig.Wasm.RootPath)
+ AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
- Require(t, b.execConfig.Validate())
- execConfig := b.execConfig
- execConfigFetcher := func() *gethexec.Config { return execConfig }
- execNode, err := gethexec.CreateExecutionNode(b.ctx, b.L2.Stack, l2chainDb, l2blockchain, b.L1.Client, execConfigFetcher)
+ Require(t, execConfig.Validate())
+ execConfigToBeUsedInConfigFetcher := execConfig
+ execConfigFetcher := func() *gethexec.Config { return execConfigToBeUsedInConfigFetcher }
+ execNode, err := gethexec.CreateExecutionNode(ctx, chainTestClient.Stack, chainDb, blockchain, parentChainTestClient.Client, execConfigFetcher)
Require(t, err)
fatalErrChan := make(chan error, 10)
- b.L2.ConsensusNode, err = arbnode.CreateNode(
- b.ctx, b.L2.Stack, execNode, l2arbDb, NewFetcherFromConfig(b.nodeConfig), l2blockchain.Config(), b.L1.Client,
- b.addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil)
+ chainTestClient.ConsensusNode, err = arbnode.CreateNode(
+ ctx, chainTestClient.Stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), parentChainTestClient.Client,
+ addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, parentChainId, nil)
Require(t, err)
- err = b.L2.ConsensusNode.Start(b.ctx)
+ err = chainTestClient.ConsensusNode.Start(ctx)
Require(t, err)
- b.L2.Client = ClientForStack(t, b.L2.Stack)
+ chainTestClient.Client = ClientForStack(t, chainTestClient.Stack)
- StartWatchChanErr(t, b.ctx, fatalErrChan, b.L2.ConsensusNode)
+ StartWatchChanErr(t, ctx, fatalErrChan, chainTestClient.ConsensusNode)
+
+ chainTestClient.ExecNode = getExecNode(t, chainTestClient.ConsensusNode)
+ chainTestClient.cleanup = func() { chainTestClient.ConsensusNode.StopAndWait() }
+
+ return chainTestClient
+}
+
+func (b *NodeBuilder) BuildL3OnL2(t *testing.T) func() {
+ b.L3Info = NewArbTestInfo(t, b.l3Config.chainConfig.ChainID)
+
+ locator, err := server_common.NewMachineLocator(b.l3Config.valnodeConfig.Wasm.RootPath)
+ Require(t, err)
+
+ parentChainReaderConfig := headerreader.TestConfig
+ parentChainReaderConfig.Dangerous.WaitForTxApprovalSafePoll = 0
+ b.l3Addresses, b.l3InitMessage = deployOnParentChain(
+ t,
+ b.ctx,
+ b.L2Info,
+ b.L2.Client,
+ &parentChainReaderConfig,
+ b.l3Config.chainConfig,
+ locator.LatestWasmModuleRoot(),
+ b.l3Config.withProdConfirmPeriodBlocks,
+ false,
+ )
+
+ b.L3 = buildOnParentChain(
+ t,
+ b.ctx,
+
+ b.dataDir,
+
+ b.L2Info,
+ b.L2,
+ b.chainConfig.ChainID,
+
+ b.l3Config.chainConfig,
+ b.l3Config.stackConfig,
+ b.l3Config.execConfig,
+ b.l3Config.nodeConfig,
+ b.l3Config.valnodeConfig,
+ b.l3Config.isSequencer,
+ b.L3Info,
+
+ b.l3InitMessage,
+ b.l3Addresses,
+ )
+
+ return func() {
+ b.L3.cleanup()
+ }
+}
+
+func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() {
+ b.L2 = buildOnParentChain(
+ t,
+ b.ctx,
+
+ b.dataDir,
+
+ b.L1Info,
+ b.L1,
+ big.NewInt(1337),
+
+ b.chainConfig,
+ b.l2StackConfig,
+ b.execConfig,
+ b.nodeConfig,
+ b.valnodeConfig,
+ b.isSequencer,
+ b.L2Info,
+
+ b.initMessage,
+ b.addresses,
+ )
- b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode)
- b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() }
return func() {
b.L2.cleanup()
if b.L1 != nil && b.L1.cleanup != nil {
@@ -407,7 +572,7 @@ func (b *NodeBuilder) BuildL2(t *testing.T) func() {
var arbDb ethdb.Database
var blockchain *core.BlockChain
b.L2Info, b.L2.Stack, chainDb, arbDb, blockchain = createL2BlockChain(
- t, b.L2Info, b.dataDir, b.chainConfig, &b.execConfig.Caching)
+ t, b.L2Info, b.dataDir, b.chainConfig, b.execConfig)
Require(t, b.execConfig.Validate())
execConfig := b.execConfig
@@ -458,7 +623,7 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) {
}
b.L2.cleanup()
- l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, b.L2Info, b.dataDir, b.chainConfig, &b.execConfig.Caching)
+ l2info, stack, chainDb, arbDb, blockchain := createNonL1BlockChainWithStackConfig(t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, b.execConfig)
execConfigFetcher := func() *gethexec.Config { return b.execConfig }
execNode, err := gethexec.CreateExecutionNode(b.ctx, stack, chainDb, blockchain, nil, execConfigFetcher)
@@ -483,13 +648,25 @@ func (b *NodeBuilder) RestartL2Node(t *testing.T) {
b.L2Info = l2info
}
-func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*TestClient, func()) {
- if b.L2 == nil {
- t.Fatal("builder did not previously build a L2 Node")
- }
- if b.withL1 && b.L1 == nil {
- t.Fatal("builder did not previously build a L1 Node")
- }
+func build2ndNode(
+ t *testing.T,
+ ctx context.Context,
+
+ firstNodeStackConfig *node.Config,
+ firsNodeExecConfig *gethexec.Config,
+ firstNodeNodeConfig *arbnode.Config,
+ firstNodeInfo info,
+ firstNodeTestClient *TestClient,
+ valnodeConfig *valnode.Config,
+
+ parentChainTestClient *TestClient,
+ parentChainInfo info,
+
+ params *SecondNodeParams,
+
+ addresses *chaininfo.RollupAddresses,
+ initMessage *arbostypes.ParsedInitMessage,
+) (*TestClient, func()) {
if params.nodeConfig == nil {
params.nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
}
@@ -497,18 +674,18 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes
params.nodeConfig.DataAvailability = *params.dasConfig
}
if params.stackConfig == nil {
- params.stackConfig = b.l2StackConfig
+ params.stackConfig = firstNodeStackConfig
// should use different dataDir from the previously used ones
params.stackConfig.DataDir = t.TempDir()
}
if params.initData == nil {
- params.initData = &b.L2Info.ArbInitData
+ params.initData = &firstNodeInfo.ArbInitData
}
if params.execConfig == nil {
- params.execConfig = b.execConfig
+ params.execConfig = firsNodeExecConfig
}
if params.addresses == nil {
- params.addresses = b.addresses
+ params.addresses = addresses
}
if params.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth {
if params.execConfig.Caching.Archive {
@@ -517,42 +694,98 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes
params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth
}
}
- if b.nodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.RedisUrl == "" {
+ if firstNodeNodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.RedisUrl == "" {
t.Fatal("The batch poster must use Redis when enabled for multiple nodes")
}
- l2 := NewTestClient(b.ctx)
- l2.Client, l2.ConsensusNode =
- Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, b.valnodeConfig, params.addresses, b.initMessage)
- l2.ExecNode = getExecNode(t, l2.ConsensusNode)
- l2.cleanup = func() { l2.ConsensusNode.StopAndWait() }
- return l2, func() { l2.cleanup() }
+ testClient := NewTestClient(ctx)
+ testClient.Client, testClient.ConsensusNode =
+ Create2ndNodeWithConfig(t, ctx, firstNodeTestClient.ConsensusNode, parentChainTestClient.Stack, parentChainInfo, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, valnodeConfig, params.addresses, initMessage)
+ testClient.ExecNode = getExecNode(t, testClient.ConsensusNode)
+ testClient.cleanup = func() { testClient.ConsensusNode.StopAndWait() }
+ return testClient, func() { testClient.cleanup() }
+}
+
+// Build2ndNode starts a second (non-sequencer) L2 node against the L1 and
+// L2 the builder already created; it returns the new client and a cleanup func.
+func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*TestClient, func()) {
+ if b.L2 == nil {
+ t.Fatal("builder did not previously build an L2 Node")
+ }
+ if b.withL1 && b.L1 == nil {
+ t.Fatal("builder did not previously build an L1 Node")
+ }
+ return build2ndNode(
+ t,
+ b.ctx,
+
+ b.l2StackConfig,
+ b.execConfig,
+ b.nodeConfig,
+ b.L2Info,
+ b.L2,
+ b.valnodeConfig,
+
+ b.L1,
+ b.L1Info,
+
+ params,
+
+ b.addresses,
+ b.initMessage,
+ )
+}
+
+// Build2ndNodeOnL3 starts a second L3 node whose parent chain is the
+// builder's L2; requires BuildL3 to have run first.
+func (b *NodeBuilder) Build2ndNodeOnL3(t *testing.T, params *SecondNodeParams) (*TestClient, func()) {
+ if b.L3 == nil {
+ t.Fatal("builder did not previously build an L3 Node")
+ }
+ return build2ndNode(
+ t,
+ b.ctx,
+
+ b.l3Config.stackConfig,
+ b.l3Config.execConfig,
+ b.l3Config.nodeConfig,
+ b.L3Info,
+ b.L3,
+ b.l3Config.valnodeConfig,
+
+ b.L2,
+ b.L2Info,
+
+ params,
+
+ b.l3Addresses,
+ b.l3InitMessage,
+ )
+}
func (b *NodeBuilder) BridgeBalance(t *testing.T, account string, amount *big.Int) (*types.Transaction, *types.Receipt) {
return BridgeBalance(t, account, amount, b.L1Info, b.L2Info, b.L1.Client, b.L2.Client, b.ctx)
}
-func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, txs []*types.Transaction) {
+func SendWaitTestTransactions(t *testing.T, ctx context.Context, client *ethclient.Client, txs []*types.Transaction) []*types.Receipt {
t.Helper()
+ receipts := make([]*types.Receipt, len(txs))
for _, tx := range txs {
Require(t, client.SendTransaction(ctx, tx))
}
- for _, tx := range txs {
- _, err := EnsureTxSucceeded(ctx, client, tx)
+ for i, tx := range txs {
+ var err error
+ receipts[i], err = EnsureTxSucceeded(ctx, client, tx)
Require(t, err)
}
+ return receipts
}
func TransferBalance(
- t *testing.T, from, to string, amount *big.Int, l2info info, client client, ctx context.Context,
+ t *testing.T, from, to string, amount *big.Int, l2info info, client *ethclient.Client, ctx context.Context,
) (*types.Transaction, *types.Receipt) {
t.Helper()
return TransferBalanceTo(t, from, l2info.GetAddress(to), amount, l2info, client, ctx)
}
func TransferBalanceTo(
- t *testing.T, from string, to common.Address, amount *big.Int, l2info info, client client, ctx context.Context,
+ t *testing.T, from string, to common.Address, amount *big.Int, l2info info, client *ethclient.Client, ctx context.Context,
) (*types.Transaction, *types.Receipt) {
t.Helper()
tx := l2info.PrepareTxTo(from, &to, l2info.TransferGas, amount, nil)
@@ -565,7 +798,7 @@ func TransferBalanceTo(
// if l2client is not nil - will wait until balance appears in l2
func BridgeBalance(
- t *testing.T, account string, amount *big.Int, l1info info, l2info info, l1client client, l2client client, ctx context.Context,
+ t *testing.T, account string, amount *big.Int, l1info info, l2info info, l1client *ethclient.Client, l2client *ethclient.Client, ctx context.Context,
) (*types.Transaction, *types.Receipt) {
t.Helper()
@@ -611,7 +844,7 @@ func BridgeBalance(
break
}
TransferBalance(t, "Faucet", "User", big.NewInt(1), l1info, l1client, ctx)
- if i > 20 {
+ if i > 200 {
Fatal(t, "bridging failed")
}
<-time.After(time.Millisecond * 100)
@@ -625,8 +858,8 @@ func SendSignedTxesInBatchViaL1(
t *testing.T,
ctx context.Context,
l1info *BlockchainTestInfo,
- l1client arbutil.L1Interface,
- l2client arbutil.L1Interface,
+ l1client *ethclient.Client,
+ l2client *ethclient.Client,
delayedTxes types.Transactions,
) types.Receipts {
delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client)
@@ -664,7 +897,7 @@ func l2MessageBatchDataFromTxes(txes types.Transactions) ([]byte, error) {
if err != nil {
return nil, err
}
- binary.BigEndian.PutUint64(sizeBuf, uint64(len(txBytes)+1))
+ binary.BigEndian.PutUint64(sizeBuf, uint64(len(txBytes))+1)
l2Message = append(l2Message, sizeBuf...)
l2Message = append(l2Message, arbos.L2MessageKind_SignedTx)
l2Message = append(l2Message, txBytes...)
@@ -676,8 +909,8 @@ func SendSignedTxViaL1(
t *testing.T,
ctx context.Context,
l1info *BlockchainTestInfo,
- l1client arbutil.L1Interface,
- l2client arbutil.L1Interface,
+ l1client *ethclient.Client,
+ l2client *ethclient.Client,
delayedTx *types.Transaction,
) *types.Receipt {
delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client)
@@ -707,8 +940,8 @@ func SendUnsignedTxViaL1(
t *testing.T,
ctx context.Context,
l1info *BlockchainTestInfo,
- l1client arbutil.L1Interface,
- l2client arbutil.L1Interface,
+ l1client *ethclient.Client,
+ l2client *ethclient.Client,
templateTx *types.Transaction,
) *types.Receipt {
delayedInboxContract, err := bridgegen.NewInbox(l1info.GetAddress("Inbox"), l1client)
@@ -754,13 +987,13 @@ func SendUnsignedTxViaL1(
return receipt
}
-func GetBaseFee(t *testing.T, client client, ctx context.Context) *big.Int {
+func GetBaseFee(t *testing.T, client *ethclient.Client, ctx context.Context) *big.Int {
header, err := client.HeaderByNumber(ctx, nil)
Require(t, err)
return header.BaseFee
}
-func GetBaseFeeAt(t *testing.T, client client, ctx context.Context, blockNum *big.Int) *big.Int {
+func GetBaseFeeAt(t *testing.T, client *ethclient.Client, ctx context.Context, blockNum *big.Int) *big.Int {
header, err := client.HeaderByNumber(ctx, blockNum)
Require(t, err)
return header.BaseFee
@@ -982,8 +1215,8 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client,
return l1info, l1Client, l1backend, stack
}
-func getInitMessage(ctx context.Context, t *testing.T, l1client client, addresses *chaininfo.RollupAddresses) *arbostypes.ParsedInitMessage {
- bridge, err := arbnode.NewDelayedBridge(l1client, addresses.Bridge, addresses.DeployedAt)
+func getInitMessage(ctx context.Context, t *testing.T, parentChainClient *ethclient.Client, addresses *chaininfo.RollupAddresses) *arbostypes.ParsedInitMessage {
+ bridge, err := arbnode.NewDelayedBridge(parentChainClient, addresses.Bridge, addresses.DeployedAt)
Require(t, err)
deployedAtBig := arbmath.UintToBig(addresses.DeployedAt)
messages, err := bridge.LookupMessagesInRange(ctx, deployedAtBig, deployedAtBig, nil)
@@ -997,82 +1230,92 @@ func getInitMessage(ctx context.Context, t *testing.T, l1client client, addresse
return initMessage
}
-func DeployOnTestL1(
- t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig, wasmModuleRoot common.Hash, prodConfirmPeriodBlocks bool,
+func deployOnParentChain(
+ t *testing.T,
+ ctx context.Context,
+ parentChainInfo info,
+ parentChainClient *ethclient.Client,
+ parentChainReaderConfig *headerreader.Config,
+ chainConfig *params.ChainConfig,
+ wasmModuleRoot common.Hash,
+ prodConfirmPeriodBlocks bool,
+ chainSupportsBlobs bool,
) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) {
- l1info.GenerateAccount("RollupOwner")
- l1info.GenerateAccount("Sequencer")
- l1info.GenerateAccount("Validator")
- l1info.GenerateAccount("User")
-
- SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
- l1info.PrepareTx("Faucet", "RollupOwner", 30000, big.NewInt(9223372036854775807), nil),
- l1info.PrepareTx("Faucet", "Sequencer", 30000, big.NewInt(9223372036854775807), nil),
- l1info.PrepareTx("Faucet", "Validator", 30000, big.NewInt(9223372036854775807), nil),
- l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil)})
-
- l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx)
+ parentChainInfo.GenerateAccount("RollupOwner")
+ parentChainInfo.GenerateAccount("Sequencer")
+ parentChainInfo.GenerateAccount("Validator")
+ parentChainInfo.GenerateAccount("User")
+
+ SendWaitTestTransactions(t, ctx, parentChainClient, []*types.Transaction{
+ parentChainInfo.PrepareTx("Faucet", "RollupOwner", parentChainInfo.TransferGas, big.NewInt(9223372036854775807), nil),
+ parentChainInfo.PrepareTx("Faucet", "Sequencer", parentChainInfo.TransferGas, big.NewInt(9223372036854775807), nil),
+ parentChainInfo.PrepareTx("Faucet", "Validator", parentChainInfo.TransferGas, big.NewInt(9223372036854775807), nil),
+ parentChainInfo.PrepareTx("Faucet", "User", parentChainInfo.TransferGas, big.NewInt(9223372036854775807), nil)})
+
+ parentChainTransactionOpts := parentChainInfo.GetDefaultTransactOpts("RollupOwner", ctx)
serializedChainConfig, err := json.Marshal(chainConfig)
Require(t, err)
- arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
- l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys)
+ arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, parentChainClient)
+ parentChainReader, err := headerreader.New(ctx, parentChainClient, func() *headerreader.Config { return parentChainReaderConfig }, arbSys)
Require(t, err)
- l1Reader.Start(ctx)
- defer l1Reader.StopAndWait()
+ parentChainReader.Start(ctx)
+ defer parentChainReader.StopAndWait()
nativeToken := common.Address{}
maxDataSize := big.NewInt(117964)
- addresses, err := deploy.DeployOnL1(
+ addresses, err := deploy.DeployOnParentChain(
ctx,
- l1Reader,
- &l1TransactionOpts,
- []common.Address{l1info.GetAddress("Sequencer")},
- l1info.GetAddress("RollupOwner"),
+ parentChainReader,
+ &parentChainTransactionOpts,
+ []common.Address{parentChainInfo.GetAddress("Sequencer")},
+ parentChainInfo.GetAddress("RollupOwner"),
0,
- arbnode.GenerateRollupConfig(prodConfirmPeriodBlocks, wasmModuleRoot, l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
+ arbnode.GenerateRollupConfig(prodConfirmPeriodBlocks, wasmModuleRoot, parentChainInfo.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
nativeToken,
maxDataSize,
- false,
+ chainSupportsBlobs,
)
Require(t, err)
- l1info.SetContract("Bridge", addresses.Bridge)
- l1info.SetContract("SequencerInbox", addresses.SequencerInbox)
- l1info.SetContract("Inbox", addresses.Inbox)
- l1info.SetContract("UpgradeExecutor", addresses.UpgradeExecutor)
- initMessage := getInitMessage(ctx, t, l1client, addresses)
+ parentChainInfo.SetContract("Bridge", addresses.Bridge)
+ parentChainInfo.SetContract("SequencerInbox", addresses.SequencerInbox)
+ parentChainInfo.SetContract("Inbox", addresses.Inbox)
+ parentChainInfo.SetContract("UpgradeExecutor", addresses.UpgradeExecutor)
+ initMessage := getInitMessage(ctx, t, parentChainClient, addresses)
return addresses, initMessage
}
func createL2BlockChain(
- t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *gethexec.CachingConfig,
+ t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, execConfig *gethexec.Config,
) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
- return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, cacheConfig)
+ return createNonL1BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, execConfig)
}
-func createL2BlockChainWithStackConfig(
- t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *gethexec.CachingConfig,
+func createNonL1BlockChainWithStackConfig(
+ t *testing.T, info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, execConfig *gethexec.Config,
) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) {
- if l2info == nil {
- l2info = NewArbTestInfo(t, chainConfig.ChainID)
+ if info == nil {
+ info = NewArbTestInfo(t, chainConfig.ChainID)
}
- var stack *node.Node
- var err error
if stackConfig == nil {
stackConfig = testhelpers.CreateStackConfigForTest(dataDir)
}
- stack, err = node.New(stackConfig)
+ if execConfig == nil {
+ execConfig = ExecConfigDefaultTest(t)
+ }
+
+ stack, err := node.New(stackConfig)
Require(t, err)
chainData, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata"))
Require(t, err)
wasmData, err := stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm"))
Require(t, err)
- chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0)
+ chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0, execConfig.StylusTarget.WasmTargets())
arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata"))
Require(t, err)
- initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData)
+ initReader := statetransfer.NewMemoryInitDataReader(&info.ArbInitData)
if initMessage == nil {
serializedChainConfig, err := json.Marshal(chainConfig)
Require(t, err)
@@ -1083,14 +1326,11 @@ func createL2BlockChainWithStackConfig(
SerializedChainConfig: serializedChainConfig,
}
}
- var coreCacheConfig *core.CacheConfig
- if cacheConfig != nil {
- coreCacheConfig = gethexec.DefaultCacheConfigFor(stack, cacheConfig)
- }
- blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, ExecConfigDefaultTest().TxLookupLimit, 0)
+ coreCacheConfig := gethexec.DefaultCacheConfigFor(stack, &execConfig.Caching)
+ blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, ExecConfigDefaultTest(t).TxLookupLimit, 0)
Require(t, err)
- return l2info, stack, chainDb, arbDb, blockchain
+ return info, stack, chainDb, arbDb, blockchain
}
func ClientForStack(t *testing.T, backend *node.Node) *ethclient.Client {
@@ -1133,9 +1373,9 @@ func Create2ndNodeWithConfig(
t *testing.T,
ctx context.Context,
first *arbnode.Node,
- l1stack *node.Node,
- l1info *BlockchainTestInfo,
- l2InitData *statetransfer.ArbosInitializationInfo,
+ parentChainStack *node.Node,
+ parentChainInfo *BlockchainTestInfo,
+ chainInitData *statetransfer.ArbosInitializationInfo,
nodeConfig *arbnode.Config,
execConfig *gethexec.Config,
stackConfig *node.Config,
@@ -1147,37 +1387,37 @@ func Create2ndNodeWithConfig(
nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
}
if execConfig == nil {
- execConfig = ExecConfigDefaultNonSequencerTest()
+ execConfig = ExecConfigDefaultNonSequencerTest(t)
}
feedErrChan := make(chan error, 10)
- l1rpcClient := l1stack.Attach()
- l1client := ethclient.NewClient(l1rpcClient)
+ parentChainRpcClient := parentChainStack.Attach()
+ parentChainClient := ethclient.NewClient(parentChainRpcClient)
if stackConfig == nil {
stackConfig = testhelpers.CreateStackConfigForTest(t.TempDir())
}
- l2stack, err := node.New(stackConfig)
+ chainStack, err := node.New(stackConfig)
Require(t, err)
- l2chainData, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata"))
+ chainData, err := chainStack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata"))
Require(t, err)
- wasmData, err := l2stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm"))
+ wasmData, err := chainStack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm"))
Require(t, err)
- l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData, 0)
+ chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0, execConfig.StylusTarget.WasmTargets())
- l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata"))
+ arbDb, err := chainStack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata"))
Require(t, err)
- initReader := statetransfer.NewMemoryInitDataReader(l2InitData)
+ initReader := statetransfer.NewMemoryInitDataReader(chainInitData)
- dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey)
- sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx)
- validatorTxOpts := l1info.GetDefaultTransactOpts("Validator", ctx)
+ dataSigner := signature.DataSignerFromPrivateKey(parentChainInfo.GetInfoWithPrivKey("Sequencer").PrivateKey)
+ sequencerTxOpts := parentChainInfo.GetDefaultTransactOpts("Sequencer", ctx)
+ validatorTxOpts := parentChainInfo.GetDefaultTransactOpts("Validator", ctx)
firstExec := getExecNode(t, first)
chainConfig := firstExec.ArbInterface.BlockChain().Config()
- coreCacheConfig := gethexec.DefaultCacheConfigFor(l2stack, &execConfig.Caching)
- l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, ExecConfigDefaultTest().TxLookupLimit, 0)
+ coreCacheConfig := gethexec.DefaultCacheConfigFor(chainStack, &execConfig.Caching)
+ blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, ExecConfigDefaultTest(t).TxLookupLimit, 0)
Require(t, err)
AddValNodeIfNeeded(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
@@ -1185,19 +1425,19 @@ func Create2ndNodeWithConfig(
Require(t, execConfig.Validate())
Require(t, nodeConfig.Validate())
configFetcher := func() *gethexec.Config { return execConfig }
- currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher)
+ currentExec, err := gethexec.CreateExecutionNode(ctx, chainStack, chainDb, blockchain, parentChainClient, configFetcher)
Require(t, err)
- currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil)
+ currentNode, err := arbnode.CreateNode(ctx, chainStack, currentExec, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), parentChainClient, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil)
Require(t, err)
err = currentNode.Start(ctx)
Require(t, err)
- l2client := ClientForStack(t, l2stack)
+ chainClient := ClientForStack(t, chainStack)
StartWatchChanErr(t, ctx, feedErrChan, currentNode)
- return l2client, currentNode
+ return chainClient, currentNode
}
func GetBalance(t *testing.T, ctx context.Context, client *ethclient.Client, account common.Address) *big.Int {
@@ -1217,7 +1457,7 @@ func authorizeDASKeyset(
ctx context.Context,
dasSignerKey *blsSignatures.PublicKey,
l1info info,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
) {
if dasSignerKey == nil {
return
@@ -1457,6 +1697,34 @@ func logParser[T any](t *testing.T, source string, name string) func(*types.Log)
}
}
+// recordBlock writes a json file with all of the data needed to validate a block.
+//
+// This can be used as an input to the arbitrator prover to validate a block.
+func recordBlock(t *testing.T, block uint64, builder *NodeBuilder) {
+ t.Helper()
+ ctx := builder.ctx
+ inboxPos := arbutil.MessageIndex(block) // message index corresponding to the block to record
+ for {
+ time.Sleep(250 * time.Millisecond) // poll until the block's message has been sequenced into a batch
+ batches, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount()
+ Require(t, err)
+ haveMessages, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1)
+ Require(t, err)
+ if haveMessages >= inboxPos {
+ break
+ }
+ }
+ validationInputsWriter, err := inputs.NewWriter(inputs.WithSlug(t.Name())) // output file named after the running test
+ Require(t, err)
+ inputJson, err := builder.L2.ConsensusNode.StatelessBlockValidator.ValidationInputsAt(ctx, inboxPos, rawdb.TargetWavm)
+ if err != nil {
+ Fatal(t, "failed to get validation inputs", block, err)
+ }
+ if err := validationInputsWriter.Write(&inputJson); err != nil {
+ Fatal(t, "failed to write validation inputs", block, err)
+ }
+}
+
func populateMachineDir(t *testing.T, cr *github.ConsensusRelease) string {
baseDir := t.TempDir()
machineDir := baseDir + "/machines"
diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go
index 7d66e516b4..c1ef840c43 100644
--- a/system_tests/contract_tx_test.go
+++ b/system_tests/contract_tx_test.go
@@ -51,6 +51,7 @@ func TestContractTxDeploy(t *testing.T) {
0xF3, // RETURN
}
var requestId common.Hash
+ // #nosec G115
requestId[0] = uint8(stateNonce)
contractTx := &types.ArbitrumContractTx{
ChainId: params.ArbitrumDevTestChainConfig().ChainID,
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index 9f4d153b6f..ed3844d528 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -6,6 +6,7 @@ package arbtest
import (
"context"
"encoding/base64"
+ "errors"
"io"
"math/big"
"net"
@@ -22,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/offchainlabs/nitro/arbnode"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/blsSignatures"
"github.com/offchainlabs/nitro/cmd/genericconf"
"github.com/offchainlabs/nitro/das"
@@ -37,25 +37,20 @@ func startLocalDASServer(
t *testing.T,
ctx context.Context,
dataDir string,
- l1client arbutil.L1Interface,
+ l1client *ethclient.Client,
seqInboxAddress common.Address,
) (*http.Server, *blsSignatures.PublicKey, das.BackendConfig, *das.RestfulDasServer, string) {
keyDir := t.TempDir()
pubkey, _, err := das.GenerateAndStoreKeys(keyDir)
Require(t, err)
- config := das.DataAvailabilityConfig{
- Enable: true,
- Key: das.KeyConfig{
- KeyDir: keyDir,
- },
- LocalFileStorage: das.LocalFileStorageConfig{
- Enable: true,
- DataDir: dataDir,
- },
- ParentChainNodeURL: "none",
- RequestTimeout: 5 * time.Second,
- }
+ config := das.DefaultDataAvailabilityConfig
+ config.Enable = true
+ config.Key = das.KeyConfig{KeyDir: keyDir}
+ config.ParentChainNodeURL = "none"
+ config.LocalFileStorage = das.DefaultLocalFileStorageConfig
+ config.LocalFileStorage.Enable = true
+ config.LocalFileStorage.DataDir = dataDir
storageService, lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config)
defer lifecycleManager.StopAndWaitUntil(time.Second)
@@ -327,3 +322,80 @@ func initTest(t *testing.T) {
enableLogging(logLvl)
}
}
+
+func TestDASBatchPosterFallback(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Setup L1
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ builder.chainConfig = params.ArbitrumDevTestDASChainConfig()
+ builder.BuildL1(t)
+ l1client := builder.L1.Client
+ l1info := builder.L1Info
+
+ // Setup DAS server
+ dasDataDir := t.TempDir()
+ dasRpcServer, pubkey, backendConfig, _, restServerUrl := startLocalDASServer(
+ t, ctx, dasDataDir, l1client, builder.addresses.SequencerInbox)
+ authorizeDASKeyset(t, ctx, pubkey, l1info, l1client)
+
+ // Setup sequencer/batch-poster L2 node
+ builder.nodeConfig.DataAvailability.Enable = true
+ builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(backendConfig)
+ builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+ builder.nodeConfig.DataAvailability.RestAggregator.Enable = true
+ builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+ builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none"
+ builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = true // Disable DAS fallback
+ builder.nodeConfig.BatchPoster.ErrorDelay = time.Millisecond * 250 // Increase error delay because we expect errors
+ builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID)
+ builder.L2Info.GenerateAccount("User2")
+ cleanup := builder.BuildL2OnL1(t)
+ defer cleanup()
+ l2client := builder.L2.Client
+ l2info := builder.L2Info
+
+ // Setup secondary L2 node
+ nodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest()
+ nodeConfigB.BlockValidator.Enable = false
+ nodeConfigB.DataAvailability.Enable = true
+ nodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+ nodeConfigB.DataAvailability.RestAggregator.Enable = true
+ nodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+ nodeConfigB.DataAvailability.ParentChainNodeURL = "none"
+ nodeBParams := SecondNodeParams{
+ nodeConfig: nodeConfigB,
+ initData: &l2info.ArbInitData,
+ }
+ l2B, cleanupB := builder.Build2ndNode(t, &nodeBParams)
+ defer cleanupB()
+
+ // Check batch posting using the DAS
+ checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(1e12), l2B.Client)
+
+ // Shutdown the DAS
+ err := dasRpcServer.Shutdown(ctx)
+ Require(t, err)
+
+ // Send 2nd transaction and check it doesn't arrive on second node
+ tx, _ := TransferBalanceTo(t, "Owner", l2info.GetAddress("User2"), big.NewInt(1e12), l2info, l2client, ctx)
+ _, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+ if err == nil || !errors.Is(err, context.DeadlineExceeded) {
+ Fatal(t, "expected context-deadline exceeded error, but got:", err)
+ }
+
+ // Enable the DAP fallback and check the transaction on the second node.
+ // (We don't need to restart the node because of the hot-reload.)
+ builder.nodeConfig.BatchPoster.DisableDapFallbackStoreDataOnChain = false
+ _, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+ Require(t, err)
+ l2balance, err := l2B.Client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+ Require(t, err)
+ if l2balance.Cmp(big.NewInt(2e12)) != 0 {
+ Fatal(t, "Unexpected balance:", l2balance)
+ }
+
+ // Send another transaction with fallback on
+ checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(3e12), l2B.Client)
+}
diff --git a/system_tests/db_conversion_test.go b/system_tests/db_conversion_test.go
new file mode 100644
index 0000000000..aca28262cb
--- /dev/null
+++ b/system_tests/db_conversion_test.go
@@ -0,0 +1,125 @@
+package arbtest
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/offchainlabs/nitro/cmd/dbconv/dbconv"
+ "github.com/offchainlabs/nitro/util/arbmath"
+)
+
+func TestDatabaseConversion(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ builder.l2StackConfig.DBEngine = "leveldb"
+ builder.l2StackConfig.Name = "testl2"
+ // currently only HashScheme supports archive mode
+ if builder.execConfig.Caching.StateScheme == rawdb.HashScheme {
+ builder.execConfig.Caching.Archive = true
+ }
+ cleanup := builder.Build(t)
+ dataDir := builder.dataDir
+ cleanupDone := false
+ defer func() { // TODO: cleanup should be safe to call twice; currently the second call gets stuck
+ if !cleanupDone {
+ cleanup()
+ }
+ }()
+ builder.L2Info.GenerateAccount("User2")
+ var txs []*types.Transaction
+ for i := uint64(0); i < 200; i++ {
+ tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
+ txs = append(txs, tx)
+ err := builder.L2.Client.SendTransaction(ctx, tx)
+ Require(t, err)
+ }
+ for _, tx := range txs {
+ _, err := builder.L2.EnsureTxSucceeded(tx)
+ Require(t, err)
+ }
+ block, err := builder.L2.Client.BlockByNumber(ctx, nil)
+ Require(t, err)
+ user2Balance := builder.L2.GetBalance(t, builder.L2Info.GetAddress("User2"))
+ ownerBalance := builder.L2.GetBalance(t, builder.L2Info.GetAddress("Owner"))
+
+ cleanup()
+ cleanupDone = true
+ t.Log("stopped first node")
+
+ instanceDir := filepath.Join(dataDir, builder.l2StackConfig.Name)
+ for _, dbname := range []string{"l2chaindata", "arbitrumdata", "wasm"} {
+ err := os.Rename(filepath.Join(instanceDir, dbname), filepath.Join(instanceDir, fmt.Sprintf("%s_old", dbname)))
+ Require(t, err)
+ t.Log("converting:", dbname)
+ convConfig := dbconv.DefaultDBConvConfig
+ convConfig.Src.Data = path.Join(instanceDir, fmt.Sprintf("%s_old", dbname))
+ convConfig.Dst.Data = path.Join(instanceDir, dbname)
+ conv := dbconv.NewDBConverter(&convConfig)
+ err = conv.Convert(ctx)
+ Require(t, err)
+ }
+
+ builder.l2StackConfig.DBEngine = "pebble"
+ builder.nodeConfig.ParentChainReader.Enable = false
+ builder.withL1 = false
+ builder.L2.cleanup = func() {}
+ builder.RestartL2Node(t)
+ t.Log("restarted the node")
+
+ blockAfterRestart, err := builder.L2.Client.BlockByNumber(ctx, nil)
+ Require(t, err)
+ user2BalanceAfterRestart := builder.L2.GetBalance(t, builder.L2Info.GetAddress("User2"))
+ ownerBalanceAfterRestart := builder.L2.GetBalance(t, builder.L2Info.GetAddress("Owner"))
+ if block.Hash() != blockAfterRestart.Hash() {
+ t.Fatal("block hash mismatch")
+ }
+ if !arbmath.BigEquals(user2Balance, user2BalanceAfterRestart) {
+ t.Fatal("unexpected User2 balance, have:", user2BalanceAfterRestart, "want:", user2Balance)
+ }
+ if !arbmath.BigEquals(ownerBalance, ownerBalanceAfterRestart) {
+ t.Fatal("unexpected Owner balance, have:", ownerBalanceAfterRestart, "want:", ownerBalance)
+ }
+
+ bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain()
+ current := bc.CurrentBlock()
+ if current == nil {
+ Fatal(t, "failed to get current block header")
+ }
+ triedb := bc.StateCache().TrieDB()
+ visited := 0
+ i := uint64(0)
+ // don't query historical blocks when PathScheme is used
+ if builder.execConfig.Caching.StateScheme == rawdb.PathScheme {
+ i = current.Number.Uint64()
+ }
+ for ; i <= current.Number.Uint64(); i++ {
+ header := bc.GetHeaderByNumber(i)
+ _, err := bc.StateAt(header.Root)
+ Require(t, err)
+ tr, err := trie.New(trie.TrieID(header.Root), triedb)
+ Require(t, err)
+ it, err := tr.NodeIterator(nil)
+ Require(t, err)
+ for it.Next(true) {
+ visited++
+ }
+ Require(t, it.Error())
+ }
+ t.Log("visited nodes:", visited)
+
+ tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
+ err = builder.L2.Client.SendTransaction(ctx, tx)
+ Require(t, err)
+ _, err = builder.L2.EnsureTxSucceeded(tx)
+ Require(t, err)
+
+}
diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go
index 284c709fad..6285702342 100644
--- a/system_tests/estimation_test.go
+++ b/system_tests/estimation_test.go
@@ -214,7 +214,7 @@ func TestComponentEstimate(t *testing.T) {
userBalance := big.NewInt(1e16)
maxPriorityFeePerGas := big.NewInt(0)
- maxFeePerGas := arbmath.BigMulByUfrac(l2BaseFee, 3, 2)
+ maxFeePerGas := arbmath.BigMulByUFrac(l2BaseFee, 3, 2)
builder.L2Info.GenerateAccount("User")
builder.L2.TransferBalance(t, "Owner", "User", userBalance, builder.L2Info)
diff --git a/system_tests/eth_sync_test.go b/system_tests/eth_sync_test.go
index 1f07f7c45f..ce9994fb1e 100644
--- a/system_tests/eth_sync_test.go
+++ b/system_tests/eth_sync_test.go
@@ -71,7 +71,7 @@ func TestEthSyncing(t *testing.T) {
if progress == nil {
Fatal(t, "eth_syncing returned nil but shouldn't have")
}
- for testClientB.ConsensusNode.TxStreamer.ExecuteNextMsg(ctx, testClientB.ExecNode) {
+ for testClientB.ConsensusNode.TxStreamer.ExecuteNextMsg(ctx) {
}
progress, err = testClientB.Client.SyncProgress(ctx)
Require(t, err)
diff --git a/system_tests/fast_confirm_test.go b/system_tests/fast_confirm_test.go
index f05219d994..dae2699b9f 100644
--- a/system_tests/fast_confirm_test.go
+++ b/system_tests/fast_confirm_test.go
@@ -79,15 +79,6 @@ func TestFastConfirmation(t *testing.T) {
builder.L1.TransferBalance(t, "Faucet", "Validator", balance, builder.L1Info)
l1auth := builder.L1Info.GetDefaultTransactOpts("Validator", ctx)
- valWalletAddrPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2node.DeployInfo.ValidatorWalletCreator, 0, &l1auth, l2node.L1Reader, true)
- Require(t, err)
- valWalletAddr := *valWalletAddrPtr
- valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2node.DeployInfo.ValidatorWalletCreator, 0, &l1auth, l2node.L1Reader, true)
- Require(t, err)
- if valWalletAddr == *valWalletAddrCheck {
- Require(t, err, "didn't cache validator wallet address", valWalletAddr.String(), "vs", valWalletAddrCheck.String())
- }
-
rollup, err := rollupgen.NewRollupAdminLogic(l2node.DeployInfo.Rollup, builder.L1.Client)
Require(t, err)
@@ -96,27 +87,13 @@ func TestFastConfirmation(t *testing.T) {
rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI))
Require(t, err, "unable to parse rollup ABI")
- setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddr, srv.Address}, []bool{true, true})
- Require(t, err, "unable to generate setValidator calldata")
- tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setValidatorCalldata)
- Require(t, err, "unable to set validators")
- _, err = builder.L1.EnsureTxSucceeded(tx)
- Require(t, err)
-
setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1))
Require(t, err, "unable to generate setMinimumAssertionPeriod calldata")
- tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setMinAssertPeriodCalldata)
+ tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setMinAssertPeriodCalldata)
Require(t, err, "unable to set minimum assertion period")
_, err = builder.L1.EnsureTxSucceeded(tx)
Require(t, err)
- setAnyTrustFastConfirmerCalldata, err := rollupABI.Pack("setAnyTrustFastConfirmer", valWalletAddr)
- Require(t, err, "unable to generate setAnyTrustFastConfirmer calldata")
- tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setAnyTrustFastConfirmerCalldata)
- Require(t, err, "unable to set anytrust fast confirmer")
- _, err = builder.L1.EnsureTxSucceeded(tx)
- Require(t, err)
-
valConfig := staker.TestL1ValidatorConfig
valConfig.EnableFastConfirmation = true
parentChainID, err := builder.L1.Client.ChainID(ctx)
@@ -138,6 +115,29 @@ func TestFastConfirmation(t *testing.T) {
Require(t, err)
valConfig.Strategy = "MakeNodes"
+ valWalletAddrPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2node.DeployInfo.ValidatorWalletCreator, 0, l2node.L1Reader, true, valWallet.DataPoster(), valWallet.GetExtraGas())
+ Require(t, err)
+ valWalletAddr := *valWalletAddrPtr
+ valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2node.DeployInfo.ValidatorWalletCreator, 0, l2node.L1Reader, true, valWallet.DataPoster(), valWallet.GetExtraGas())
+ Require(t, err)
+ if valWalletAddr == *valWalletAddrCheck {
+ Require(t, err, "didn't cache validator wallet address", valWalletAddr.String(), "vs", valWalletAddrCheck.String())
+ }
+
+ setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddr, srv.Address}, []bool{true, true})
+ Require(t, err, "unable to generate setValidator calldata")
+ tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setValidatorCalldata)
+ Require(t, err, "unable to set validators")
+ _, err = builder.L1.EnsureTxSucceeded(tx)
+ Require(t, err)
+
+ setAnyTrustFastConfirmerCalldata, err := rollupABI.Pack("setAnyTrustFastConfirmer", valWalletAddr)
+ Require(t, err, "unable to generate setAnyTrustFastConfirmer calldata")
+ tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2node.DeployInfo.Rollup, setAnyTrustFastConfirmerCalldata)
+ Require(t, err, "unable to set anytrust fast confirmer")
+ _, err = builder.L1.EnsureTxSucceeded(tx)
+ Require(t, err)
+
_, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig)
blockValidatorConfig := staker.TestBlockValidatorConfig
@@ -160,7 +160,7 @@ func TestFastConfirmation(t *testing.T) {
l2node.L1Reader,
valWallet,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfig },
nil,
stateless,
nil,
@@ -278,15 +278,6 @@ func TestFastConfirmationWithSafe(t *testing.T) {
builder.L1.TransferBalance(t, "Faucet", "ValidatorB", balance, builder.L1Info)
l1authB := builder.L1Info.GetDefaultTransactOpts("ValidatorB", ctx)
- valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true)
- Require(t, err)
- valWalletAddrA := *valWalletAddrAPtr
- valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true)
- Require(t, err)
- if valWalletAddrA == *valWalletAddrCheck {
- Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String())
- }
-
rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, builder.L1.Client)
Require(t, err)
@@ -295,30 +286,15 @@ func TestFastConfirmationWithSafe(t *testing.T) {
rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI))
Require(t, err, "unable to parse rollup ABI")
- safeAddress := deploySafe(t, builder.L1, builder.L1.Client, deployAuth, []common.Address{valWalletAddrA, srv.Address})
- setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address, safeAddress}, []bool{true, true, true, true})
- Require(t, err, "unable to generate setValidator calldata")
- tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata)
- Require(t, err, "unable to set validators")
- _, err = builder.L1.EnsureTxSucceeded(tx)
- Require(t, err)
-
setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1))
Require(t, err, "unable to generate setMinimumAssertionPeriod calldata")
- tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata)
+ tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata)
Require(t, err, "unable to set minimum assertion period")
_, err = builder.L1.EnsureTxSucceeded(tx)
Require(t, err)
- setAnyTrustFastConfirmerCalldata, err := rollupABI.Pack("setAnyTrustFastConfirmer", safeAddress)
- Require(t, err, "unable to generate setAnyTrustFastConfirmer calldata")
- tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setAnyTrustFastConfirmerCalldata)
- Require(t, err, "unable to set anytrust fast confirmer")
- _, err = builder.L1.EnsureTxSucceeded(tx)
- Require(t, err)
-
- valConfig := staker.TestL1ValidatorConfig
- valConfig.EnableFastConfirmation = true
+ valConfigA := staker.TestL1ValidatorConfig
+ valConfigA.EnableFastConfirmation = true
parentChainID, err := builder.L1.Client.ChainID(ctx)
if err != nil {
@@ -335,9 +311,33 @@ func TestFastConfirmationWithSafe(t *testing.T) {
if err != nil {
t.Fatalf("Error creating validator dataposter: %v", err)
}
- valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas })
+ valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfigA.ExtraGas })
+ Require(t, err)
+ valConfigA.Strategy = "MakeNodes"
+
+ valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas())
+ Require(t, err)
+ valWalletAddrA := *valWalletAddrAPtr
+ valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas())
+ Require(t, err)
+ if valWalletAddrA == *valWalletAddrCheck {
+ Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String())
+ }
+
+ safeAddress := deploySafe(t, builder.L1, builder.L1.Client, deployAuth, []common.Address{valWalletAddrA, srv.Address})
+ setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address, safeAddress}, []bool{true, true, true, true})
+ Require(t, err, "unable to generate setValidator calldata")
+ tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata)
+ Require(t, err, "unable to set validators")
+ _, err = builder.L1.EnsureTxSucceeded(tx)
+ Require(t, err)
+
+ setAnyTrustFastConfirmerCalldata, err := rollupABI.Pack("setAnyTrustFastConfirmer", safeAddress)
+ Require(t, err, "unable to generate setAnyTrustFastConfirmer calldata")
+ tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setAnyTrustFastConfirmerCalldata)
+ Require(t, err, "unable to set anytrust fast confirmer")
+ _, err = builder.L1.EnsureTxSucceeded(tx)
Require(t, err)
- valConfig.Strategy = "MakeNodes"
_, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig)
blockValidatorConfig := staker.TestBlockValidatorConfig
@@ -361,7 +361,7 @@ func TestFastConfirmationWithSafe(t *testing.T) {
l2nodeA.L1Reader,
valWalletA,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfigA },
nil,
statelessA,
nil,
@@ -391,7 +391,9 @@ func TestFastConfirmationWithSafe(t *testing.T) {
}
valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 })
Require(t, err)
- valConfig.Strategy = "watchtower"
+ valConfigB := staker.TestL1ValidatorConfig
+ valConfigB.EnableFastConfirmation = true
+ valConfigB.Strategy = "watchtower"
statelessB, err := staker.NewStatelessBlockValidator(
l2nodeB.InboxReader,
l2nodeB.InboxTracker,
@@ -411,7 +413,7 @@ func TestFastConfirmationWithSafe(t *testing.T) {
l2nodeB.L1Reader,
valWalletB,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfigB },
nil,
statelessB,
nil,
diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go
index 9fe419593e..57381ca84e 100644
--- a/system_tests/forwarder_test.go
+++ b/system_tests/forwarder_test.go
@@ -38,7 +38,7 @@ func TestStaticForwarder(t *testing.T) {
clientA := builder.L2.Client
nodeConfigB := arbnode.ConfigDefaultL1Test()
- execConfigB := ExecConfigDefaultTest()
+ execConfigB := ExecConfigDefaultTest(t)
execConfigB.Sequencer.Enable = false
nodeConfigB.Sequencer = false
nodeConfigB.DelayedSequencer.Enable = false
@@ -109,7 +109,7 @@ func createForwardingNode(t *testing.T, builder *NodeBuilder, ipcPath string, re
nodeConfig.Sequencer = false
nodeConfig.DelayedSequencer.Enable = false
nodeConfig.BatchPoster.Enable = false
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.Sequencer.Enable = false
execConfig.Forwarder.RedisUrl = redisUrl
execConfig.ForwardingTarget = fallbackPath
@@ -246,6 +246,7 @@ func TestRedisForwarder(t *testing.T) {
for i := range seqClients {
userA := user("A", i)
builder.L2Info.GenerateAccount(userA)
+ // #nosec G115
tx := builder.L2Info.PrepareTx("Owner", userA, builder.L2Info.TransferGas, big.NewInt(1e12+int64(builder.L2Info.TransferGas)*builder.L2Info.GasPrice.Int64()), nil)
err := fallbackClient.SendTransaction(ctx, tx)
Require(t, err)
diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go
index ddc229074c..bf30c928d8 100644
--- a/system_tests/full_challenge_impl_test.go
+++ b/system_tests/full_challenge_impl_test.go
@@ -27,7 +27,6 @@ import (
"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/arbos"
"github.com/offchainlabs/nitro/arbstate"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/solgen/go/challengegen"
"github.com/offchainlabs/nitro/solgen/go/mocksgen"
"github.com/offchainlabs/nitro/solgen/go/ospgen"
@@ -178,7 +177,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b
Require(t, err, "failed to get batch metadata after adding batch:")
}
-func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend arbutil.L1Interface) {
+func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend *ethclient.Client) {
t.Helper()
// With SimulatedBeacon running in on-demand block production mode, the
// finalized block is considered to be be the nearest multiple of 32 less
@@ -190,7 +189,7 @@ func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTes
}
}
-func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, l1Client arbutil.L1Interface, chainConfig *params.ChainConfig) (common.Address, *mocksgen.SequencerInboxStub, common.Address) {
+func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, l1Client *ethclient.Client, chainConfig *params.ChainConfig) (common.Address, *mocksgen.SequencerInboxStub, common.Address) {
txOpts := l1Info.GetDefaultTransactOpts("deployer", ctx)
bridgeAddr, tx, bridge, err := mocksgen.DeployBridgeUnproxied(&txOpts, l1Client)
Require(t, err)
diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go
index f0797404a9..17e020e6ab 100644
--- a/system_tests/initialization_test.go
+++ b/system_tests/initialization_test.go
@@ -21,6 +21,7 @@ func InitOneContract(prand *testhelpers.PseudoRandomDataSource) (*statetransfer.
storageMap := make(map[common.Hash]common.Hash)
code := []byte{0x60, 0x0} // PUSH1 0
sum := big.NewInt(0)
+ // #nosec G115
numCells := int(prand.GetUint64() % 1000)
for i := 0; i < numCells; i++ {
storageAddr := prand.GetHash()
diff --git a/system_tests/l3_test.go b/system_tests/l3_test.go
new file mode 100644
index 0000000000..97eabcee78
--- /dev/null
+++ b/system_tests/l3_test.go
@@ -0,0 +1,53 @@
+package arbtest
+
+import (
+ "context"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/offchainlabs/nitro/arbnode"
+)
+
+func TestSimpleL3(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ cleanupL1AndL2 := builder.Build(t)
+ defer cleanupL1AndL2()
+
+ cleanupL3FirstNode := builder.BuildL3OnL2(t)
+ defer cleanupL3FirstNode()
+ firstNodeTestClient := builder.L3
+
+ secondNodeNodeConfig := arbnode.ConfigDefaultL1NonSequencerTest()
+ secondNodeTestClient, cleanupL3SecondNode := builder.Build2ndNodeOnL3(t, &SecondNodeParams{nodeConfig: secondNodeNodeConfig})
+ defer cleanupL3SecondNode()
+
+ accountName := "User2"
+ builder.L3Info.GenerateAccount(accountName)
+ tx := builder.L3Info.PrepareTx("Owner", accountName, builder.L3Info.TransferGas, big.NewInt(1e12), nil)
+
+ err := firstNodeTestClient.Client.SendTransaction(ctx, tx)
+ Require(t, err)
+
+ // Checks that first node has the correct balance
+ _, err = firstNodeTestClient.EnsureTxSucceeded(tx)
+ Require(t, err)
+ l2balance, err := firstNodeTestClient.Client.BalanceAt(ctx, builder.L3Info.GetAddress(accountName), nil)
+ Require(t, err)
+ if l2balance.Cmp(big.NewInt(1e12)) != 0 {
+ t.Fatal("Unexpected balance:", l2balance)
+ }
+
+ // Checks that second node has the correct balance
+ _, err = WaitForTx(ctx, secondNodeTestClient.Client, tx.Hash(), time.Second*15)
+ Require(t, err)
+ l2balance, err = secondNodeTestClient.Client.BalanceAt(ctx, builder.L3Info.GetAddress(accountName), nil)
+ Require(t, err)
+ if l2balance.Cmp(big.NewInt(1e12)) != 0 {
+ t.Fatal("Unexpected balance:", l2balance)
+ }
+}
diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go
index 17bfb18892..927dc1b630 100644
--- a/system_tests/nodeinterface_test.go
+++ b/system_tests/nodeinterface_test.go
@@ -163,6 +163,7 @@ func TestGetL1Confirmations(t *testing.T) {
numTransactions := 200
+ // #nosec G115
if l1Confs >= uint64(numTransactions) {
t.Fatalf("L1Confirmations for latest block %v is already %v (over %v)", genesisBlock.Number(), l1Confs, numTransactions)
}
@@ -175,6 +176,7 @@ func TestGetL1Confirmations(t *testing.T) {
Require(t, err)
// Allow a gap of 10 for asynchronicity, just in case
+ // #nosec G115
if l1Confs+10 < uint64(numTransactions) {
t.Fatalf("L1Confirmations for latest block %v is only %v (did not hit expected %v)", genesisBlock.Number(), l1Confs, numTransactions)
}
diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go
index 739d756a31..25c52396f9 100644
--- a/system_tests/outbox_test.go
+++ b/system_tests/outbox_test.go
@@ -146,6 +146,7 @@ func TestOutboxProofs(t *testing.T) {
treeSize := root.size
balanced := treeSize == arbmath.NextPowerOf2(treeSize)/2
+ // #nosec G115
treeLevels := int(arbmath.Log2ceil(treeSize)) // the # of levels in the tree
proofLevels := treeLevels - 1 // the # of levels where a hash is needed (all but root)
walkLevels := treeLevels // the # of levels we need to consider when building walks
@@ -174,6 +175,7 @@ func TestOutboxProofs(t *testing.T) {
sibling := place ^ which
position := merkletree.LevelAndLeaf{
+ // #nosec G115
Level: uint64(level),
Leaf: sibling,
}
@@ -200,6 +202,7 @@ func TestOutboxProofs(t *testing.T) {
leaf := total - 1 // preceding it. We subtract 1 since we count from 0
partial := merkletree.LevelAndLeaf{
+ // #nosec G115
Level: uint64(level),
Leaf: leaf,
}
@@ -288,6 +291,7 @@ func TestOutboxProofs(t *testing.T) {
step.Leaf += 1 << step.Level // we start on the min partial's zero-hash sibling
known[step] = zero
+ // #nosec G115
for step.Level < uint64(treeLevels) {
curr, ok := known[step]
diff --git a/system_tests/program_gas_test.go b/system_tests/program_gas_test.go
new file mode 100644
index 0000000000..119897cbfe
--- /dev/null
+++ b/system_tests/program_gas_test.go
@@ -0,0 +1,458 @@
+package arbtest
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/big"
+ "regexp"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+ "github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/offchainlabs/nitro/arbos/util"
+ "github.com/offchainlabs/nitro/execution/gethexec"
+ "github.com/offchainlabs/nitro/solgen/go/mocksgen"
+ "github.com/offchainlabs/nitro/solgen/go/precompilesgen"
+ "github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestProgramSimpleCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+ evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+ otherProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("storage"))
+ matchSnake := regexp.MustCompile("_[a-z]")
+
+ for _, tc := range []struct {
+ hostio string
+ opcode vm.OpCode
+ params []any
+ maxDiff float64
+ }{
+ {hostio: "exit_early", opcode: vm.STOP},
+ {hostio: "transient_load_bytes32", opcode: vm.TLOAD, params: []any{common.HexToHash("dead")}},
+ {hostio: "transient_store_bytes32", opcode: vm.TSTORE, params: []any{common.HexToHash("dead"), common.HexToHash("beef")}},
+ {hostio: "return_data_size", opcode: vm.RETURNDATASIZE, maxDiff: 1.5},
+ {hostio: "account_balance", opcode: vm.BALANCE, params: []any{builder.L2Info.GetAddress("Owner")}},
+ {hostio: "account_code", opcode: vm.EXTCODECOPY, params: []any{otherProgram}},
+ {hostio: "account_code_size", opcode: vm.EXTCODESIZE, params: []any{otherProgram}, maxDiff: 0.3},
+ {hostio: "account_codehash", opcode: vm.EXTCODEHASH, params: []any{otherProgram}},
+ {hostio: "evm_gas_left", opcode: vm.GAS, maxDiff: 1.5},
+ {hostio: "evm_ink_left", opcode: vm.GAS, maxDiff: 1.5},
+ {hostio: "block_basefee", opcode: vm.BASEFEE, maxDiff: 0.5},
+ {hostio: "chainid", opcode: vm.CHAINID, maxDiff: 1.5},
+ {hostio: "block_coinbase", opcode: vm.COINBASE, maxDiff: 0.5},
+ {hostio: "block_gas_limit", opcode: vm.GASLIMIT, maxDiff: 1.5},
+ {hostio: "block_number", opcode: vm.NUMBER, maxDiff: 1.5},
+ {hostio: "block_timestamp", opcode: vm.TIMESTAMP, maxDiff: 1.5},
+ {hostio: "contract_address", opcode: vm.ADDRESS, maxDiff: 0.5},
+ {hostio: "math_div", opcode: vm.DIV, params: []any{big.NewInt(1), big.NewInt(3)}},
+ {hostio: "math_mod", opcode: vm.MOD, params: []any{big.NewInt(1), big.NewInt(3)}},
+ {hostio: "math_add_mod", opcode: vm.ADDMOD, params: []any{big.NewInt(1), big.NewInt(3), big.NewInt(5)}, maxDiff: 0.7},
+ {hostio: "math_mul_mod", opcode: vm.MULMOD, params: []any{big.NewInt(1), big.NewInt(3), big.NewInt(5)}, maxDiff: 0.7},
+ {hostio: "msg_sender", opcode: vm.CALLER, maxDiff: 0.5},
+ {hostio: "msg_value", opcode: vm.CALLVALUE, maxDiff: 0.5},
+ {hostio: "tx_gas_price", opcode: vm.GASPRICE, maxDiff: 0.5},
+ {hostio: "tx_ink_price", opcode: vm.GASPRICE, maxDiff: 1.5},
+ {hostio: "tx_origin", opcode: vm.ORIGIN, maxDiff: 0.5},
+ } {
+ t.Run(tc.hostio, func(t *testing.T) {
+ solFunc := matchSnake.ReplaceAllStringFunc(tc.hostio, func(s string) string {
+ return strings.ToUpper(strings.TrimPrefix(s, "_"))
+ })
+ packer, _ := util.NewCallParser(mocksgen.HostioTestABI, solFunc)
+ data, err := packer(tc.params...)
+ Require(t, err)
+ compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, tc.maxDiff, compareGasPair{tc.opcode, tc.hostio})
+ })
+ }
+}
+
+func TestProgramPowCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+ evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+ packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "mathPow")
+
+ for _, exponentNumBytes := range []uint{1, 2, 10, 32} {
+ name := fmt.Sprintf("exponentNumBytes%v", exponentNumBytes)
+ t.Run(name, func(t *testing.T) {
+ exponent := new(big.Int).Lsh(big.NewInt(1), exponentNumBytes*8-1)
+ params := []any{big.NewInt(1), exponent}
+ data, err := packer(params...)
+ Require(t, err)
+ evmGasUsage, stylusGasUsage := measureGasUsage(t, builder, evmProgram, stylusProgram, data, nil)
+ expectedGas := 2.652 + 1.75*float64(exponentNumBytes+1)
+ t.Logf("evm EXP usage: %v - stylus math_pow usage: %v - expected math_pow usage: %v",
+ evmGasUsage[vm.EXP][0], stylusGasUsage["math_pow"][0], expectedGas)
+ // The math_pow HostIO uses significantly less gas than the EXP opcode. So,
+ // instead of comparing it to EVM, we compare it to the expected gas usage
+ // for each test case.
+ checkPercentDiff(t, stylusGasUsage["math_pow"][0], expectedGas, 0.001)
+ })
+ }
+}
+
+func TestProgramStorageCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusMulticall := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("multicall"))
+ evmMulticall := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.MultiCallTestMetaData)
+
+ const numSlots = 42
+ rander := testhelpers.NewPseudoRandomDataSource(t, 0)
+ readData := multicallEmptyArgs()
+ writeRandAData := multicallEmptyArgs()
+ writeRandBData := multicallEmptyArgs()
+ writeZeroData := multicallEmptyArgs()
+ for i := 0; i < numSlots; i++ {
+ slot := rander.GetHash()
+ readData = multicallAppendLoad(readData, slot, false)
+ writeRandAData = multicallAppendStore(writeRandAData, slot, rander.GetHash(), false)
+ writeRandBData = multicallAppendStore(writeRandBData, slot, rander.GetHash(), false)
+ writeZeroData = multicallAppendStore(writeZeroData, slot, common.Hash{}, false)
+ }
+
+ for _, tc := range []struct {
+ name string
+ data []byte
+ }{
+ {"initialWrite", writeRandAData},
+ {"read", readData},
+ {"writeAgain", writeRandBData},
+ {"delete", writeZeroData},
+ {"readZeros", readData},
+ {"writeAgainAgain", writeRandAData},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, tc.data, nil, compareGasSum, 0,
+ compareGasPair{vm.SSTORE, "storage_flush_cache"}, compareGasPair{vm.SLOAD, "storage_load_bytes32"})
+ })
+ }
+}
+
+func TestProgramLogCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+ evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+ packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "emitLog")
+
+ for ntopics := int8(0); ntopics < 5; ntopics++ {
+ for _, dataSize := range []uint64{10, 100, 1000} {
+ name := fmt.Sprintf("emitLog%dData%d", ntopics, dataSize)
+ t.Run(name, func(t *testing.T) {
+ args := []any{
+ testhelpers.RandomSlice(dataSize),
+ ntopics,
+ }
+ for t := 0; t < 4; t++ {
+ args = append(args, testhelpers.RandomHash())
+ }
+ data, err := packer(args...)
+ Require(t, err)
+ opcode := vm.LOG0 + vm.OpCode(ntopics)
+ compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, 0, compareGasPair{opcode, "emit_log"})
+ })
+ }
+ }
+
+}
+
+func TestProgramCallCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusMulticall := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("multicall"))
+ evmMulticall := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.MultiCallTestMetaData)
+ otherStylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+ otherEvmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+ packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "msgValue")
+ otherData, err := packer()
+ Require(t, err)
+
+ for _, pair := range []compareGasPair{
+ {vm.CALL, "call_contract"},
+ {vm.DELEGATECALL, "delegate_call_contract"},
+ {vm.STATICCALL, "static_call_contract"},
+ } {
+ t.Run(pair.hostio+"/burnGas", func(t *testing.T) {
+ arbTest := common.HexToAddress("0x0000000000000000000000000000000000000069")
+ burnArbGas, _ := util.NewCallParser(precompilesgen.ArbosTestABI, "burnArbGas")
+ burnData, err := burnArbGas(big.NewInt(0))
+ Require(t, err)
+ data := argsForMulticall(pair.opcode, arbTest, nil, burnData)
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair)
+ })
+
+ t.Run(pair.hostio+"/evmContract", func(t *testing.T) {
+ data := argsForMulticall(pair.opcode, otherEvmProgram, nil, otherData)
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair,
+ compareGasPair{vm.RETURNDATACOPY, "read_return_data"}) // also test read_return_data
+ })
+
+ t.Run(pair.hostio+"/stylusContract", func(t *testing.T) {
+ data := argsForMulticall(pair.opcode, otherStylusProgram, nil, otherData)
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair,
+ compareGasPair{vm.RETURNDATACOPY, "read_return_data"}) // also test read_return_data
+ })
+
+ t.Run(pair.hostio+"/multipleTimes", func(t *testing.T) {
+ data := multicallEmptyArgs()
+ for i := 0; i < 9; i++ {
+ data = multicallAppend(data, pair.opcode, otherEvmProgram, otherData)
+ }
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, nil, compareGasForEach, 0, pair)
+ })
+ }
+
+ t.Run("call_contract/evmContractWithValue", func(t *testing.T) {
+ value := big.NewInt(1000)
+ data := argsForMulticall(vm.CALL, otherEvmProgram, value, otherData)
+ compareGasUsage(t, builder, evmMulticall, stylusMulticall, data, value, compareGasForEach, 0, compareGasPair{vm.CALL, "call_contract"})
+ })
+}
+
+func TestProgramCreateCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusCreate := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("create"))
+ evmCreate := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.CreateTestMetaData)
+ deployCode := common.FromHex(mocksgen.ProgramTestMetaData.Bin)
+
+ t.Run("create1", func(t *testing.T) {
+ data := []byte{0x01}
+ data = append(data, (common.Hash{}).Bytes()...) // endowment
+ data = append(data, deployCode...)
+ compareGasUsage(t, builder, evmCreate, stylusCreate, data, nil, compareGasForEach, 0, compareGasPair{vm.CREATE, "create1"})
+ })
+
+ t.Run("create2", func(t *testing.T) {
+ data := []byte{0x02}
+ data = append(data, (common.Hash{}).Bytes()...) // endowment
+ data = append(data, (common.HexToHash("beef")).Bytes()...) // salt
+ data = append(data, deployCode...)
+ compareGasUsage(t, builder, evmCreate, stylusCreate, data, nil, compareGasForEach, 0, compareGasPair{vm.CREATE2, "create2"})
+ })
+}
+
+func TestProgramKeccakCost(t *testing.T) {
+ builder := setupGasCostTest(t)
+ auth := builder.L2Info.GetDefaultTransactOpts("Owner", builder.ctx)
+ stylusProgram := deployWasm(t, builder.ctx, auth, builder.L2.Client, rustFile("hostio-test"))
+ evmProgram := deployEvmContract(t, builder.ctx, auth, builder.L2.Client, mocksgen.HostioTestMetaData)
+ packer, _ := util.NewCallParser(mocksgen.HostioTestABI, "keccak")
+
+ for i := 1; i < 5; i++ {
+ size := uint64(math.Pow10(i))
+ name := fmt.Sprintf("keccak%d", size)
+ t.Run(name, func(t *testing.T) {
+ preImage := testhelpers.RandomSlice(size)
+ preImage[len(preImage)-1] = 0
+ data, err := packer(preImage)
+ Require(t, err)
+ const maxDiff = 2.5 // stylus keccak charges significantly less gas
+ compareGasUsage(t, builder, evmProgram, stylusProgram, data, nil, compareGasForEach, maxDiff, compareGasPair{vm.KECCAK256, "native_keccak256"})
+ })
+ }
+}
+
+func setupGasCostTest(t *testing.T) *NodeBuilder {
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ cleanup := builder.Build(t)
+ t.Cleanup(cleanup)
+ return builder
+}
+
+// deployEvmContract deploys an EVM contract and returns its address.
+func deployEvmContract(t *testing.T, ctx context.Context, auth bind.TransactOpts, client *ethclient.Client, metadata *bind.MetaData) common.Address {
+ t.Helper()
+ parsed, err := metadata.GetAbi()
+ Require(t, err)
+ address, tx, _, err := bind.DeployContract(&auth, *parsed, common.FromHex(metadata.Bin), client)
+ Require(t, err)
+ _, err = EnsureTxSucceeded(ctx, client, tx)
+ Require(t, err)
+ return address
+}
+
+// measureGasUsage calls an EVM and a Wasm contract passing the same data and the same value.
+func measureGasUsage(
+ t *testing.T,
+ builder *NodeBuilder,
+ evmContract common.Address,
+ stylusContract common.Address,
+ txData []byte,
+ txValue *big.Int,
+) (map[vm.OpCode][]uint64, map[string][]float64) {
+ const txGas uint64 = 32_000_000
+ txs := []*types.Transaction{
+ builder.L2Info.PrepareTxTo("Owner", &evmContract, txGas, txValue, txData),
+ builder.L2Info.PrepareTxTo("Owner", &stylusContract, txGas, txValue, txData),
+ }
+ receipts := builder.L2.SendWaitTestTransactions(t, txs)
+
+ evmGas := receipts[0].GasUsedForL2()
+ evmGasUsage, err := evmOpcodesGasUsage(builder.ctx, builder.L2.Client.Client(), txs[0])
+ Require(t, err)
+
+ stylusGas := receipts[1].GasUsedForL2()
+ stylusGasUsage, err := stylusHostiosGasUsage(builder.ctx, builder.L2.Client.Client(), txs[1])
+ Require(t, err)
+
+ t.Logf("evm total usage: %v - stylus total usage: %v", evmGas, stylusGas)
+
+ return evmGasUsage, stylusGasUsage
+}
+
+type compareGasPair struct {
+ opcode vm.OpCode
+ hostio string
+}
+
+type compareGasMode int
+
+const (
+ compareGasForEach compareGasMode = iota
+ compareGasSum
+)
+
+// compareGasUsage calls measureGasUsage and then it ensures the given opcodes and hostios cost
+// roughly the same amount of gas.
+func compareGasUsage(
+ t *testing.T,
+ builder *NodeBuilder,
+ evmContract common.Address,
+ stylusContract common.Address,
+ txData []byte,
+ txValue *big.Int,
+ mode compareGasMode,
+ maxAllowedDifference float64,
+ pairs ...compareGasPair,
+) {
+ if evmContract == stylusContract {
+ Fatal(t, "evm and stylus contract are the same")
+ }
+ evmGasUsage, stylusGasUsage := measureGasUsage(t, builder, evmContract, stylusContract, txData, txValue)
+ for i := range pairs {
+ opcode := pairs[i].opcode
+ hostio := pairs[i].hostio
+ switch mode {
+ case compareGasForEach:
+ if len(evmGasUsage[opcode]) != len(stylusGasUsage[hostio]) {
+ Fatal(t, "mismatch between hostios and opcodes", evmGasUsage, stylusGasUsage)
+ }
+ for i := range evmGasUsage[opcode] {
+ opcodeGas := evmGasUsage[opcode][i]
+ hostioGas := stylusGasUsage[hostio][i]
+ t.Logf("evm %v usage: %v - stylus %v usage: %v", opcode, opcodeGas, hostio, hostioGas)
+ checkPercentDiff(t, float64(opcodeGas), hostioGas, maxAllowedDifference)
+ }
+ case compareGasSum:
+ evmSum := float64(0)
+ stylusSum := float64(0)
+ for i := range evmGasUsage[opcode] {
+ evmSum += float64(evmGasUsage[opcode][i])
+ stylusSum += stylusGasUsage[hostio][i]
+ }
+ t.Logf("evm %v usage: %v - stylus %v usage: %v", opcode, evmSum, hostio, stylusSum)
+ checkPercentDiff(t, evmSum, stylusSum, maxAllowedDifference)
+ }
+ }
+}
+
+func evmOpcodesGasUsage(ctx context.Context, rpcClient rpc.ClientInterface, tx *types.Transaction) (
+ map[vm.OpCode][]uint64, error) {
+
+ var result logger.ExecutionResult
+ err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to trace evm call: %w", err)
+ }
+
+ gasUsage := map[vm.OpCode][]uint64{}
+ for i := range result.StructLogs {
+ op := vm.StringToOp(result.StructLogs[i].Op)
+ gasUsed := uint64(0)
+ if op == vm.CALL || op == vm.STATICCALL || op == vm.DELEGATECALL || op == vm.CREATE || op == vm.CREATE2 {
+ // For the CALL* opcodes, the GasCost in the tracer represents the gas sent
+ // to the callee contract, which is 63/64 of the remaining gas. This happens
+ // because the tracer is evaluated before the call is executed, so the EVM
+ // doesn't know how much gas will be used.
+ //
+ // In the case of the Stylus tracer, the trace is emitted after the
+ // execution, so the EndInk field is set to the ink after the call returned.
+ // Hence, it also includes the ink spent by the callee contract.
+ //
+ // To make a precise comparison between the EVM and Stylus, we modify the
+ // EVM measurement to include the gas spent by the callee contract. To do
+ // so, we go through the opcodes after CALL until we find the first opcode
+ // at the caller's depth. Then, we subtract the gas after the call
+ // returned from the gas before the call.
+ var gasAfterCall uint64
+ for j := i + 1; j < len(result.StructLogs); j++ {
+ if result.StructLogs[j].Depth == result.StructLogs[i].Depth {
+ // back to the original call
+ gasAfterCall = result.StructLogs[j].Gas + result.StructLogs[j].GasCost
+ break
+ }
+ }
+ if gasAfterCall == 0 {
+ return nil, fmt.Errorf("malformed log: didn't get back to call original depth")
+ }
+ if i == 0 {
+ return nil, fmt.Errorf("malformed log: call is first opcode")
+ }
+ gasUsed = result.StructLogs[i-1].Gas - gasAfterCall
+ } else {
+ gasUsed = result.StructLogs[i].GasCost
+ }
+ gasUsage[op] = append(gasUsage[op], gasUsed)
+ }
+ return gasUsage, nil
+}
+
+func stylusHostiosGasUsage(ctx context.Context, rpcClient rpc.ClientInterface, tx *types.Transaction) (
+ map[string][]float64, error) {
+
+ traceOpts := struct {
+ Tracer string `json:"tracer"`
+ }{
+ Tracer: "stylusTracer",
+ }
+ var result []gethexec.HostioTraceInfo
+ err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), traceOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to trace stylus call: %w", err)
+ }
+
+ const InkPerGas = 10000
+ gasUsage := map[string][]float64{}
+ for _, hostioLog := range result {
+ gasCost := float64(hostioLog.StartInk-hostioLog.EndInk) / InkPerGas
+ gasUsage[hostioLog.Name] = append(gasUsage[hostioLog.Name], gasCost)
+ }
+ return gasUsage, nil
+}
+
+// checkPercentDiff checks whether the two values are close enough.
+func checkPercentDiff(t *testing.T, a, b float64, maxAllowedDifference float64) {
+ t.Helper()
+ if maxAllowedDifference == 0 {
+ maxAllowedDifference = 0.25
+ }
+ percentageDifference := (max(a, b) / min(a, b)) - 1
+ if percentageDifference > maxAllowedDifference {
+ Fatal(t, fmt.Sprintf("gas usages are too different; got %v, max allowed is %v", percentageDifference, maxAllowedDifference))
+ }
+}
diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go
index dbf527a293..e928f9f3aa 100644
--- a/system_tests/program_recursive_test.go
+++ b/system_tests/program_recursive_test.go
@@ -154,6 +154,7 @@ func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit boo
// execute transactions
blockNum := uint64(0)
for {
+ // #nosec G115
item := int(rander.GetUint64()/4) % len(tests)
blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item])
tests[item] = tests[len(tests)-1]
diff --git a/system_tests/program_test.go b/system_tests/program_test.go
index ae34c6c5bb..cf8cd72559 100644
--- a/system_tests/program_test.go
+++ b/system_tests/program_test.go
@@ -41,23 +41,35 @@ import (
"github.com/offchainlabs/nitro/util/colors"
"github.com/offchainlabs/nitro/util/testhelpers"
"github.com/offchainlabs/nitro/validator/valnode"
- "github.com/wasmerio/wasmer-go/wasmer"
)
var oneEth = arbmath.UintToBig(1e18)
+var allWasmTargets = []string{string(rawdb.TargetWavm), string(rawdb.TargetArm64), string(rawdb.TargetAmd64), string(rawdb.TargetHost)}
+
func TestProgramKeccak(t *testing.T) {
t.Parallel()
- keccakTest(t, true)
+ t.Run("WithDefaultWasmTargets", func(t *testing.T) {
+ keccakTest(t, true)
+ })
+
+ t.Run("WithAllWasmTargets", func(t *testing.T) {
+ keccakTest(t, true, func(builder *NodeBuilder) {
+ builder.WithExtraArchs(allWasmTargets)
+ })
+ })
}
-func keccakTest(t *testing.T, jit bool) {
- builder, auth, cleanup := setupProgramTest(t, jit)
+func keccakTest(t *testing.T, jit bool, builderOpts ...func(*NodeBuilder)) {
+ builder, auth, cleanup := setupProgramTest(t, jit, builderOpts...)
ctx := builder.ctx
l2client := builder.L2.Client
defer cleanup()
programAddress := deployWasm(t, ctx, auth, l2client, rustFile("keccak"))
+ wasmDb := builder.L2.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore()
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
+
wasm, _ := readWasmFile(t, rustFile("keccak"))
otherAddressSameCode := deployContract(t, ctx, auth, l2client, wasm)
arbWasm, err := pgen.NewArbWasm(types.ArbWasmAddress, l2client)
@@ -69,6 +81,7 @@ func keccakTest(t *testing.T, jit bool) {
Fatal(t, "activate should have failed with ProgramUpToDate", err)
}
})
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
if programAddress == otherAddressSameCode {
Fatal(t, "expected to deploy at two separate program addresses")
@@ -142,11 +155,18 @@ func keccakTest(t *testing.T, jit bool) {
func TestProgramActivateTwice(t *testing.T) {
t.Parallel()
- testActivateTwice(t, true)
+ t.Run("WithDefaultWasmTargets", func(t *testing.T) {
+ testActivateTwice(t, true)
+ })
+ t.Run("WithAllWasmTargets", func(t *testing.T) {
+ testActivateTwice(t, true, func(builder *NodeBuilder) {
+ builder.WithExtraArchs(allWasmTargets)
+ })
+ })
}
-func testActivateTwice(t *testing.T, jit bool) {
- builder, auth, cleanup := setupProgramTest(t, jit)
+func testActivateTwice(t *testing.T, jit bool, builderOpts ...func(*NodeBuilder)) {
+ builder, auth, cleanup := setupProgramTest(t, jit, builderOpts...)
ctx := builder.ctx
l2info := builder.L2Info
l2client := builder.L2.Client
@@ -172,6 +192,10 @@ func testActivateTwice(t *testing.T, jit bool) {
colors.PrintBlue("keccak program B deployed to ", keccakB)
multiAddr := deployWasm(t, ctx, auth, l2client, rustFile("multicall"))
+
+ wasmDb := builder.L2.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore()
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
+
preimage := []byte("it's time to du-du-du-du d-d-d-d-d-d-d de-duplicate")
keccakArgs := []byte{0x01} // keccak the preimage once
@@ -195,6 +219,7 @@ func testActivateTwice(t *testing.T, jit bool) {
// Calling the contract pre-activation should fail.
checkReverts()
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
// mechanisms for creating calldata
activateProgram, _ := util.NewCallParser(pgen.ArbWasmABI, "activateProgram")
@@ -217,6 +242,7 @@ func testActivateTwice(t *testing.T, jit bool) {
// Ensure the revert also reverted keccak's activation
checkReverts()
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
// Activate keccak program A, then call into B, which should succeed due to being the same codehash
args = argsForMulticall(vm.CALL, types.ArbWasmAddress, oneEth, pack(activateProgram(keccakA)))
@@ -224,6 +250,7 @@ func testActivateTwice(t *testing.T, jit bool) {
tx = l2info.PrepareTxTo("Owner", &multiAddr, 1e9, oneEth, args)
ensure(tx, l2client.SendTransaction(ctx, tx))
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 2)
validateBlocks(t, 7, jit, builder)
}
@@ -390,10 +417,15 @@ func storageTest(t *testing.T, jit bool) {
key := testhelpers.RandomHash()
value := testhelpers.RandomHash()
tx := l2info.PrepareTxTo("Owner", &programAddress, l2info.TransferGas, nil, argsForStorageWrite(key, value))
- ensure(tx, l2client.SendTransaction(ctx, tx))
+ receipt := ensure(tx, l2client.SendTransaction(ctx, tx))
+
assertStorageAt(t, ctx, l2client, programAddress, key, value)
validateBlocks(t, 2, jit, builder)
+
+ // Captures a block_input_.json file for the block that included the
+ // storage write transaction.
+ recordBlock(t, receipt.BlockNumber.Uint64(), builder)
}
func TestProgramTransientStorage(t *testing.T) {
@@ -583,6 +615,7 @@ func testCalls(t *testing.T, jit bool) {
for i := 0; i < 2; i++ {
inner := nest(level - 1)
+ // #nosec G115
args = append(args, arbmath.Uint32ToBytes(uint32(len(inner)))...)
args = append(args, inner...)
}
@@ -638,6 +671,7 @@ func testCalls(t *testing.T, jit bool) {
colors.PrintBlue("Calling the ArbosTest precompile (Rust => precompile)")
testPrecompile := func(gas uint64) uint64 {
// Call the burnArbGas() precompile from Rust
+ // #nosec G115
burn := pack(burnArbGas(big.NewInt(int64(gas))))
args := argsForMulticall(vm.CALL, types.ArbosTestAddress, nil, burn)
tx := l2info.PrepareTxTo("Owner", &callsAddr, 1e9, nil, args)
@@ -651,6 +685,7 @@ func testCalls(t *testing.T, jit bool) {
large := testPrecompile(largeGas)
if !arbmath.Within(large-small, largeGas-smallGas, 2) {
+ // #nosec G115
ratio := float64(int64(large)-int64(small)) / float64(int64(largeGas)-int64(smallGas))
Fatal(t, "inconsistent burns", large, small, largeGas, smallGas, ratio)
}
@@ -1528,9 +1563,10 @@ func readWasmFile(t *testing.T, file string) ([]byte, []byte) {
Require(t, err)
// chose a random dictionary for testing, but keep the same files consistent
+ // #nosec G115
randDict := arbcompress.Dictionary((len(file) + len(t.Name())) % 2)
- wasmSource, err := wasmer.Wat2Wasm(string(source))
+ wasmSource, err := programs.Wat2Wasm(source)
Require(t, err)
wasm, err := arbcompress.Compress(wasmSource, arbcompress.LEVEL_WELL, randDict)
Require(t, err)
@@ -1598,6 +1634,7 @@ func argsForMulticall(opcode vm.OpCode, address common.Address, value *big.Int,
if opcode == vm.CALL {
length += 32
}
+ // #nosec G115
args = append(args, arbmath.Uint32ToBytes(uint32(length))...)
args = append(args, kinds[opcode])
if opcode == vm.CALL {
@@ -1830,7 +1867,9 @@ func createMapFromDb(db ethdb.KeyValueStore) (map[string][]byte, error) {
}
func TestWasmStoreRebuilding(t *testing.T) {
- builder, auth, cleanup := setupProgramTest(t, true)
+ builder, auth, cleanup := setupProgramTest(t, true, func(b *NodeBuilder) {
+ b.WithExtraArchs(allWasmTargets)
+ })
ctx := builder.ctx
l2info := builder.L2Info
l2client := builder.L2.Client
@@ -1867,6 +1906,7 @@ func TestWasmStoreRebuilding(t *testing.T) {
storeMap, err := createMapFromDb(wasmDb)
Require(t, err)
+ checkWasmStoreContent(t, wasmDb, builder.execConfig.StylusTarget.ExtraArchs, 1)
// close nodeB
cleanupB()
@@ -1892,7 +1932,8 @@ func TestWasmStoreRebuilding(t *testing.T) {
// Start rebuilding and wait for it to finish
log.Info("starting rebuilding of wasm store")
- Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, nodeB.ExecNode.ChainDB, nodeB.ExecNode.ConfigFetcher().RPC.MaxRecreateStateDepth, bc, common.Hash{}, bc.CurrentBlock().Hash()))
+ execConfig := nodeB.ExecNode.ConfigFetcher()
+ Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, nodeB.ExecNode.ChainDB, execConfig.RPC.MaxRecreateStateDepth, &execConfig.StylusTarget, bc, common.Hash{}, bc.CurrentBlock().Hash()))
wasmDbAfterRebuild := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore()
@@ -1922,5 +1963,177 @@ func TestWasmStoreRebuilding(t *testing.T) {
}
}
+ checkWasmStoreContent(t, wasmDbAfterRebuild, builder.execConfig.StylusTarget.ExtraArchs, 1)
cleanupB()
}
+
+func readModuleHashes(t *testing.T, wasmDb ethdb.KeyValueStore) []common.Hash {
+ modulesSet := make(map[common.Hash]struct{})
+ asmPrefix := []byte{0x00, 'w'}
+ it := wasmDb.NewIterator(asmPrefix, nil)
+ defer it.Release()
+ for it.Next() {
+ key := it.Key()
+ if len(key) != rawdb.WasmKeyLen {
+ t.Fatalf("unexpected activated module key length, len: %d, key: %v", len(key), key)
+ }
+ moduleHash := key[rawdb.WasmPrefixLen:]
+ if len(moduleHash) != common.HashLength {
+ t.Fatalf("Invalid moduleHash length in key: %v, moduleHash: %v", key, moduleHash)
+ }
+ modulesSet[common.BytesToHash(moduleHash)] = struct{}{}
+ }
+ modules := make([]common.Hash, 0, len(modulesSet))
+ for module := range modulesSet {
+ modules = append(modules, module)
+ }
+ return modules
+}
+
+func checkWasmStoreContent(t *testing.T, wasmDb ethdb.KeyValueStore, targets []string, numModules int) {
+ modules := readModuleHashes(t, wasmDb)
+ if len(modules) != numModules {
+ t.Fatalf("Unexpected number of module hashes found in wasm store, want: %d, have: %d", numModules, len(modules))
+ }
+ for _, module := range modules {
+ for _, target := range targets {
+ wasmTarget := ethdb.WasmTarget(target)
+ if !rawdb.IsSupportedWasmTarget(wasmTarget) {
+ t.Fatalf("internal test error - unsupported target passed to checkWasmStoreContent: %v", target)
+ }
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Fatalf("Failed to read activated asm for target: %v, module: %v", target, module)
+ }
+ }()
+ _ = rawdb.ReadActivatedAsm(wasmDb, wasmTarget, module)
+ }()
+ }
+ }
+}
+
+func deployWasmAndGetLruEntrySizeEstimateBytes(
+ t *testing.T,
+ builder *NodeBuilder,
+ auth bind.TransactOpts,
+ wasmName string,
+) (common.Address, uint64) {
+ ctx := builder.ctx
+ l2client := builder.L2.Client
+
+ wasm, _ := readWasmFile(t, rustFile(wasmName))
+ arbWasm, err := pgen.NewArbWasm(types.ArbWasmAddress, l2client)
+ Require(t, err, ", wasmName:", wasmName)
+
+ programAddress := deployContract(t, ctx, auth, l2client, wasm)
+ tx, err := arbWasm.ActivateProgram(&auth, programAddress)
+ Require(t, err, ", wasmName:", wasmName)
+ receipt, err := EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err, ", wasmName:", wasmName)
+
+ if len(receipt.Logs) != 1 {
+ Fatal(t, "expected 1 log while activating, got ", len(receipt.Logs), ", wasmName:", wasmName)
+ }
+ log, err := arbWasm.ParseProgramActivated(*receipt.Logs[0])
+ Require(t, err, ", wasmName:", wasmName)
+
+ statedb, err := builder.L2.ExecNode.Backend.ArbInterface().BlockChain().State()
+ Require(t, err, ", wasmName:", wasmName)
+
+ module, err := statedb.TryGetActivatedAsm(rawdb.LocalTarget(), log.ModuleHash)
+ Require(t, err, ", wasmName:", wasmName)
+
+ lruEntrySizeEstimateBytes := programs.GetLruEntrySizeEstimateBytes(module, log.Version, true)
+ // just a sanity check
+ if lruEntrySizeEstimateBytes == 0 {
+ Fatal(t, "lruEntrySizeEstimateBytes is 0, wasmName:", wasmName)
+ }
+ return programAddress, lruEntrySizeEstimateBytes
+}
+
+func TestWasmLruCache(t *testing.T) {
+ builder, auth, cleanup := setupProgramTest(t, true)
+ ctx := builder.ctx
+ l2info := builder.L2Info
+ l2client := builder.L2.Client
+ defer cleanup()
+
+ auth.GasLimit = 32000000
+ auth.Value = oneEth
+
+ fallibleProgramAddress, fallibleLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "fallible")
+ keccakProgramAddress, keccakLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "keccak")
+ mathProgramAddress, mathLruEntrySizeEstimateBytes := deployWasmAndGetLruEntrySizeEstimateBytes(t, builder, auth, "math")
+ t.Log(
+ "lruEntrySizeEstimateBytes, ",
+ "fallible:", fallibleLruEntrySizeEstimateBytes,
+ "keccak:", keccakLruEntrySizeEstimateBytes,
+ "math:", mathLruEntrySizeEstimateBytes,
+ )
+
+ programs.ClearWasmLruCache()
+ lruMetrics := programs.GetWasmLruCacheMetrics()
+ if lruMetrics.Count != 0 {
+ t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count)
+ }
+ if lruMetrics.SizeBytes != 0 {
+ t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", 0, lruMetrics.SizeBytes)
+ }
+
+ programs.SetWasmLruCacheCapacity(fallibleLruEntrySizeEstimateBytes - 1)
+ // fallible wasm program will not be cached since its size is greater than lru cache capacity
+ tx := l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01})
+ Require(t, l2client.SendTransaction(ctx, tx))
+ _, err := EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err)
+ lruMetrics = programs.GetWasmLruCacheMetrics()
+ if lruMetrics.Count != 0 {
+ t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 0, lruMetrics.Count)
+ }
+ if lruMetrics.SizeBytes != 0 {
+ t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", 0, lruMetrics.SizeBytes)
+ }
+
+ programs.SetWasmLruCacheCapacity(
+ fallibleLruEntrySizeEstimateBytes + keccakLruEntrySizeEstimateBytes + mathLruEntrySizeEstimateBytes - 1,
+ )
+ // fallible wasm program will be cached
+ tx = l2info.PrepareTxTo("Owner", &fallibleProgramAddress, l2info.TransferGas, nil, []byte{0x01})
+ Require(t, l2client.SendTransaction(ctx, tx))
+ _, err = EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err)
+ lruMetrics = programs.GetWasmLruCacheMetrics()
+ if lruMetrics.Count != 1 {
+ t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 1, lruMetrics.Count)
+ }
+ if lruMetrics.SizeBytes != fallibleLruEntrySizeEstimateBytes {
+ t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", fallibleLruEntrySizeEstimateBytes, lruMetrics.SizeBytes)
+ }
+
+ // keccak wasm program will be cached
+ tx = l2info.PrepareTxTo("Owner", &keccakProgramAddress, l2info.TransferGas, nil, []byte{0x01})
+ Require(t, l2client.SendTransaction(ctx, tx))
+ _, err = EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err)
+ lruMetrics = programs.GetWasmLruCacheMetrics()
+ if lruMetrics.Count != 2 {
+ t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count)
+ }
+ if lruMetrics.SizeBytes != fallibleLruEntrySizeEstimateBytes+keccakLruEntrySizeEstimateBytes {
+ t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", fallibleLruEntrySizeEstimateBytes+keccakLruEntrySizeEstimateBytes, lruMetrics.SizeBytes)
+ }
+
+ // math wasm program will be cached, but fallible will be evicted since (fallible + keccak + math) > lruCacheCapacity
+ tx = l2info.PrepareTxTo("Owner", &mathProgramAddress, l2info.TransferGas, nil, []byte{0x01})
+ Require(t, l2client.SendTransaction(ctx, tx))
+ _, err = EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err)
+ lruMetrics = programs.GetWasmLruCacheMetrics()
+ if lruMetrics.Count != 2 {
+ t.Fatalf("lruMetrics.Count, expected: %v, actual: %v", 2, lruMetrics.Count)
+ }
+ if lruMetrics.SizeBytes != keccakLruEntrySizeEstimateBytes+mathLruEntrySizeEstimateBytes {
+ t.Fatalf("lruMetrics.SizeBytes, expected: %v, actual: %v", keccakLruEntrySizeEstimateBytes+mathLruEntrySizeEstimateBytes, lruMetrics.SizeBytes)
+ }
+}
diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go
index 09d53669ee..22329a1be5 100644
--- a/system_tests/recreatestate_rpc_test.go
+++ b/system_tests/recreatestate_rpc_test.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"math/big"
+ "runtime"
"strings"
"sync"
"testing"
@@ -95,7 +96,7 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr
func TestRecreateStateForRPCNoDepthLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -132,8 +133,9 @@ func TestRecreateStateForRPCNoDepthLimit(t *testing.T) {
func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
+ // #nosec G115
depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei))
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = depthGasLimit
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -170,7 +172,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) {
func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = int64(200)
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -207,7 +209,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) {
var headerCacheLimit uint64 = 512
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -255,7 +257,7 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -293,7 +295,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) {
var blockCacheLimit uint64 = 256
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -337,11 +339,12 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) {
}
func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig *gethexec.CachingConfig, txCount int) {
+ t.Parallel()
maxRecreateStateDepth := int64(30 * 1000 * 1000)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
+ execConfig := ExecConfigDefaultTest(t)
execConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
@@ -361,12 +364,14 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
Require(t, err)
l2info.GenerateAccount("User2")
+ // #nosec G115
for i := genesis; i < uint64(txCount)+genesis; i++ {
tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil)
err := client.SendTransaction(ctx, tx)
Require(t, err)
receipt, err := EnsureTxSucceeded(ctx, client, tx)
Require(t, err)
+ // #nosec G115
if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want {
Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want)
}
@@ -377,6 +382,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
Fatal(t, "missing current block")
}
lastBlock := currentHeader.Number.Uint64()
+ // #nosec G115
if want := genesis + uint64(txCount); lastBlock < want {
Fatal(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock)
}
@@ -390,6 +396,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
bc = builder.L2.ExecNode.Backend.ArbInterface().BlockChain()
gas := skipGas
blocks := skipBlocks
+ // #nosec G115
for i := genesis; i <= genesis+uint64(txCount); i++ {
block := bc.GetBlockByNumber(i)
if block == nil {
@@ -407,6 +414,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
gas = 0
blocks = 0
} else {
+ // #nosec G115
if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) {
// skipping nonexistence check - the state might have been saved on node shutdown
continue
@@ -421,6 +429,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig
}
}
}
+ // #nosec G115
for i := genesis + 1; i <= genesis+uint64(txCount); i += i % 10 {
_, err = client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(i))
if err != nil {
@@ -444,20 +453,26 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) {
cacheConfig.SnapshotCache = 0 // disable snapshots
cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are
+ runTestCase := func(t *testing.T, cacheConfig gethexec.CachingConfig, txes int) {
+ t.Run(fmt.Sprintf("skip-blocks-%d-skip-gas-%d-txes-%d", cacheConfig.MaxNumberOfBlocksToSkipStateSaving, cacheConfig.MaxAmountOfGasToSkipStateSaving, txes), func(t *testing.T) {
+ testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, txes)
+ })
+ }
+
// test defaults
- testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512)
+ runTestCase(t, cacheConfig, 512)
cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127
cacheConfig.MaxAmountOfGasToSkipStateSaving = 0
- testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512)
+ runTestCase(t, cacheConfig, 512)
cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 0
cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000
- testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512)
+ runTestCase(t, cacheConfig, 512)
cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127
cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000
- testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512)
+ runTestCase(t, cacheConfig, 512)
// lower number of blocks in triegc below 100 blocks, to be able to check for nonexistence in testSkippingSavingStateAndRecreatingAfterRestart (it doesn't check last BlockCount blocks as some of them may be persisted on node shutdown)
cacheConfig.BlockCount = 16
@@ -471,22 +486,18 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) {
for _, skipGas := range skipGasValues {
for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] {
cacheConfig.MaxAmountOfGasToSkipStateSaving = skipGas
+ // #nosec G115
cacheConfig.MaxNumberOfBlocksToSkipStateSaving = uint32(skipBlocks)
- testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 100)
+ runTestCase(t, cacheConfig, 100)
}
}
}
-func TestGettingStateForRPCFullNode(t *testing.T) {
+func testGettingState(t *testing.T, execConfig *gethexec.Config) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- execConfig := ExecConfigDefaultTest()
- execConfig.Caching.SnapshotCache = 0 // disable snapshots
- execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are
- execConfig.Sequencer.MaxBlockSpeed = 0
- execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16)
- execNode, _ := builder.L2.ExecNode, builder.L2.Client
+ execNode := builder.L2.ExecNode
defer cancelNode()
bc := execNode.Backend.ArbInterface().BlockChain()
api := execNode.Backend.APIBackend()
@@ -495,6 +506,7 @@ func TestGettingStateForRPCFullNode(t *testing.T) {
if header == nil {
Fatal(t, "failed to get current block header")
}
+ // #nosec G115
state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
Require(t, err)
addr := builder.L2Info.GetAddress("User2")
@@ -505,24 +517,47 @@ func TestGettingStateForRPCFullNode(t *testing.T) {
Fatal(t, "User2 address does not exist in the state")
}
// Get the state again to avoid caching
+ // #nosec G115
state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
Require(t, err)
blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount
makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties)
+ // force garbage collection to check if it won't break anything
+ runtime.GC()
+
exists = state.Exist(addr)
err = state.Error()
Require(t, err)
if !exists {
Fatal(t, "User2 address does not exist in the state")
}
+
+ // force garbage collection of StateDB object, what should cause the state finalizer to run
+ state = nil
+ runtime.GC()
+ _, err = bc.StateAt(header.Root)
+ if err == nil {
+ Fatal(t, "StateAndHeaderByNumber didn't fail as expected")
+ }
+ expectedErr := &trie.MissingNodeError{}
+ if !errors.As(err, &expectedErr) {
+ Fatal(t, "StateAndHeaderByNumber failed with unexpected error:", err)
+ }
}
-func TestGettingStateForRPCHybridArchiveNode(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- execConfig := ExecConfigDefaultTest()
+func TestGettingState(t *testing.T) {
+ execConfig := ExecConfigDefaultTest(t)
+ execConfig.Caching.SnapshotCache = 0 // disable snapshots
+ execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are
+ execConfig.Sequencer.MaxBlockSpeed = 0
+ execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
+ t.Run("full-node", func(t *testing.T) {
+ testGettingState(t, execConfig)
+ })
+
+ execConfig = ExecConfigDefaultTest(t)
execConfig.Caching.Archive = true
// For now Archive node should use HashScheme
execConfig.Caching.StateScheme = rawdb.HashScheme
@@ -532,40 +567,13 @@ func TestGettingStateForRPCHybridArchiveNode(t *testing.T) {
execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are
execConfig.Sequencer.MaxBlockSpeed = 0
execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110
- builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16)
- execNode, _ := builder.L2.ExecNode, builder.L2.Client
- defer cancelNode()
- bc := execNode.Backend.ArbInterface().BlockChain()
- api := execNode.Backend.APIBackend()
-
- header := bc.CurrentBlock()
- if header == nil {
- Fatal(t, "failed to get current block header")
- }
- state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
- Require(t, err)
- addr := builder.L2Info.GetAddress("User2")
- exists := state.Exist(addr)
- err = state.Error()
- Require(t, err)
- if !exists {
- Fatal(t, "User2 address does not exist in the state")
- }
- // Get the state again to avoid caching
- state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64()))
- Require(t, err)
-
- blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount
- makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties)
-
- exists = state.Exist(addr)
- err = state.Error()
- Require(t, err)
- if !exists {
- Fatal(t, "User2 address does not exist in the state")
- }
+ t.Run("archive-node", func(t *testing.T) {
+ testGettingState(t, execConfig)
+ })
}
+// regression test for issue caused by accessing block state that has just been committed to TrieDB but not yet referenced in core.BlockChain.writeBlockWithState (here called state of "recent" block)
+// before the corresponding fix, access to the recent block state caused premature garbage collection of the head block state
func TestStateAndHeaderForRecentBlock(t *testing.T) {
threads := 32
ctx, cancel := context.WithCancel(context.Background())
@@ -606,15 +614,22 @@ func TestStateAndHeaderForRecentBlock(t *testing.T) {
}()
api := builder.L2.ExecNode.Backend.APIBackend()
db := builder.L2.ExecNode.Backend.ChainDb()
- i := 1
+
+ recentBlock := 1
var mtx sync.RWMutex
var wgCallers sync.WaitGroup
for j := 0; j < threads && ctx.Err() == nil; j++ {
wgCallers.Add(1)
+ // each thread attempts to get state for a block that is just being created (here called recent):
+ // 1. Before state trie node is referenced in core.BlockChain.writeBlockWithState, block body is written to database with key prefix `b` followed by block number and then block hash (see: rawdb.blockBodyKey)
+ // 2. Each thread tries to read the block body entry to: a. extract recent block hash b. congest resource usage to slow down execution of core.BlockChain.writeBlockWithState
+ // 3. After extracting the hash from block body entry key, StateAndHeaderByNumberOfHash is called for the hash. It is expected that it will:
+ // a. either fail with "ahead of current block" if we made it before rawdb.WriteCanonicalHash is called in core.BlockChain.writeHeadBlock, which is called after writeBlockWithState finishes,
+ // b. or it will succeed if the canonical hash was written for the block meaning that writeBlockWithState was fully executed (i.a. state root trie node correctly referenced) - then the recentBlock is advanced
go func() {
defer wgCallers.Done()
mtx.RLock()
- blockNumber := i
+ blockNumber := recentBlock
mtx.RUnlock()
for blockNumber < 300 && ctx.Err() == nil {
prefix := make([]byte, 8)
@@ -633,8 +648,8 @@ func TestStateAndHeaderForRecentBlock(t *testing.T) {
_, _, err := api.StateAndHeaderByNumberOrHash(ctx, rpc.BlockNumberOrHash{BlockHash: &blockHash})
if err == nil {
mtx.Lock()
- if blockNumber == i {
- i++
+ if blockNumber == recentBlock {
+ recentBlock++
}
mtx.Unlock()
break
@@ -654,7 +669,7 @@ func TestStateAndHeaderForRecentBlock(t *testing.T) {
}
it.Release()
mtx.RLock()
- blockNumber = i
+ blockNumber = recentBlock
mtx.RUnlock()
}
}()
diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go
index 106dfc6d46..aa9fbfd72e 100644
--- a/system_tests/retryable_test.go
+++ b/system_tests/retryable_test.go
@@ -1042,7 +1042,7 @@ func elevateL2Basefee(t *testing.T, ctx context.Context, builder *NodeBuilder) {
_, err = precompilesgen.NewArbosTest(common.HexToAddress("0x69"), builder.L2.Client)
Require(t, err, "failed to deploy ArbosTest")
- burnAmount := ExecConfigDefaultTest().RPC.RPCGasCap
+ burnAmount := ExecConfigDefaultTest(t).RPC.RPCGasCap
burnTarget := uint64(5 * l2pricing.InitialSpeedLimitPerSecondV6 * l2pricing.InitialBacklogTolerance)
for i := uint64(0); i < (burnTarget+burnAmount)/burnAmount; i++ {
burnArbGas := arbosTestAbi.Methods["burnArbGas"]
diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go
index 72629e1978..c099563e29 100644
--- a/system_tests/seq_nonce_test.go
+++ b/system_tests/seq_nonce_test.go
@@ -111,6 +111,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) {
}
for wait := 9; wait >= 0; wait-- {
+ // #nosec G115
got := int(completed.Load())
expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize
if got == expected {
diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go
index 5e70fdf098..21f0755225 100644
--- a/system_tests/seqfeed_test.go
+++ b/system_tests/seqfeed_test.go
@@ -164,12 +164,12 @@ func compareAllMsgResultsFromConsensusAndExecution(
}
var lastResult *execution.MessageResult
- for msgCount := 1; arbutil.MessageIndex(msgCount) <= consensusMsgCount; msgCount++ {
+ for msgCount := arbutil.MessageIndex(1); msgCount <= consensusMsgCount; msgCount++ {
pos := msgCount - 1
resultExec, err := testClient.ExecNode.ResultAtPos(arbutil.MessageIndex(pos))
Require(t, err)
- resultConsensus, err := testClient.ConsensusNode.TxStreamer.ResultAtCount(arbutil.MessageIndex(msgCount))
+ resultConsensus, err := testClient.ConsensusNode.TxStreamer.ResultAtCount(msgCount)
Require(t, err)
if !reflect.DeepEqual(resultExec, resultConsensus) {
diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go
index 4dc8f4a664..a9f66b0e2f 100644
--- a/system_tests/seqinbox_test.go
+++ b/system_tests/seqinbox_test.go
@@ -229,6 +229,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
reorgTargetNumber := blockStates[reorgTo].l1BlockNumber
currentHeader, err := builder.L1.Client.HeaderByNumber(ctx, nil)
Require(t, err)
+ // #nosec G115
if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 {
Fatal(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber)
}
@@ -264,6 +265,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
for j := 0; j < numMessages; j++ {
sourceNum := rand.Int() % len(state.accounts)
source := state.accounts[sourceNum]
+ // #nosec G115
amount := new(big.Int).SetUint64(uint64(rand.Int()) % state.balances[source].Uint64())
reserveAmount := new(big.Int).SetUint64(l2pricing.InitialBaseFeeWei * 100000000)
if state.balances[source].Cmp(new(big.Int).Add(amount, reserveAmount)) < 0 {
@@ -313,6 +315,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
for j := 0; ; j++ {
haveNonce, err := builder.L1.Client.PendingNonceAt(ctx, seqOpts.From)
Require(t, err)
+ // #nosec G115
if haveNonce == uint64(seqNonce) {
break
}
@@ -346,7 +349,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
BridgeAddr: builder.L1Info.GetAddress("Bridge"),
DataPosterAddr: seqOpts.From,
GasRefunderAddr: gasRefunderAddr,
- SequencerInboxAccs: len(blockStates),
+ SequencerInboxAccs: uint64(len(blockStates)),
AfterDelayedMessagesRead: 1,
})
if diff := diffAccessList(accessed, *wantAL); diff != "" {
@@ -374,10 +377,12 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err)
}
txCost := txRes.EffectiveGasPrice.Uint64() * txRes.GasUsed
+ // #nosec G115
if diff := before.Int64() - after.Int64(); diff >= int64(txCost) {
t.Errorf("Transaction: %v was not refunded, balance diff: %v, cost: %v", tx.Hash(), diff, txCost)
}
+ // #nosec G115
state.l2BlockNumber += uint64(numMessages)
state.l1BlockNumber = txRes.BlockNumber.Uint64()
blockStates = append(blockStates, state)
@@ -424,11 +429,13 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
}
for _, state := range blockStates {
+ // #nosec G115
block, err := l2Backend.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber))
Require(t, err)
if block == nil {
Fatal(t, "missing state block", state.l2BlockNumber)
}
+ // #nosec G115
stateDb, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber))
Require(t, err)
for acct, expectedBalance := range state.balances {
diff --git a/system_tests/snap_sync_test.go b/system_tests/snap_sync_test.go
index a04d9f5bf3..7462b5f5f0 100644
--- a/system_tests/snap_sync_test.go
+++ b/system_tests/snap_sync_test.go
@@ -92,8 +92,10 @@ func TestSnapSync(t *testing.T) {
waitForBlockToCatchupToMessageCount(ctx, t, nodeC.Client, finalMessageCount)
// Fetching message count - 1 instead on the latest block number as the latest block number might not be
// present in the snap sync node since it does not have the sequencer feed.
+ // #nosec G115
header, err := builder.L2.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1))
Require(t, err)
+ // #nosec G115
headerNodeC, err := nodeC.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1))
Require(t, err)
// Once the node is synced up, check if the block hash is the same for the last block
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index f57b68ad8a..67ce260529 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -37,6 +37,7 @@ import (
"github.com/offchainlabs/nitro/util"
"github.com/offchainlabs/nitro/util/arbmath"
"github.com/offchainlabs/nitro/util/colors"
+ "github.com/offchainlabs/nitro/util/testhelpers"
"github.com/offchainlabs/nitro/validator/valnode"
)
@@ -57,7 +58,8 @@ func makeBackgroundTxs(ctx context.Context, builder *NodeBuilder) error {
}
func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) {
- t.Parallel()
+ logHandler := testhelpers.InitTestLog(t, log.LvlTrace)
+
ctx, cancelCtx := context.WithCancel(context.Background())
defer cancelCtx()
srv := externalsignertest.NewServer(t)
@@ -132,15 +134,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
builder.L1.TransferBalance(t, "Faucet", "ValidatorB", balance, builder.L1Info)
l1authB := builder.L1Info.GetDefaultTransactOpts("ValidatorB", ctx)
- valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true)
- Require(t, err)
- valWalletAddrA := *valWalletAddrAPtr
- valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l2nodeA.L1Reader, true)
- Require(t, err)
- if valWalletAddrA == *valWalletAddrCheck {
- Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String())
- }
-
rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, builder.L1.Client)
Require(t, err)
@@ -149,16 +142,9 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI))
Require(t, err, "unable to parse rollup ABI")
- setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address}, []bool{true, true, true})
- Require(t, err, "unable to generate setValidator calldata")
- tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata)
- Require(t, err, "unable to set validators")
- _, err = builder.L1.EnsureTxSucceeded(tx)
- Require(t, err)
-
setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1))
Require(t, err, "unable to generate setMinimumAssertionPeriod calldata")
- tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata)
+ tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata)
Require(t, err, "unable to set minimum assertion period")
_, err = builder.L1.EnsureTxSucceeded(tx)
Require(t, err)
@@ -166,7 +152,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client)
Require(t, err)
- valConfig := staker.TestL1ValidatorConfig
+ valConfigA := staker.TestL1ValidatorConfig
parentChainID, err := builder.L1.Client.ChainID(ctx)
if err != nil {
t.Fatalf("Failed to get parent chain id: %v", err)
@@ -182,14 +168,30 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
if err != nil {
t.Fatalf("Error creating validator dataposter: %v", err)
}
- valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas })
+ valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfigA.ExtraGas })
Require(t, err)
if honestStakerInactive {
- valConfig.Strategy = "Defensive"
+ valConfigA.Strategy = "Defensive"
} else {
- valConfig.Strategy = "MakeNodes"
+ valConfigA.Strategy = "MakeNodes"
}
+ valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas())
+ Require(t, err)
+ valWalletAddrA := *valWalletAddrAPtr
+ valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas())
+ Require(t, err)
+ if valWalletAddrA == *valWalletAddrCheck {
+ Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String())
+ }
+
+ setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address}, []bool{true, true, true})
+ Require(t, err, "unable to generate setValidator calldata")
+ tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata)
+ Require(t, err, "unable to set validators")
+ _, err = builder.L1.EnsureTxSucceeded(tx)
+ Require(t, err)
+
_, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig)
blockValidatorConfig := staker.TestBlockValidatorConfig
@@ -210,7 +212,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
l2nodeA.L1Reader,
valWalletA,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfigA },
nil,
statelessA,
nil,
@@ -244,7 +246,8 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
}
valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 })
Require(t, err)
- valConfig.Strategy = "MakeNodes"
+ valConfigB := staker.TestL1ValidatorConfig
+ valConfigB.Strategy = "MakeNodes"
statelessB, err := staker.NewStatelessBlockValidator(
l2nodeB.InboxReader,
l2nodeB.InboxTracker,
@@ -262,7 +265,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
l2nodeB.L1Reader,
valWalletB,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfigB },
nil,
statelessB,
nil,
@@ -278,12 +281,13 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
Require(t, err)
}
valWalletC := validatorwallet.NewNoOp(builder.L1.Client, l2nodeA.DeployInfo.Rollup)
- valConfig.Strategy = "Watchtower"
+ valConfigC := staker.TestL1ValidatorConfig
+ valConfigC.Strategy = "Watchtower"
stakerC, err := staker.NewStaker(
l2nodeA.L1Reader,
valWalletC,
bind.CallOpts{},
- valConfig,
+ func() *staker.L1ValidatorConfig { return &valConfigC },
nil,
statelessA,
nil,
@@ -462,8 +466,53 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
if !stakerBWasStaked {
Fatal(t, "staker B was never staked")
}
+
+ if logHandler.WasLogged("data poster expected next transaction to have nonce \\d+ but was requested to post transaction with nonce \\d+") {
+ Fatal(t, "Staker's DataPoster inferred nonce incorrectly")
+ }
}
func TestStakersCooperative(t *testing.T) {
stakerTestImpl(t, false, false)
}
+
+func TestGetValidatorWalletContractWithDataposterOnlyUsedToCreateValidatorWalletContract(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancelCtx := context.WithCancel(context.Background())
+ defer cancelCtx()
+
+ builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+ cleanup := builder.Build(t)
+ defer cleanup()
+
+ balance := big.NewInt(params.Ether)
+ balance.Mul(balance, big.NewInt(100))
+ builder.L1Info.GenerateAccount("ValidatorA")
+ builder.L1.TransferBalance(t, "Faucet", "ValidatorA", balance, builder.L1Info)
+ l1auth := builder.L1Info.GetDefaultTransactOpts("ValidatorA", ctx)
+
+ parentChainID, err := builder.L1.Client.ChainID(ctx)
+ Require(t, err)
+
+ dataPoster, err := arbnode.DataposterOnlyUsedToCreateValidatorWalletContract(
+ ctx,
+ builder.L2.ConsensusNode.L1Reader,
+ &l1auth,
+ &builder.nodeConfig.Staker.DataPoster,
+ parentChainID,
+ )
+ if err != nil {
+ log.Crit("error creating data poster to create validator wallet contract", "err", err)
+ }
+ getExtraGas := func() uint64 { return builder.nodeConfig.Staker.ExtraGas }
+
+ valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, builder.L2.ConsensusNode.DeployInfo.ValidatorWalletCreator, 0, builder.L2.ConsensusNode.L1Reader, true, dataPoster, getExtraGas)
+ Require(t, err)
+ valWalletAddrA := *valWalletAddrAPtr
+ valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, builder.L2.ConsensusNode.DeployInfo.ValidatorWalletCreator, 0, builder.L2.ConsensusNode.L1Reader, true, dataPoster, getExtraGas)
+ Require(t, err)
+ if valWalletAddrA == *valWalletAddrCheck {
+ Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String())
+ }
+}
diff --git a/system_tests/stylus_trace_test.go b/system_tests/stylus_trace_test.go
index cb303874d6..52039df460 100644
--- a/system_tests/stylus_trace_test.go
+++ b/system_tests/stylus_trace_test.go
@@ -6,6 +6,7 @@ package arbtest
import (
"bytes"
"encoding/binary"
+ "math"
"math/big"
"testing"
@@ -76,6 +77,7 @@ func sendAndTraceTransaction(
}
func intToBytes(v int) []byte {
+ // #nosec G115
return binary.BigEndian.AppendUint64(nil, uint64(v))
}
@@ -477,3 +479,17 @@ func TestStylusOpcodeTraceEquivalence(t *testing.T) {
checkOpcode(t, wasmResult, 12, vm.RETURN, offset, returnLen)
checkOpcode(t, evmResult, 5078, vm.RETURN, offset, returnLen)
}
+
+func TestStylusHugeWriteResultTrace(t *testing.T) {
+ const jit = false
+ builder, auth, cleanup := setupProgramTest(t, jit)
+ ctx := builder.ctx
+ l2client := builder.L2.Client
+ defer cleanup()
+
+ program := deployWasm(t, ctx, auth, l2client, watFile("write-result-len"))
+ const returnLen = math.MaxUint16 + 1
+ args := binary.LittleEndian.AppendUint32(nil, returnLen)
+ result := sendAndTraceTransaction(t, builder, program, nil, args)
+ checkOpcode(t, result, 3, vm.RETURN, nil, intToBe32(returnLen))
+}
diff --git a/system_tests/stylus_tracer_test.go b/system_tests/stylus_tracer_test.go
new file mode 100644
index 0000000000..7fda39f04e
--- /dev/null
+++ b/system_tests/stylus_tracer_test.go
@@ -0,0 +1,244 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package arbtest
+
+import (
+ "encoding/binary"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/google/go-cmp/cmp"
+ "github.com/offchainlabs/nitro/execution/gethexec"
+ "github.com/offchainlabs/nitro/solgen/go/mocksgen"
+ "github.com/offchainlabs/nitro/util/containers"
+ "github.com/offchainlabs/nitro/util/testhelpers"
+)
+
+func TestStylusTracer(t *testing.T) {
+ const jit = false
+ builder, auth, cleanup := setupProgramTest(t, jit)
+ ctx := builder.ctx
+ l2client := builder.L2.Client
+ l2info := builder.L2Info
+ rpcClient := builder.L2.Client.Client()
+ defer cleanup()
+
+ traceTransaction := func(tx common.Hash, tracer string) []gethexec.HostioTraceInfo {
+ traceOpts := struct {
+ Tracer string `json:"tracer"`
+ }{
+ Tracer: tracer,
+ }
+ var result []gethexec.HostioTraceInfo
+ err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx, traceOpts)
+ Require(t, err, "trace transaction")
+ return result
+ }
+
+ // Deploy contracts
+ stylusMulticall := deployWasm(t, ctx, auth, l2client, rustFile("multicall"))
+ evmMulticall, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client)
+ Require(t, err, "deploy evm multicall")
+ _, err = EnsureTxSucceeded(ctx, l2client, tx)
+ Require(t, err, "ensure evm multicall deployment")
+
+ // Args for tests
+ key := testhelpers.RandomHash()
+ value := testhelpers.RandomHash()
+ loadStoreArgs := multicallEmptyArgs()
+ loadStoreArgs = multicallAppendStore(loadStoreArgs, key, value, false)
+ loadStoreArgs = multicallAppendLoad(loadStoreArgs, key, false)
+ callArgs := argsForMulticall(vm.CALL, stylusMulticall, nil, []byte{0})
+ evmCall := argsForMulticall(vm.CALL, evmMulticall, nil, []byte{0})
+
+ for _, testCase := range []struct {
+ name string
+ contract common.Address
+ args []byte
+ want []gethexec.HostioTraceInfo
+ }{
+ {
+ name: "non-recursive hostios",
+ contract: stylusMulticall,
+ args: loadStoreArgs,
+ want: []gethexec.HostioTraceInfo{
+ {Name: "user_entrypoint", Args: intToBe32(len(loadStoreArgs))},
+ {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}},
+ {Name: "read_args", Outs: loadStoreArgs},
+ {Name: "storage_cache_bytes32", Args: append(key.Bytes(), value.Bytes()...)},
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "storage_load_bytes32", Args: key.Bytes(), Outs: value.Bytes()},
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "write_result", Args: value.Bytes()},
+ {Name: "user_returned", Outs: intToBe32(0)},
+ },
+ },
+
+ {
+ name: "call stylus contract",
+ contract: stylusMulticall,
+ args: callArgs,
+ want: []gethexec.HostioTraceInfo{
+ {Name: "user_entrypoint", Args: intToBe32(len(callArgs))},
+ {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}},
+ {Name: "read_args", Outs: callArgs},
+ {
+ Name: "call_contract",
+ Args: append(stylusMulticall.Bytes(), common.Hex2Bytes("ffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000")...),
+ Outs: common.Hex2Bytes("0000000000"),
+ Address: &stylusMulticall,
+ Steps: (*containers.Stack[gethexec.HostioTraceInfo])(&[]gethexec.HostioTraceInfo{
+ {Name: "user_entrypoint", Args: intToBe32(1)},
+ {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}},
+ {Name: "read_args", Outs: []byte{0x00}},
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "write_result"},
+ {Name: "user_returned", Outs: intToBe32(0)},
+ }),
+ },
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "write_result"},
+ {Name: "user_returned", Outs: intToBe32(0)},
+ },
+ },
+
+ {
+ name: "call evm contract",
+ contract: stylusMulticall,
+ args: evmCall,
+ want: []gethexec.HostioTraceInfo{
+ {Name: "user_entrypoint", Args: intToBe32(len(callArgs))},
+ {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}},
+ {Name: "read_args", Outs: evmCall},
+ {
+ Name: "call_contract",
+ Args: append(evmMulticall.Bytes(), common.Hex2Bytes("ffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000")...),
+ Outs: common.Hex2Bytes("0000000000"),
+ Address: &evmMulticall,
+ Steps: containers.NewStack[gethexec.HostioTraceInfo](),
+ },
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "write_result"},
+ {Name: "user_returned", Outs: intToBe32(0)},
+ },
+ },
+
+ {
+ name: "evm contract calling wasm",
+ contract: evmMulticall,
+ args: callArgs,
+ want: []gethexec.HostioTraceInfo{
+ {
+ Name: "evm_call_contract",
+ Address: &stylusMulticall,
+ Steps: (*containers.Stack[gethexec.HostioTraceInfo])(&[]gethexec.HostioTraceInfo{
+ {Name: "user_entrypoint", Args: intToBe32(1)},
+ {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}},
+ {Name: "read_args", Outs: []byte{0x00}},
+ {Name: "storage_flush_cache", Args: []byte{0x00}},
+ {Name: "write_result"},
+ {Name: "user_returned", Outs: intToBe32(0)},
+ }),
+ },
+ },
+ },
+ } {
+ t.Run(testCase.name, func(t *testing.T) {
+ to := testCase.contract
+ tx := l2info.PrepareTxTo("Owner", &to, l2info.TransferGas, nil, testCase.args)
+ err := l2client.SendTransaction(ctx, tx)
+ Require(t, err, "send transaction")
+
+ nativeResult := traceTransaction(tx.Hash(), "stylusTracer")
+ normalizeHostioTrace(nativeResult)
+ if diff := cmp.Diff(testCase.want, nativeResult); diff != "" {
+ Fatal(t, "native tracer don't match wanted result", diff)
+ }
+
+ jsResult := traceTransaction(tx.Hash(), jsStylusTracer)
+ normalizeHostioTrace(jsResult)
+ if diff := cmp.Diff(jsResult, nativeResult); diff != "" {
+ Fatal(t, "native tracer don't match js trace", diff)
+ }
+ })
+ }
+}
+
+func intToBe32(v int) []byte {
+ // #nosec G115
+ return binary.BigEndian.AppendUint32(nil, uint32(v))
+}
+
+// normalizeHostioTrace removes the start and end ink values from the trace so we can compare them.
+// In Arbitrum, the gas used by the transaction varies depending on the L1 fees, so the trace
+// returns different gas values and we can't hardcode them.
+func normalizeHostioTrace(trace []gethexec.HostioTraceInfo) {
+ for i := range trace {
+ trace[i].StartInk = 0
+ trace[i].EndInk = 0
+ if len(trace[i].Args) == 0 {
+ trace[i].Args = nil
+ }
+ if len(trace[i].Outs) == 0 {
+ trace[i].Outs = nil
+ }
+ if trace[i].Steps != nil {
+ normalizeHostioTrace(*trace[i].Steps)
+ }
+ }
+}
+
+var jsStylusTracer = `
+{
+ "hostio": function(info) {
+ info.args = toHex(info.args);
+ info.outs = toHex(info.outs);
+ if (this.nests.includes(info.name)) {
+ Object.assign(info, this.open.pop());
+ info.name = info.name.substring(4) // remove evm_
+ }
+ this.open.push(info);
+ },
+ "enter": function(frame) {
+ let inner = [];
+ let name = "";
+ switch (frame.getType()) {
+ case "CALL":
+ name = "evm_call_contract";
+ break;
+ case "DELEGATECALL":
+ name = "evm_delegate_call_contract";
+ break;
+ case "STATICCALL":
+ name = "evm_static_call_contract";
+ break;
+ case "CREATE":
+ name = "evm_create1";
+ break;
+ case "CREATE2":
+ name = "evm_create2";
+ break;
+ case "SELFDESTRUCT":
+ name = "evm_self_destruct";
+ break;
+ }
+ this.open.push({
+ address: toHex(frame.getTo()),
+ steps: inner,
+ name: name,
+ });
+ this.stack.push(this.open); // save where we were
+ this.open = inner;
+ },
+ "exit": function(result) {
+ this.open = this.stack.pop();
+ },
+ "result": function() { return this.open; },
+ "fault": function() { return this.open; },
+ stack: [],
+ open: [],
+ nests: ["call_contract", "delegate_call_contract", "static_call_contract"]
+}
+`
diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go
index 83cd975dd8..60707b83fb 100644
--- a/system_tests/twonodeslong_test.go
+++ b/system_tests/twonodeslong_test.go
@@ -63,6 +63,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) {
builder.L2Info.GenerateAccount("ErrorTxSender")
builder.L2.SendWaitTestTransactions(t, []*types.Transaction{
+ // #nosec G115
builder.L2Info.PrepareTx("Faucet", "ErrorTxSender", builder.L2Info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(builder.L2Info.TransferGas)), nil),
})
diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go
index 4c3c8661c8..a228cb2454 100644
--- a/system_tests/unsupported_txtypes_test.go
+++ b/system_tests/unsupported_txtypes_test.go
@@ -112,8 +112,8 @@ func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) {
blocknum, err := builder.L2.Client.BlockNumber(ctx)
Require(t, err)
- for i := int64(0); i <= int64(blocknum); i++ {
- block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i))
+ for i := uint64(0); i <= blocknum; i++ {
+ block, err := builder.L2.Client.BlockByNumber(ctx, new(big.Int).SetUint64(i))
Require(t, err)
for _, tx := range block.Transactions() {
if _, ok := txAcceptStatus[tx.Hash()]; ok {
diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go
index 2c6321d009..912b48ea6a 100644
--- a/system_tests/validation_mock_test.go
+++ b/system_tests/validation_mock_test.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbnode"
@@ -60,8 +61,8 @@ func (s *mockSpawner) WasmModuleRoots() ([]common.Hash, error) {
return mockWasmModuleRoots, nil
}
-func (s *mockSpawner) StylusArchs() []string {
- return []string{"mock"}
+func (s *mockSpawner) StylusArchs() []ethdb.WasmTarget {
+ return []ethdb.WasmTarget{"mock"}
}
func (s *mockSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun {
@@ -95,10 +96,6 @@ func (s *mockSpawner) LatestWasmModuleRoot() containers.PromiseInterface[common.
return containers.NewReadyPromise[common.Hash](mockWasmModuleRoots[0], nil)
}
-func (s *mockSpawner) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] {
- return containers.NewReadyPromise[struct{}](struct{}{}, nil)
-}
-
type mockValRun struct {
containers.Promise[validator.GoGlobalState]
root common.Hash
diff --git a/system_tests/wrap_transaction_test.go b/system_tests/wrap_transaction_test.go
index bd561ad5e5..36052fb2db 100644
--- a/system_tests/wrap_transaction_test.go
+++ b/system_tests/wrap_transaction_test.go
@@ -15,6 +15,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbutil"
@@ -22,7 +23,7 @@ import (
"github.com/offchainlabs/nitro/util/headerreader"
)
-func GetPendingBlockNumber(ctx context.Context, client arbutil.L1Interface) (*big.Int, error) {
+func GetPendingBlockNumber(ctx context.Context, client *ethclient.Client) (*big.Int, error) {
// Attempt to get the block number from ArbSys, if it exists
arbSys, err := precompilesgen.NewArbSys(common.BigToAddress(big.NewInt(100)), client)
if err != nil {
@@ -37,7 +38,7 @@ func GetPendingBlockNumber(ctx context.Context, client arbutil.L1Interface) (*bi
}
// Will wait until txhash is in the blockchain and return its receipt
-func WaitForTx(ctxinput context.Context, client arbutil.L1Interface, txhash common.Hash, timeout time.Duration) (*types.Receipt, error) {
+func WaitForTx(ctxinput context.Context, client *ethclient.Client, txhash common.Hash, timeout time.Duration) (*types.Receipt, error) {
ctx, cancel := context.WithTimeout(ctxinput, timeout)
defer cancel()
@@ -75,11 +76,11 @@ func WaitForTx(ctxinput context.Context, client arbutil.L1Interface, txhash comm
}
}
-func EnsureTxSucceeded(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction) (*types.Receipt, error) {
+func EnsureTxSucceeded(ctx context.Context, client *ethclient.Client, tx *types.Transaction) (*types.Receipt, error) {
return EnsureTxSucceededWithTimeout(ctx, client, tx, time.Second*5)
}
-func EnsureTxSucceededWithTimeout(ctx context.Context, client arbutil.L1Interface, tx *types.Transaction, timeout time.Duration) (*types.Receipt, error) {
+func EnsureTxSucceededWithTimeout(ctx context.Context, client *ethclient.Client, tx *types.Transaction, timeout time.Duration) (*types.Receipt, error) {
receipt, err := WaitForTx(ctx, client, tx.Hash(), timeout)
if err != nil {
return nil, fmt.Errorf("waitFoxTx (tx=%s) got: %w", tx.Hash().Hex(), err)
@@ -103,12 +104,12 @@ func EnsureTxSucceededWithTimeout(ctx context.Context, client arbutil.L1Interfac
return receipt, arbutil.DetailTxError(ctx, client, tx, receipt)
}
-func EnsureTxFailed(t *testing.T, ctx context.Context, client arbutil.L1Interface, tx *types.Transaction) *types.Receipt {
+func EnsureTxFailed(t *testing.T, ctx context.Context, client *ethclient.Client, tx *types.Transaction) *types.Receipt {
t.Helper()
return EnsureTxFailedWithTimeout(t, ctx, client, tx, time.Second*5)
}
-func EnsureTxFailedWithTimeout(t *testing.T, ctx context.Context, client arbutil.L1Interface, tx *types.Transaction, timeout time.Duration) *types.Receipt {
+func EnsureTxFailedWithTimeout(t *testing.T, ctx context.Context, client *ethclient.Client, tx *types.Transaction, timeout time.Duration) *types.Receipt {
t.Helper()
receipt, err := WaitForTx(ctx, client, tx.Hash(), timeout)
Require(t, err)
diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go
index 8b7c47d82b..39b014f3ac 100644
--- a/util/arbmath/bips.go
+++ b/util/arbmath/bips.go
@@ -20,36 +20,47 @@ func PercentToBips(percentage int64) Bips {
}
func BigToBips(natural *big.Int) Bips {
- return Bips(natural.Uint64())
+ return Bips(natural.Int64())
}
func BigMulByBips(value *big.Int, bips Bips) *big.Int {
return BigMulByFrac(value, int64(bips), int64(OneInBips))
}
+func BigMulByUBips(value *big.Int, bips UBips) *big.Int {
+ return BigMulByUFrac(value, uint64(bips), uint64(OneInUBips))
+}
+
func IntMulByBips(value int64, bips Bips) int64 {
return value * int64(bips) / int64(OneInBips)
}
+// UintMulByBips multiplies a uint value by a bips value
+// bips must be positive and not cause an overflow
func UintMulByBips(value uint64, bips Bips) uint64 {
+ // #nosec G115
return value * uint64(bips) / uint64(OneInBips)
}
-func SaturatingCastToBips(value uint64) Bips {
- return Bips(SaturatingCast[int64](value))
-}
-
-func (bips UBips) Uint64() uint64 {
- return uint64(bips)
+// UintSaturatingMulByBips multiplies a uint value by a bips value,
+// saturating at the maximum bips value (not the maximum uint64 result),
+// then rounding down and returning a uint64.
+// Returns 0 if bips is less than or equal to zero
+func UintSaturatingMulByBips(value uint64, bips Bips) uint64 {
+ if bips <= 0 {
+ return 0
+ }
+ // #nosec G115
+ return SaturatingUMul(value, uint64(bips)) / uint64(OneInBips)
}
-func (bips Bips) Uint64() uint64 {
- return uint64(bips)
+func SaturatingCastToBips(value uint64) Bips {
+ return Bips(SaturatingCast[int64](value))
}
// BigDivToBips returns dividend/divisor as bips, saturating if out of bounds
func BigDivToBips(dividend, divisor *big.Int) Bips {
value := BigMulByInt(dividend, int64(OneInBips))
value.Div(value, divisor)
- return Bips(BigToUintSaturating(value))
+ return Bips(BigToIntSaturating(value))
}
diff --git a/util/arbmath/math.go b/util/arbmath/math.go
index 62af1e26e0..07a9941b65 100644
--- a/util/arbmath/math.go
+++ b/util/arbmath/math.go
@@ -29,6 +29,7 @@ func NextOrCurrentPowerOf2(value uint64) uint64 {
// Log2ceil the log2 of the int, rounded up
func Log2ceil(value uint64) uint64 {
+ // #nosec G115
return uint64(64 - bits.LeadingZeros64(value))
}
@@ -117,6 +118,18 @@ func BigToUintSaturating(value *big.Int) uint64 {
return value.Uint64()
}
+// BigToUintSaturating casts a huge to an int, saturating if out of bounds
+func BigToIntSaturating(value *big.Int) int64 {
+ if !value.IsInt64() {
+ if value.Sign() < 0 {
+ return math.MinInt64
+ } else {
+ return math.MaxInt64
+ }
+ }
+ return value.Int64()
+}
+
// BigToUintOrPanic casts a huge to a uint, panicking if out of bounds
func BigToUintOrPanic(value *big.Int) uint64 {
if value.Sign() < 0 {
@@ -216,8 +229,8 @@ func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int {
return value
}
-// BigMulByUfrac multiply a huge by a rational whose components are non-negative
-func BigMulByUfrac(value *big.Int, numerator, denominator uint64) *big.Int {
+// BigMulByUFrac multiply a huge by a rational whose components are non-negative
+func BigMulByUFrac(value *big.Int, numerator, denominator uint64) *big.Int {
value = new(big.Int).Set(value)
value.Mul(value, new(big.Int).SetUint64(numerator))
value.Div(value, new(big.Int).SetUint64(denominator))
@@ -260,10 +273,12 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float {
}
func MaxSignedValue[T Signed]() T {
+ // #nosec G115
return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1)
}
func MinSignedValue[T Signed]() T {
+ // #nosec G115
return T(uint64(1) << ((8 * unsafe.Sizeof(T(0))) - 1))
}
@@ -393,6 +408,8 @@ func ApproxExpBasisPoints(value Bips, accuracy uint64) Bips {
if negative {
input = -value
}
+ // This cast is safe because input is always positive
+ // #nosec G115
x := uint64(input)
bips := uint64(OneInBips)
diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go
index 1be60dc58b..3660f3657e 100644
--- a/util/arbmath/math_test.go
+++ b/util/arbmath/math_test.go
@@ -35,6 +35,7 @@ func TestMath(t *testing.T) {
input := rand.Uint64() / 256
approx := ApproxSquareRoot(input)
correct := math.Sqrt(float64(input))
+ // #nosec G115
diff := int(approx) - int(correct)
if diff < -1 || diff > 1 {
Fail(t, "sqrt approximation off by too much", diff, input, approx, correct)
@@ -43,9 +44,11 @@ func TestMath(t *testing.T) {
// try the first million sqrts
for i := 0; i < 1000000; i++ {
+ // #nosec G115
input := uint64(i)
approx := ApproxSquareRoot(input)
correct := math.Sqrt(float64(input))
+ // #nosec G115
diff := int(approx) - int(correct)
if diff < 0 || diff > 1 {
Fail(t, "sqrt approximation off by too much", diff, input, approx, correct)
@@ -57,6 +60,7 @@ func TestMath(t *testing.T) {
input := uint64(1 << i)
approx := ApproxSquareRoot(input)
correct := math.Sqrt(float64(input))
+ // #nosec G115
diff := int(approx) - int(correct)
if diff != 0 {
Fail(t, "incorrect", "2^", i, diff, approx, correct)
diff --git a/util/arbmath/uint24.go b/util/arbmath/uint24.go
index 818f871a23..a0c5aa27b7 100644
--- a/util/arbmath/uint24.go
+++ b/util/arbmath/uint24.go
@@ -9,10 +9,10 @@ import (
"math/big"
)
-const MaxUint24 = 1<<24 - 1 // 16777215
-
type Uint24 uint32
+const MaxUint24 = 1<<24 - 1 // 16777215
+
func (value Uint24) ToBig() *big.Int {
return UintToBig(uint64(value))
}
@@ -26,8 +26,9 @@ func (value Uint24) ToUint64() uint64 {
}
func IntToUint24[T uint32 | uint64](value T) (Uint24, error) {
+ // #nosec G115
if value > T(MaxUint24) {
- return Uint24(MaxUint24), errors.New("value out of range")
+ return MaxUint24, errors.New("value out of range")
}
return Uint24(value), nil
}
@@ -40,6 +41,7 @@ func BigToUint24OrPanic(value *big.Int) Uint24 {
if !value.IsUint64() || value.Uint64() > MaxUint24 {
panic("big.Int value exceeds the max Uint24")
}
+ // #nosec G115
return Uint24(value.Uint64())
}
diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go
index 405c776bad..f5914edd2e 100644
--- a/util/blobs/blobs.go
+++ b/util/blobs/blobs.go
@@ -41,6 +41,7 @@ func fillBlobBits(blob []byte, data []byte) ([]byte, error) {
accBits += 8
data = data[1:]
}
+ // #nosec G115
blob[fieldElement*32] = uint8(acc & ((1 << spareBlobBits) - 1))
accBits -= spareBlobBits
if accBits < 0 {
@@ -88,6 +89,7 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) {
acc |= uint16(blob[fieldIndex*32]) << accBits
accBits += spareBlobBits
if accBits >= 8 {
+ // #nosec G115
rlpData = append(rlpData, uint8(acc))
acc >>= 8
accBits -= 8
diff --git a/util/containers/stack.go b/util/containers/stack.go
new file mode 100644
index 0000000000..ea7f31013b
--- /dev/null
+++ b/util/containers/stack.go
@@ -0,0 +1,50 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package containers
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+type Stack[T any] []T
+
+func NewStack[T any]() *Stack[T] {
+ return &Stack[T]{}
+}
+
+func (s *Stack[T]) Push(v T) {
+ if s == nil {
+ log.Warn("trying to push nil stack")
+ return
+ }
+ *s = append(*s, v)
+}
+
+func (s *Stack[T]) Pop() (T, error) {
+ if s == nil {
+ var zeroVal T
+ return zeroVal, fmt.Errorf("trying to pop nil stack")
+ }
+ if s.Empty() {
+ var zeroVal T
+ return zeroVal, fmt.Errorf("trying to pop empty stack")
+ }
+ i := len(*s) - 1
+ val := (*s)[i]
+ *s = (*s)[:i]
+ return val, nil
+}
+
+func (s *Stack[T]) Empty() bool {
+ return s == nil || len(*s) == 0
+}
+
+func (s *Stack[T]) Len() int {
+ if s == nil {
+ return 0
+ }
+ return len(*s)
+}
diff --git a/util/dbutil/dbutil.go b/util/dbutil/dbutil.go
index a1eb6ce208..6573c5742c 100644
--- a/util/dbutil/dbutil.go
+++ b/util/dbutil/dbutil.go
@@ -5,8 +5,12 @@ package dbutil
import (
"errors"
+ "fmt"
+ "io/fs"
+ "regexp"
"github.com/cockroachdb/pebble"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/syndtr/goleveldb/leveldb"
)
@@ -14,3 +18,40 @@ import (
func IsErrNotFound(err error) bool {
return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, pebble.ErrNotFound) || errors.Is(err, memorydb.ErrMemorydbNotFound)
}
+
+var pebbleNotExistErrorRegex = regexp.MustCompile("pebble: database .* does not exist")
+
+func isPebbleNotExistError(err error) bool {
+ return err != nil && pebbleNotExistErrorRegex.MatchString(err.Error())
+}
+
+func isLeveldbNotExistError(err error) bool {
+ return errors.Is(err, fs.ErrNotExist)
+}
+
+// IsNotExistError returns true if the error is a "database not found" error.
+// It must return false if err is nil.
+func IsNotExistError(err error) bool {
+ return isLeveldbNotExistError(err) || isPebbleNotExistError(err)
+}
+
+var unfinishedConversionCanaryKey = []byte("unfinished-conversion-canary-key")
+
+func PutUnfinishedConversionCanary(db ethdb.KeyValueStore) error {
+ return db.Put(unfinishedConversionCanaryKey, []byte{1})
+}
+
+func DeleteUnfinishedConversionCanary(db ethdb.KeyValueStore) error {
+ return db.Delete(unfinishedConversionCanaryKey)
+}
+
+func UnfinishedConversionCheck(db ethdb.KeyValueStore) error {
+ unfinished, err := db.Has(unfinishedConversionCanaryKey)
+ if err != nil {
+ return fmt.Errorf("Failed to check UnfinishedConversionCanaryKey existence: %w", err)
+ }
+ if unfinished {
+ return errors.New("Unfinished conversion canary key detected")
+ }
+ return nil
+}
diff --git a/util/dbutil/dbutil_test.go b/util/dbutil/dbutil_test.go
new file mode 100644
index 0000000000..b303bb56b6
--- /dev/null
+++ b/util/dbutil/dbutil_test.go
@@ -0,0 +1,49 @@
+package dbutil
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/node"
+)
+
+func testIsNotExistError(t *testing.T, dbEngine string, isNotExist func(error) bool) {
+ stackConf := node.DefaultConfig
+ stackConf.DataDir = t.TempDir()
+ stackConf.DBEngine = dbEngine
+ stack, err := node.New(&stackConf)
+ if err != nil {
+ t.Fatalf("Failed to created test stack: %v", err)
+ }
+ defer stack.Close()
+ readonly := true
+ _, err = stack.OpenDatabaseWithExtraOptions("test", 16, 16, "", readonly, nil)
+ if err == nil {
+ t.Fatal("Opening non-existent database did not fail")
+ }
+ if !isNotExist(err) {
+ t.Fatalf("Failed to classify error as not exist error - internal implementation of OpenDatabaseWithExtraOptions might have changed, err: %v", err)
+ }
+ err = errors.New("some other error")
+ if isNotExist(err) {
+ t.Fatalf("Classified other error as not exist, err: %v", err)
+ }
+ if isNotExist(nil) {
+ t.Fatal("Classified nil as not exist")
+ }
+}
+
+func TestIsNotExistError(t *testing.T) {
+ t.Run("TestIsPebbleNotExistError", func(t *testing.T) {
+ testIsNotExistError(t, "pebble", isPebbleNotExistError)
+ })
+ t.Run("TestIsLeveldbNotExistError", func(t *testing.T) {
+ testIsNotExistError(t, "leveldb", isLeveldbNotExistError)
+ })
+ t.Run("TestIsNotExistErrorWithPebble", func(t *testing.T) {
+ testIsNotExistError(t, "pebble", IsNotExistError)
+ })
+ t.Run("TestIsNotExistErrorWithLeveldb", func(t *testing.T) {
+ testIsNotExistError(t, "leveldb", IsNotExistError)
+ })
+}
diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go
index 2b47a940c3..4831994bba 100644
--- a/util/headerreader/blob_client.go
+++ b/util/headerreader/blob_client.go
@@ -18,8 +18,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
- "github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/util/blobs"
"github.com/offchainlabs/nitro/util/jsonapi"
"github.com/offchainlabs/nitro/util/pretty"
@@ -28,7 +28,7 @@ import (
)
type BlobClient struct {
- ec arbutil.L1Interface
+ ec *ethclient.Client
beaconUrl *url.URL
secondaryBeaconUrl *url.URL
httpClient *http.Client
@@ -63,7 +63,7 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) {
f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters")
}
-func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) {
+func NewBlobClient(config BlobClientConfig, ec *ethclient.Client) (*BlobClient, error) {
beaconUrl, err := url.Parse(config.BeaconUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err)
@@ -191,6 +191,7 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas
rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot))
if err != nil || len(rawData) == 0 {
// blobs are pruned after 4096 epochs (1 epoch = 32 slots), we determine if the requested slot were to be pruned by a non-archive endpoint
+ // #nosec G115
roughAgeOfSlot := uint64(time.Now().Unix()) - (b.genesisTime + slot*b.secondsPerSlot)
if roughAgeOfSlot > b.secondsPerSlot*32*4096 {
return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching older blobs in slot: %d, an archive endpoint is required, please refer to https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers, err: %w", slot, err)
diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go
index 074d24338e..98f778dee8 100644
--- a/util/headerreader/header_reader.go
+++ b/util/headerreader/header_reader.go
@@ -16,6 +16,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/nitro/arbutil"
@@ -33,7 +34,7 @@ type ArbSysInterface interface {
type HeaderReader struct {
stopwaiter.StopWaiter
config ConfigFetcher
- client arbutil.L1Interface
+ client *ethclient.Client
isParentChainArbitrum bool
arbSys ArbSysInterface
@@ -120,7 +121,7 @@ var TestConfig = Config{
},
}
-func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) {
+func New(ctx context.Context, client *ethclient.Client, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) {
isParentChainArbitrum := false
var arbSys ArbSysInterface
if arbSysPrecompile != nil {
@@ -340,6 +341,7 @@ func (s *HeaderReader) logIfHeaderIsOld() {
if storedHeader == nil {
return
}
+ // #nosec G115
l1Timetamp := time.Unix(int64(storedHeader.Time), 0)
headerTime := time.Since(l1Timetamp)
if headerTime >= s.config().OldHeaderTimeout {
@@ -521,7 +523,7 @@ func (s *HeaderReader) LatestFinalizedBlockNr(ctx context.Context) (uint64, erro
return header.Number.Uint64(), nil
}
-func (s *HeaderReader) Client() arbutil.L1Interface {
+func (s *HeaderReader) Client() *ethclient.Client {
return s.client
}
diff --git a/util/merkletree/merkleEventProof_test.go b/util/merkletree/merkleEventProof_test.go
index b64cc88c2a..6af8479190 100644
--- a/util/merkletree/merkleEventProof_test.go
+++ b/util/merkletree/merkleEventProof_test.go
@@ -22,6 +22,7 @@ func initializedMerkleAccumulatorForTesting() *merkleAccumulator.MerkleAccumulat
func TestProofForNext(t *testing.T) {
leaves := make([]common.Hash, 13)
for i := range leaves {
+ // #nosec G115
leaves[i] = pseudorandomForTesting(uint64(i))
}
diff --git a/util/merkletree/merkleTree.go b/util/merkletree/merkleTree.go
index 1b15d51d98..fffa9bcabc 100644
--- a/util/merkletree/merkleTree.go
+++ b/util/merkletree/merkleTree.go
@@ -43,8 +43,8 @@ func NewLevelAndLeaf(level, leaf uint64) LevelAndLeaf {
func (place LevelAndLeaf) ToBigInt() *big.Int {
return new(big.Int).Add(
- new(big.Int).Lsh(big.NewInt(int64(place.Level)), 192),
- big.NewInt(int64(place.Leaf)),
+ new(big.Int).Lsh(new(big.Int).SetUint64(place.Level), 192),
+ new(big.Int).SetUint64(place.Leaf),
)
}
diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go
index 59e3b0e0f9..2c12ffec50 100644
--- a/util/redisutil/redis_coordinator.go
+++ b/util/redisutil/redis_coordinator.go
@@ -13,12 +13,13 @@ import (
"github.com/offchainlabs/nitro/arbutil"
)
-const CHOSENSEQ_KEY string = "coordinator.chosen" // Never overwritten. Expires or released only
-const MSG_COUNT_KEY string = "coordinator.msgCount" // Only written by sequencer holding CHOSEN key
-const PRIORITIES_KEY string = "coordinator.priorities" // Read only
-const WANTS_LOCKOUT_KEY_PREFIX string = "coordinator.liveliness." // Per server. Only written by self
-const MESSAGE_KEY_PREFIX string = "coordinator.msg." // Per Message. Only written by sequencer holding CHOSEN
-const SIGNATURE_KEY_PREFIX string = "coordinator.msg.sig." // Per Message. Only written by sequencer holding CHOSEN
+const CHOSENSEQ_KEY string = "coordinator.chosen" // Never overwritten. Expires or released only
+const MSG_COUNT_KEY string = "coordinator.msgCount" // Only written by sequencer holding CHOSEN key
+const FINALIZED_MSG_COUNT_KEY string = "coordinator.finalizedMsgCount" // Only written by sequencer holding CHOSEN key
+const PRIORITIES_KEY string = "coordinator.priorities" // Read only
+const WANTS_LOCKOUT_KEY_PREFIX string = "coordinator.liveliness." // Per server. Only written by self
+const MESSAGE_KEY_PREFIX string = "coordinator.msg." // Per Message. Only written by sequencer holding CHOSEN
+const SIGNATURE_KEY_PREFIX string = "coordinator.msg.sig." // Per Message. Only written by sequencer holding CHOSEN
const WANTS_LOCKOUT_VAL string = "OK"
const INVALID_VAL string = "INVALID"
const INVALID_URL string = ""
diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go
index be5825a28d..a35d4b6665 100644
--- a/util/rpcclient/rpcclient.go
+++ b/util/rpcclient/rpcclient.go
@@ -101,7 +101,7 @@ func (c *RpcClient) Close() {
}
type limitedMarshal struct {
- limit int
+ limit uint
value any
}
@@ -113,16 +113,18 @@ func (m limitedMarshal) String() string {
} else {
str = string(marshalled)
}
- if m.limit == 0 || len(str) <= m.limit {
+ // #nosec G115
+ limit := int(m.limit)
+ if m.limit <= 0 || len(str) <= limit {
return str
}
prefix := str[:m.limit/2-1]
- postfix := str[len(str)-m.limit/2+1:]
+ postfix := str[len(str)-limit/2+1:]
return fmt.Sprintf("%v..%v", prefix, postfix)
}
type limitedArgumentsMarshal struct {
- limit int
+ limit uint
args []any
}
@@ -162,9 +164,9 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth
return errors.New("not connected")
}
logId := c.logId.Add(1)
- log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{int(c.config().ArgLogLimit), args})
+ log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{c.config().ArgLogLimit, args})
var err error
- for i := 0; i < int(c.config().Retries)+1; i++ {
+ for i := uint(0); i < c.config().Retries+1; i++ {
retryDelay := c.config().RetryDelay
if i > 0 && retryDelay > 0 {
select {
@@ -188,7 +190,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth
cancelCtx()
logger := log.Trace
- limit := int(c.config().ArgLogLimit)
+ limit := c.config().ArgLogLimit
if err != nil && !IsAlreadyKnownError(err) {
logger = log.Info
}
diff --git a/util/sharedmetrics/sharedmetrics.go b/util/sharedmetrics/sharedmetrics.go
index 377eef5352..9b4b3609bc 100644
--- a/util/sharedmetrics/sharedmetrics.go
+++ b/util/sharedmetrics/sharedmetrics.go
@@ -11,8 +11,10 @@ var (
)
func UpdateSequenceNumberGauge(sequenceNumber arbutil.MessageIndex) {
+ // #nosec G115
latestSequenceNumberGauge.Update(int64(sequenceNumber))
}
func UpdateSequenceNumberInBlockGauge(sequenceNumber arbutil.MessageIndex) {
+ // #nosec G115
sequenceNumberInBlockGauge.Update(int64(sequenceNumber))
}
diff --git a/util/testhelpers/testhelpers.go b/util/testhelpers/testhelpers.go
index b1b08708e7..d681b422bf 100644
--- a/util/testhelpers/testhelpers.go
+++ b/util/testhelpers/testhelpers.go
@@ -65,6 +65,7 @@ func RandomCallValue(limit int64) *big.Int {
// Computes a psuedo-random uint64 on the interval [min, max]
func RandomUint32(min, max uint32) uint32 {
+ //#nosec G115
return uint32(RandomUint64(uint64(min), uint64(max)))
}
diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go
index b3ad0f8839..adc2f34af5 100644
--- a/validator/client/redis/producer.go
+++ b/validator/client/redis/producer.go
@@ -6,6 +6,8 @@ import (
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/go-redis/redis/v8"
"github.com/offchainlabs/nitro/pubsub"
@@ -32,11 +34,20 @@ func (c ValidationClientConfig) Enabled() bool {
return c.RedisURL != ""
}
+func (c ValidationClientConfig) Validate() error {
+ for _, arch := range c.StylusArchs {
+ if !rawdb.IsSupportedWasmTarget(ethdb.WasmTarget(arch)) {
+ return fmt.Errorf("Invalid stylus arch: %v", arch)
+ }
+ }
+ return nil
+}
+
var DefaultValidationClientConfig = ValidationClientConfig{
Name: "redis validation client",
Room: 2,
RedisURL: "",
- StylusArchs: []string{"wavm"},
+ StylusArchs: []string{string(rawdb.TargetWavm)},
ProducerConfig: pubsub.DefaultProducerConfig,
CreateStreams: true,
}
@@ -46,7 +57,7 @@ var TestValidationClientConfig = ValidationClientConfig{
Room: 2,
RedisURL: "",
StreamPrefix: "test-",
- StylusArchs: []string{"wavm"},
+ StylusArchs: []string{string(rawdb.TargetWavm)},
ProducerConfig: pubsub.TestProducerConfig,
CreateStreams: false,
}
@@ -152,8 +163,12 @@ func (c *ValidationClient) Name() string {
return c.config.Name
}
-func (c *ValidationClient) StylusArchs() []string {
- return c.config.StylusArchs
+func (c *ValidationClient) StylusArchs() []ethdb.WasmTarget {
+ stylusArchs := make([]ethdb.WasmTarget, 0, len(c.config.StylusArchs))
+ for _, arch := range c.config.StylusArchs {
+ stylusArchs = append(stylusArchs, ethdb.WasmTarget(arch))
+ }
+ return stylusArchs
}
func (c *ValidationClient) Room() int {
diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go
index 05d947db3d..934362f00a 100644
--- a/validator/client/validation_client.go
+++ b/validator/client/validation_client.go
@@ -8,7 +8,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "runtime"
"sync/atomic"
"time"
@@ -22,6 +21,8 @@ import (
"github.com/offchainlabs/nitro/validator/server_common"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
@@ -31,7 +32,7 @@ type ValidationClient struct {
stopwaiter.StopWaiter
client *rpcclient.RpcClient
name string
- stylusArchs []string
+ stylusArchs []ethdb.WasmTarget
room atomic.Int32
wasmModuleRoots []common.Hash
}
@@ -40,7 +41,7 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node)
return &ValidationClient{
client: rpcclient.NewRpcClient(config, stack),
name: "not started",
- stylusArchs: []string{"not started"},
+ stylusArchs: []ethdb.WasmTarget{"not started"},
}
}
@@ -67,20 +68,20 @@ func (c *ValidationClient) Start(ctx context.Context) error {
if len(name) == 0 {
return errors.New("couldn't read name from server")
}
- var stylusArchs []string
+ var stylusArchs []ethdb.WasmTarget
if err := c.client.CallContext(ctx, &stylusArchs, server_api.Namespace+"_stylusArchs"); err != nil {
var rpcError rpc.Error
ok := errors.As(err, &rpcError)
if !ok || rpcError.ErrorCode() != -32601 {
return fmt.Errorf("could not read stylus arch from server: %w", err)
}
- stylusArchs = []string{"pre-stylus"} // validation does not support stylus
+ stylusArchs = []ethdb.WasmTarget{ethdb.WasmTarget("pre-stylus")} // invalid, will fail if trying to validate block with stylus
} else {
if len(stylusArchs) == 0 {
return fmt.Errorf("could not read stylus archs from validation server")
}
for _, stylusArch := range stylusArchs {
- if stylusArch != "wavm" && stylusArch != runtime.GOARCH && stylusArch != "mock" {
+ if !rawdb.IsSupportedWasmTarget(ethdb.WasmTarget(stylusArch)) && stylusArch != "mock" {
return fmt.Errorf("unsupported stylus architecture: %v", stylusArch)
}
}
@@ -102,6 +103,7 @@ func (c *ValidationClient) Start(ctx context.Context) error {
} else {
log.Info("connected to validation server", "name", name, "room", room)
}
+ // #nosec G115
c.room.Store(int32(room))
c.wasmModuleRoots = moduleRoots
c.name = name
@@ -117,11 +119,11 @@ func (c *ValidationClient) WasmModuleRoots() ([]common.Hash, error) {
return nil, errors.New("not started")
}
-func (c *ValidationClient) StylusArchs() []string {
+func (c *ValidationClient) StylusArchs() []ethdb.WasmTarget {
if c.Started() {
return c.stylusArchs
}
- return []string{"not started"}
+ return []ethdb.WasmTarget{"not started"}
}
func (c *ValidationClient) Stop() {
@@ -186,19 +188,6 @@ func (c *ExecutionClient) LatestWasmModuleRoot() containers.PromiseInterface[com
})
}
-func (c *ExecutionClient) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] {
- jsonInput := server_api.ValidationInputToJson(input)
- if err := jsonInput.WriteToFile(); err != nil {
- return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) {
- return struct{}{}, err
- })
- }
- return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) {
- err := c.client.CallContext(ctx, nil, server_api.Namespace+"_writeToFile", jsonInput, expOut, moduleRoot)
- return struct{}{}, err
- })
-}
-
func (r *ExecutionClientRun) SendKeepAlive(ctx context.Context) time.Duration {
err := r.client.client.CallContext(ctx, nil, server_api.Namespace+"_execKeepAlive", r.id)
if err != nil {
diff --git a/validator/inputs/writer.go b/validator/inputs/writer.go
new file mode 100644
index 0000000000..a45e584f52
--- /dev/null
+++ b/validator/inputs/writer.go
@@ -0,0 +1,141 @@
+package inputs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/offchainlabs/nitro/validator/server_api"
+)
+
+// Writer is a configurable writer of InputJSON files.
+//
+// The default Writer will write to a path like:
+//
+// $HOME/.arbuitrum/validation-inputs//block_inputs_.json
+//
+// The path can be nested under a slug directory so callers can provide a
+// recognizable name to differentiate various contexts in which the InputJSON
+// is being written. If the Writer is configured by calling SetSlug, then the
+// path will be like:
+//
+// $HOME/.arbuitrum/validation-inputs///block_inputs_.json
+//
+// The inclusion of a timestamp directory is on by default to avoid conflicts which
+// would result in files being overwritten. However, the Writer can be configured
+// to not use a timestamp directory. If the Writer is configured by calling
+// SetUseTimestampDir(false), then the path will be like:
+//
+// $HOME/.arbuitrum/validation-inputs//block_inputs_.json
+//
+// Finally, to give complete control to the clients, the base directory can be
+// set directly with SetBaseDir. In which case, the path will be like:
+//
+// /block_inputs_.json
+// or
+// //block_inputs_.json
+// or
+// ///block_inputs_.json
+type Writer struct {
+ clock Clock
+ baseDir string
+ slug string
+ useTimestampDir bool
+}
+
+// WriterOption is a function that configures a Writer.
+type WriterOption func(*Writer)
+
+// Clock is an interface for getting the current time.
+type Clock interface {
+ Now() time.Time
+}
+
+type realClock struct{}
+
+func (realClock) Now() time.Time {
+ return time.Now()
+}
+
+// NewWriter creates a new Writer with default settings.
+func NewWriter(options ...WriterOption) (*Writer, error) {
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ return nil, err
+ }
+ baseDir := filepath.Join(homeDir, ".arbitrum", "validation-inputs")
+ w := &Writer{
+ clock: realClock{},
+ baseDir: baseDir,
+ slug: "",
+ useTimestampDir: true,
+ }
+ for _, o := range options {
+ o(w)
+ }
+ return w, nil
+}
+
+// withTestClock configures the Writer to use the given clock.
+//
+// This is only intended for testing.
+func withTestClock(clock Clock) WriterOption {
+ return func(w *Writer) {
+ w.clock = clock
+ }
+}
+
+// WithSlug configures the Writer to use the given slug as a directory name.
+func WithSlug(slug string) WriterOption {
+ return func(w *Writer) {
+ w.slug = slug
+ }
+}
+
+// WithoutSlug clears the slug configuration.
+//
+// This is equivalent to the WithSlug("") option but is more readable.
+func WithoutSlug() WriterOption {
+ return WithSlug("")
+}
+
+// WithBaseDir configures the Writer to use the given base directory.
+func WithBaseDir(baseDir string) WriterOption {
+ return func(w *Writer) {
+ w.baseDir = baseDir
+ }
+}
+
+// WithTimestampDirEnabled controls the addition of a timestamp directory.
+func WithTimestampDirEnabled(useTimestampDir bool) WriterOption {
+ return func(w *Writer) {
+ w.useTimestampDir = useTimestampDir
+ }
+}
+
+// Write writes the given InputJSON to a file in JSON format.
+func (w *Writer) Write(json *server_api.InputJSON) error {
+ dir := w.baseDir
+ if w.slug != "" {
+ dir = filepath.Join(dir, w.slug)
+ }
+ if w.useTimestampDir {
+ t := w.clock.Now()
+ tStr := t.Format("20060102_150405")
+ dir = filepath.Join(dir, tStr)
+ }
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return err
+ }
+ contents, err := json.Marshal()
+ if err != nil {
+ return err
+ }
+ if err = os.WriteFile(
+ filepath.Join(dir, fmt.Sprintf("block_inputs_%d.json", json.Id)),
+ contents, 0600); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/validator/inputs/writer_test.go b/validator/inputs/writer_test.go
new file mode 100644
index 0000000000..59cb63dae7
--- /dev/null
+++ b/validator/inputs/writer_test.go
@@ -0,0 +1,92 @@
+package inputs
+
import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/offchainlabs/nitro/validator/server_api"
)
+
+func TestDefaultBaseDir(t *testing.T) {
+ // Simply testing that the default baseDir is set relative to the user's home directory.
+ // This way, the other tests can all override the baseDir to a temporary directory.
+ w, err := NewWriter()
+ if err != nil {
+ t.Fatal(err)
+ }
+ homeDir, err := os.UserHomeDir()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if w.baseDir != homeDir+"/.arbitrum/validation-inputs" {
+ t.Errorf("unexpected baseDir: %v", w.baseDir)
+ }
+}
+
// fakeClock is a test double for Clock that always reports a fixed time.
type fakeClock struct {
	// now is the fixed instant returned by Now.
	now time.Time
}

// Now implements Clock by returning the fixed time configured on the fakeClock.
func (c fakeClock) Now() time.Time {
	return c.now
}
+
+func TestWriting(t *testing.T) {
+ dir := t.TempDir()
+ w, err := NewWriter(
+ withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}),
+ WithBaseDir(dir),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = w.Write(&server_api.InputJSON{Id: 24601})
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The file should exist.
+ if _, err := os.Stat(dir + "/20210102_030405/block_inputs_24601.json"); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestWritingWithSlug(t *testing.T) {
+ dir := t.TempDir()
+ w, err := NewWriter(
+ withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}),
+ WithBaseDir(dir),
+ WithSlug("foo"),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = w.Write(&server_api.InputJSON{Id: 24601})
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The file should exist.
+ if _, err := os.Stat(dir + "/foo/20210102_030405/block_inputs_24601.json"); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestWritingWithoutTimestampDir(t *testing.T) {
+ dir := t.TempDir()
+ w, err := NewWriter(
+ withTestClock(fakeClock{now: time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)}),
+ WithBaseDir(dir),
+ WithTimestampDirEnabled(false),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = w.Write(&server_api.InputJSON{Id: 24601})
+ if err != nil {
+ t.Fatal(err)
+ }
+ // The file should exist.
+ if _, err := os.Stat(dir + "/block_inputs_24601.json"); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/validator/interface.go b/validator/interface.go
index 80aa2c1fcc..9fb831ca0d 100644
--- a/validator/interface.go
+++ b/validator/interface.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/util/containers"
)
@@ -13,7 +14,7 @@ type ValidationSpawner interface {
Start(context.Context) error
Stop()
Name() string
- StylusArchs() []string
+ StylusArchs() []ethdb.WasmTarget
Room() int
}
@@ -26,7 +27,6 @@ type ExecutionSpawner interface {
ValidationSpawner
CreateExecutionRun(wasmModuleRoot common.Hash, input *ValidationInput) containers.PromiseInterface[ExecutionRun]
LatestWasmModuleRoot() containers.PromiseInterface[common.Hash]
- WriteToFile(input *ValidationInput, expOut GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}]
}
type ExecutionRun interface {
diff --git a/validator/server_api/json.go b/validator/server_api/json.go
index 90746e4c57..8dfbc8446a 100644
--- a/validator/server_api/json.go
+++ b/validator/server_api/json.go
@@ -8,9 +8,9 @@ import (
"encoding/json"
"errors"
"fmt"
- "os"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/arbcompress"
"github.com/offchainlabs/nitro/arbutil"
@@ -63,19 +63,13 @@ type InputJSON struct {
BatchInfo []BatchInfoJson
DelayedMsgB64 string
StartState validator.GoGlobalState
- UserWasms map[string]map[common.Hash]string
+ UserWasms map[ethdb.WasmTarget]map[common.Hash]string
DebugChain bool
}
-func (i *InputJSON) WriteToFile() error {
- contents, err := json.MarshalIndent(i, "", " ")
- if err != nil {
- return err
- }
- if err = os.WriteFile(fmt.Sprintf("block_inputs_%d.json", i.Id), contents, 0600); err != nil {
- return err
- }
- return nil
+// Marshal returns the JSON encoding of the InputJSON.
+func (i *InputJSON) Marshal() ([]byte, error) {
+ return json.MarshalIndent(i, "", " ")
}
type BatchInfoJson struct {
@@ -95,14 +89,14 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON {
DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg),
StartState: entry.StartState,
PreimagesB64: jsonPreimagesMap,
- UserWasms: make(map[string]map[common.Hash]string),
+ UserWasms: make(map[ethdb.WasmTarget]map[common.Hash]string),
DebugChain: entry.DebugChain,
}
for _, binfo := range entry.BatchInfo {
encData := base64.StdEncoding.EncodeToString(binfo.Data)
res.BatchInfo = append(res.BatchInfo, BatchInfoJson{Number: binfo.Number, DataB64: encData})
}
- for arch, wasms := range entry.UserWasms {
+ for target, wasms := range entry.UserWasms {
archWasms := make(map[common.Hash]string)
for moduleHash, data := range wasms {
compressed, err := arbcompress.CompressLevel(data, 1)
@@ -111,7 +105,7 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON {
}
archWasms[moduleHash] = base64.StdEncoding.EncodeToString(compressed)
}
- res.UserWasms[arch] = archWasms
+ res.UserWasms[target] = archWasms
}
return res
}
@@ -127,7 +121,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro
DelayedMsgNr: entry.DelayedMsgNr,
StartState: entry.StartState,
Preimages: preimages,
- UserWasms: make(map[string]map[common.Hash][]byte),
+ UserWasms: make(map[ethdb.WasmTarget]map[common.Hash][]byte),
DebugChain: entry.DebugChain,
}
delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64)
@@ -146,7 +140,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro
}
valInput.BatchInfo = append(valInput.BatchInfo, decInfo)
}
- for arch, wasms := range entry.UserWasms {
+ for target, wasms := range entry.UserWasms {
archWasms := make(map[common.Hash][]byte)
for moduleHash, encoded := range wasms {
decoded, err := base64.StdEncoding.DecodeString(encoded)
@@ -171,7 +165,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, erro
}
archWasms[moduleHash] = uncompressed
}
- valInput.UserWasms[arch] = archWasms
+ valInput.UserWasms[target] = archWasms
}
return valInput, nil
}
diff --git a/validator/server_arb/execution_run_test.go b/validator/server_arb/execution_run_test.go
index bdc1eefc4d..479db58515 100644
--- a/validator/server_arb/execution_run_test.go
+++ b/validator/server_arb/execution_run_test.go
@@ -194,7 +194,7 @@ func Test_machineHashesWithStep(t *testing.T) {
Batch: 1,
PosInBatch: mm.totalSteps - 1,
}))
- if len(hashes) >= int(maxIterations) {
+ if uint64(len(hashes)) >= maxIterations {
t.Fatal("Wanted fewer hashes than the max iterations")
}
for i := range hashes {
diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go
index adca9695e2..1e73e6b212 100644
--- a/validator/server_arb/machine.go
+++ b/validator/server_arb/machine.go
@@ -4,7 +4,7 @@
package server_arb
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#include "arbitrator.h"
ResolvedPreimage preimageResolverC(size_t context, uint8_t preimageType, const uint8_t* hash);
diff --git a/validator/server_arb/machine_cache.go b/validator/server_arb/machine_cache.go
index 23fcdef6d6..35f3406236 100644
--- a/validator/server_arb/machine_cache.go
+++ b/validator/server_arb/machine_cache.go
@@ -31,7 +31,7 @@ type MachineCache struct {
}
type MachineCacheConfig struct {
- CachedChallengeMachines int `koanf:"cached-challenge-machines"`
+ CachedChallengeMachines uint64 `koanf:"cached-challenge-machines"`
InitialSteps uint64 `koanf:"initial-steps"`
}
@@ -42,7 +42,7 @@ var DefaultMachineCacheConfig = MachineCacheConfig{
func MachineCacheConfigConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Uint64(prefix+".initial-steps", DefaultMachineCacheConfig.InitialSteps, "initial steps between machines")
- f.Int(prefix+".cached-challenge-machines", DefaultMachineCacheConfig.CachedChallengeMachines, "how many machines to store in cache while working on a challenge (should be even)")
+ f.Uint64(prefix+".cached-challenge-machines", DefaultMachineCacheConfig.CachedChallengeMachines, "how many machines to store in cache while working on a challenge (should be even)")
}
// `initialMachine` won't be mutated by this function.
@@ -140,7 +140,7 @@ func (c *MachineCache) unlockBuild(err error) {
}
func (c *MachineCache) setRangeLocked(ctx context.Context, start uint64, end uint64) error {
- newInterval := (end - start) / uint64(c.config.CachedChallengeMachines)
+ newInterval := (end - start) / c.config.CachedChallengeMachines
if newInterval == 0 {
newInterval = 2
}
@@ -150,7 +150,7 @@ func (c *MachineCache) setRangeLocked(ctx context.Context, start uint64, end uin
if end >= c.finalMachineStep {
end = c.finalMachineStep - newInterval/2
}
- newInterval = (end - start) / uint64(c.config.CachedChallengeMachines)
+ newInterval = (end - start) / c.config.CachedChallengeMachines
if newInterval == 0 {
newInterval = 1
}
@@ -212,7 +212,7 @@ func (c *MachineCache) populateCache(ctx context.Context) error {
if nextMachine.GetStepCount()+c.machineStepInterval >= c.finalMachineStep {
break
}
- if len(c.machines) >= c.config.CachedChallengeMachines {
+ if uint64(len(c.machines)) >= c.config.CachedChallengeMachines {
break
}
nextMachine = nextMachine.CloneMachineInterface()
@@ -236,9 +236,11 @@ func (c *MachineCache) getClosestMachine(stepCount uint64) (int, MachineInterfac
}
stepsFromStart := stepCount - c.firstMachineStep
var index int
+ // #nosec G115
if c.machineStepInterval == 0 || stepsFromStart > c.machineStepInterval*uint64(len(c.machines)-1) {
index = len(c.machines) - 1
} else {
+ // #nosec G115
index = int(stepsFromStart / c.machineStepInterval)
}
return index, c.machines[index]
diff --git a/validator/server_arb/nitro_machine.go b/validator/server_arb/nitro_machine.go
index 2b2cb230b6..926b1e8930 100644
--- a/validator/server_arb/nitro_machine.go
+++ b/validator/server_arb/nitro_machine.go
@@ -4,7 +4,7 @@
package server_arb
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#include "arbitrator.h"
#include
*/
diff --git a/validator/server_arb/preimage_resolver.go b/validator/server_arb/preimage_resolver.go
index cd4ea40e28..f01d79f4dd 100644
--- a/validator/server_arb/preimage_resolver.go
+++ b/validator/server_arb/preimage_resolver.go
@@ -4,7 +4,7 @@
package server_arb
/*
-#cgo CFLAGS: -g -Wall -I../../target/include/
+#cgo CFLAGS: -g -I../../target/include/
#include "arbitrator.h"
extern ResolvedPreimage preimageResolver(size_t context, uint8_t preimageType, const uint8_t* hash);
diff --git a/validator/server_arb/prover_interface.go b/validator/server_arb/prover_interface.go
index bdd81ed588..3010d2138d 100644
--- a/validator/server_arb/prover_interface.go
+++ b/validator/server_arb/prover_interface.go
@@ -4,7 +4,7 @@
package server_arb
/*
-#cgo CFLAGS: -g -Wall -I../target/include/
+#cgo CFLAGS: -g -I../target/include/
#cgo LDFLAGS: ${SRCDIR}/../../target/lib/libstylus.a -ldl -lm
#include "arbitrator.h"
#include
diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go
index 1d4126dc7c..07971e2ba5 100644
--- a/validator/server_arb/validator_spawner.go
+++ b/validator/server_arb/validator_spawner.go
@@ -2,17 +2,15 @@ package server_arb
import (
"context"
- "encoding/binary"
"errors"
"fmt"
- "os"
- "path/filepath"
"runtime"
"sync/atomic"
"time"
"github.com/spf13/pflag"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/util/containers"
"github.com/offchainlabs/nitro/util/stopwaiter"
@@ -21,6 +19,7 @@ import (
"github.com/offchainlabs/nitro/validator/valnode/redis"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
@@ -88,15 +87,15 @@ func (s *ArbitratorSpawner) WasmModuleRoots() ([]common.Hash, error) {
return s.locator.ModuleRoots(), nil
}
-func (s *ArbitratorSpawner) StylusArchs() []string {
- return []string{"wavm"}
+func (s *ArbitratorSpawner) StylusArchs() []ethdb.WasmTarget {
+ return []ethdb.WasmTarget{rawdb.TargetWavm}
}
func (s *ArbitratorSpawner) Name() string {
return "arbitrator"
}
-func (v *ArbitratorSpawner) loadEntryToMachine(ctx context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error {
+func (v *ArbitratorSpawner) loadEntryToMachine(_ context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error {
resolver := func(ty arbutil.PreimageType, hash common.Hash) ([]byte, error) {
// Check if it's a known preimage
if preimage, ok := entry.Preimages[ty][hash]; ok {
@@ -122,14 +121,14 @@ func (v *ArbitratorSpawner) loadEntryToMachine(ctx context.Context, entry *valid
return fmt.Errorf("error while trying to add sequencer msg for proving: %w", err)
}
}
- if len(entry.UserWasms["wavm"]) == 0 {
+ if len(entry.UserWasms[rawdb.TargetWavm]) == 0 {
for stylusArch, wasms := range entry.UserWasms {
if len(wasms) > 0 {
return fmt.Errorf("bad stylus arch loaded to machine. Expected wavm. Got: %s", stylusArch)
}
}
}
- for moduleHash, module := range entry.UserWasms["wavm"] {
+ for moduleHash, module := range entry.UserWasms[rawdb.TargetWavm] {
err = mach.AddUserWasm(moduleHash, module)
if err != nil {
log.Error(
@@ -178,7 +177,10 @@ func (v *ArbitratorSpawner) execute(
}
steps += count
}
+
+ // #nosec G115
arbitratorValidationSteps.Update(int64(mach.GetStepCount()))
+
if mach.IsErrored() {
log.Error("machine entered errored state during attempted validation", "block", entry.Id)
return validator.GoGlobalState{}, errors.New("machine entered errored state during attempted validation")
@@ -187,6 +189,7 @@ func (v *ArbitratorSpawner) execute(
}
func (v *ArbitratorSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun {
+	println("LAUNCHING ARBITRATOR VALIDATION")
v.count.Add(1)
promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](v, func(ctx context.Context) (validator.GoGlobalState, error) {
defer v.count.Add(-1)
@@ -203,139 +206,6 @@ func (v *ArbitratorSpawner) Room() int {
return avail
}
-var launchTime = time.Now().Format("2006_01_02__15_04")
-
-//nolint:gosec
-func (v *ArbitratorSpawner) writeToFile(ctx context.Context, input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) error {
- outDirPath := filepath.Join(v.locator.RootPath(), v.config().OutputPath, launchTime, fmt.Sprintf("block_%d", input.Id))
- err := os.MkdirAll(outDirPath, 0755)
- if err != nil {
- return err
- }
- if ctx.Err() != nil {
- return ctx.Err()
- }
-
- rootPathAssign := ""
- if executable, err := os.Executable(); err == nil {
- rootPathAssign = "ROOTPATH=\"" + filepath.Dir(executable) + "\"\n"
- }
- cmdFile, err := os.OpenFile(filepath.Join(outDirPath, "run-prover.sh"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
- if err != nil {
- return err
- }
- defer cmdFile.Close()
- _, err = cmdFile.WriteString("#!/bin/bash\n" +
- fmt.Sprintf("# expected output: batch %d, postion %d, hash %s\n", expOut.Batch, expOut.PosInBatch, expOut.BlockHash) +
- "MACHPATH=\"" + v.locator.GetMachinePath(moduleRoot) + "\"\n" +
- rootPathAssign +
- "if (( $# > 1 )); then\n" +
- " if [[ $1 == \"-m\" ]]; then\n" +
- " MACHPATH=$2\n" +
- " shift\n" +
- " shift\n" +
- " fi\n" +
- "fi\n" +
- "${ROOTPATH}/bin/prover ${MACHPATH}/replay.wasm")
- if err != nil {
- return err
- }
- if ctx.Err() != nil {
- return ctx.Err()
- }
-
- libraries := []string{"soft-float.wasm", "wasi_stub.wasm", "go_stub.wasm", "host_io.wasm", "brotli.wasm"}
- for _, module := range libraries {
- _, err = cmdFile.WriteString(" -l " + "${MACHPATH}/" + module)
- if err != nil {
- return err
- }
- }
- _, err = cmdFile.WriteString(fmt.Sprintf(" --inbox-position %d --position-within-message %d --last-block-hash %s", input.StartState.Batch, input.StartState.PosInBatch, input.StartState.BlockHash))
- if err != nil {
- return err
- }
-
- for _, msg := range input.BatchInfo {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- sequencerFileName := fmt.Sprintf("sequencer_%d.bin", msg.Number)
- err = os.WriteFile(filepath.Join(outDirPath, sequencerFileName), msg.Data, 0644)
- if err != nil {
- return err
- }
- _, err = cmdFile.WriteString(" --inbox " + sequencerFileName)
- if err != nil {
- return err
- }
- }
-
- preimageFile, err := os.Create(filepath.Join(outDirPath, "preimages.bin"))
- if err != nil {
- return err
- }
- defer preimageFile.Close()
- for ty, preimages := range input.Preimages {
- _, err = preimageFile.Write([]byte{byte(ty)})
- if err != nil {
- return err
- }
- for _, data := range preimages {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- lenbytes := make([]byte, 8)
- binary.LittleEndian.PutUint64(lenbytes, uint64(len(data)))
- _, err := preimageFile.Write(lenbytes)
- if err != nil {
- return err
- }
- _, err = preimageFile.Write(data)
- if err != nil {
- return err
- }
- }
- }
-
- _, err = cmdFile.WriteString(" --preimages preimages.bin")
- if err != nil {
- return err
- }
-
- if input.HasDelayedMsg {
- if ctx.Err() != nil {
- return ctx.Err()
- }
- _, err = cmdFile.WriteString(fmt.Sprintf(" --delayed-inbox-position %d", input.DelayedMsgNr))
- if err != nil {
- return err
- }
- filename := fmt.Sprintf("delayed_%d.bin", input.DelayedMsgNr)
- err = os.WriteFile(filepath.Join(outDirPath, filename), input.DelayedMsg, 0644)
- if err != nil {
- return err
- }
- _, err = cmdFile.WriteString(fmt.Sprintf(" --delayed-inbox %s", filename))
- if err != nil {
- return err
- }
- }
-
- _, err = cmdFile.WriteString(" \"$@\"\n")
- if err != nil {
- return err
- }
- return nil
-}
-
-func (v *ArbitratorSpawner) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] {
- return stopwaiter.LaunchPromiseThread[struct{}](v, func(ctx context.Context) (struct{}, error) {
- err := v.writeToFile(ctx, input, expOut, moduleRoot)
- return struct{}{}, err
- })
-}
-
func (v *ArbitratorSpawner) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] {
getMachine := func(ctx context.Context) (MachineInterface, error) {
initialFrozenMachine, err := v.machineLoader.GetZeroStepMachine(ctx, wasmModuleRoot)
diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go
index e4fb840cbb..0748101277 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -9,13 +9,14 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net"
"os"
"os/exec"
- "runtime"
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/offchainlabs/nitro/util/arbmath"
@@ -29,9 +30,10 @@ type JitMachine struct {
process *exec.Cmd
stdin io.WriteCloser
wasmMemoryUsageLimit int
+ maxExecutionTime time.Duration
}
-func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, moduleRoot common.Hash, fatalErrChan chan error) (*JitMachine, error) {
+func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmMemoryUsageLimit int, maxExecutionTime time.Duration, _ common.Hash, fatalErrChan chan error) (*JitMachine, error) {
invocation := []string{"--binary", binaryPath, "--forks"}
if cranelift {
invocation = append(invocation, "--cranelift")
@@ -54,6 +56,7 @@ func createJitMachine(jitBinary string, binaryPath string, cranelift bool, wasmM
process: process,
stdin: stdin,
wasmMemoryUsageLimit: wasmMemoryUsageLimit,
+ maxExecutionTime: maxExecutionTime,
}
return machine, nil
}
@@ -72,7 +75,7 @@ func (machine *JitMachine) prove(
defer cancel() // ensure our cleanup functions run when we're done
state := validator.GoGlobalState{}
- timeout := time.Now().Add(60 * time.Second)
+ timeout := time.Now().Add(machine.maxExecutionTime)
tcp, err := net.ListenTCP("tcp4", &net.TCPAddr{
IP: []byte{127, 0, 0, 1},
})
@@ -125,6 +128,13 @@ func (machine *JitMachine) prove(
writeUint32 := func(data uint32) error {
return writeExact(arbmath.Uint32ToBytes(data))
}
+ writeIntAsUint32 := func(data int) error {
+ if data < 0 || data > math.MaxUint32 {
+ return fmt.Errorf("attempted to write out-of-bounds int %v as uint32", data)
+ }
+ // #nosec G115
+ return writeUint32(uint32(data))
+ }
writeUint64 := func(data uint64) error {
return writeExact(arbmath.UintToBytes(data))
}
@@ -192,14 +202,14 @@ func (machine *JitMachine) prove(
// send known preimages
preimageTypes := entry.Preimages
- if err := writeUint32(uint32(len(preimageTypes))); err != nil {
+ if err := writeIntAsUint32(len(preimageTypes)); err != nil {
return state, err
}
for ty, preimages := range preimageTypes {
if err := writeUint8(uint8(ty)); err != nil {
return state, err
}
- if err := writeUint32(uint32(len(preimages))); err != nil {
+ if err := writeIntAsUint32(len(preimages)); err != nil {
return state, err
}
for hash, preimage := range preimages {
@@ -212,18 +222,19 @@ func (machine *JitMachine) prove(
}
}
- userWasms := entry.UserWasms[runtime.GOARCH]
+ localTarget := rawdb.LocalTarget()
+ userWasms := entry.UserWasms[localTarget]
// if there are user wasms, but only for wrong architecture - error
if len(userWasms) == 0 {
for arch, userWasms := range entry.UserWasms {
if len(userWasms) != 0 {
- return state, fmt.Errorf("bad stylus arch for validation input. got: %v, expected: %v", arch, runtime.GOARCH)
+ return state, fmt.Errorf("bad stylus arch for validation input. got: %v, expected: %v", arch, localTarget)
}
}
}
- if err := writeUint32(uint32(len(userWasms))); err != nil {
+ if err := writeIntAsUint32(len(userWasms)); err != nil {
return state, err
}
for moduleHash, program := range userWasms {
@@ -297,9 +308,11 @@ func (machine *JitMachine) prove(
if err != nil {
return state, fmt.Errorf("failed to read memory usage from Jit machine: %w", err)
}
+ // #nosec G115
if memoryUsed > uint64(machine.wasmMemoryUsageLimit) {
log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed)
}
+ // #nosec G115
jitWasmMemoryUsage.Update(int64(memoryUsed))
return state, nil
default:
diff --git a/validator/server_jit/machine_loader.go b/validator/server_jit/machine_loader.go
index cfa475370c..3d8b01367f 100644
--- a/validator/server_jit/machine_loader.go
+++ b/validator/server_jit/machine_loader.go
@@ -7,6 +7,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "time"
"github.com/ethereum/go-ethereum/common"
"github.com/offchainlabs/nitro/validator/server_common"
@@ -52,14 +53,14 @@ type JitMachineLoader struct {
stopped bool
}
-func NewJitMachineLoader(config *JitMachineConfig, locator *server_common.MachineLocator, fatalErrChan chan error) (*JitMachineLoader, error) {
+func NewJitMachineLoader(config *JitMachineConfig, locator *server_common.MachineLocator, maxExecutionTime time.Duration, fatalErrChan chan error) (*JitMachineLoader, error) {
jitPath, err := getJitPath()
if err != nil {
return nil, err
}
createMachineThreadFunc := func(ctx context.Context, moduleRoot common.Hash) (*JitMachine, error) {
binPath := filepath.Join(locator.GetMachinePath(moduleRoot), config.ProverBinPath)
- return createJitMachine(jitPath, binPath, config.JitCranelift, config.WasmMemoryUsageLimit, moduleRoot, fatalErrChan)
+ return createJitMachine(jitPath, binPath, config.JitCranelift, config.WasmMemoryUsageLimit, maxExecutionTime, moduleRoot, fatalErrChan)
}
return &JitMachineLoader{
MachineLoader: *server_common.NewMachineLoader[JitMachine](locator, createMachineThreadFunc),
diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go
index 5ba3664109..f30b6e181a 100644
--- a/validator/server_jit/spawner.go
+++ b/validator/server_jit/spawner.go
@@ -3,12 +3,14 @@ package server_jit
import (
"context"
"fmt"
+ flag "github.com/spf13/pflag"
"runtime"
"sync/atomic"
-
- flag "github.com/spf13/pflag"
+ "time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/util/stopwaiter"
"github.com/offchainlabs/nitro/validator"
@@ -16,8 +18,9 @@ import (
)
type JitSpawnerConfig struct {
- Workers int `koanf:"workers" reload:"hot"`
- Cranelift bool `koanf:"cranelift"`
+ Workers int `koanf:"workers" reload:"hot"`
+ Cranelift bool `koanf:"cranelift"`
+ MaxExecutionTime time.Duration `koanf:"max-execution-time" reload:"hot"`
// TODO: change WasmMemoryUsageLimit to a string and use resourcemanager.ParseMemLimit
WasmMemoryUsageLimit int `koanf:"wasm-memory-usage-limit"`
@@ -28,6 +31,7 @@ type JitSpawnerConfigFecher func() *JitSpawnerConfig
var DefaultJitSpawnerConfig = JitSpawnerConfig{
Workers: 0,
Cranelift: true,
+ MaxExecutionTime: time.Minute * 10,
WasmMemoryUsageLimit: 4294967296, // 2^32 WASM memeory limit
}
@@ -35,6 +39,7 @@ func JitSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) {
f.Int(prefix+".workers", DefaultJitSpawnerConfig.Workers, "number of concurrent validation threads")
f.Bool(prefix+".cranelift", DefaultJitSpawnerConfig.Cranelift, "use Cranelift instead of LLVM when validating blocks using the jit-accelerated block validator")
f.Int(prefix+".wasm-memory-usage-limit", DefaultJitSpawnerConfig.WasmMemoryUsageLimit, "if memory used by a jit wasm exceeds this limit, a warning is logged")
+	f.Duration(prefix+".max-execution-time", DefaultJitSpawnerConfig.MaxExecutionTime, "if execution time used by a jit wasm exceeds this limit, an RPC error is returned")
}
type JitSpawner struct {
@@ -50,7 +55,8 @@ func NewJitSpawner(locator *server_common.MachineLocator, config JitSpawnerConfi
machineConfig := DefaultJitMachineConfig
machineConfig.JitCranelift = config().Cranelift
machineConfig.WasmMemoryUsageLimit = config().WasmMemoryUsageLimit
- loader, err := NewJitMachineLoader(&machineConfig, locator, fatalErrChan)
+ maxExecutionTime := config().MaxExecutionTime
+ loader, err := NewJitMachineLoader(&machineConfig, locator, maxExecutionTime, fatalErrChan)
if err != nil {
return nil, err
}
@@ -71,8 +77,8 @@ func (v *JitSpawner) WasmModuleRoots() ([]common.Hash, error) {
return v.locator.ModuleRoots(), nil
}
-func (v *JitSpawner) StylusArchs() []string {
- return []string{runtime.GOARCH}
+func (v *JitSpawner) StylusArchs() []ethdb.WasmTarget {
+ return []ethdb.WasmTarget{rawdb.LocalTarget()}
}
func (v *JitSpawner) execute(
diff --git a/validator/validation_entry.go b/validator/validation_entry.go
index 133a67a8a8..4ec6919d3b 100644
--- a/validator/validation_entry.go
+++ b/validator/validation_entry.go
@@ -2,13 +2,13 @@ package validator
import (
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/arbutil"
)
type BatchInfo struct {
- Number uint64
- BlockHash common.Hash
- Data []byte
+ Number uint64
+ Data []byte
}
type ValidationInput struct {
@@ -16,7 +16,7 @@ type ValidationInput struct {
HasDelayedMsg bool
DelayedMsgNr uint64
Preimages map[arbutil.PreimageType]map[common.Hash][]byte
- UserWasms map[string]map[common.Hash][]byte
+ UserWasms map[ethdb.WasmTarget]map[common.Hash][]byte
BatchInfo []BatchInfo
DelayedMsg []byte
StartState GoGlobalState
diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go
index fb7db1e870..e0d53ffb2e 100644
--- a/validator/valnode/redis/consumer.go
+++ b/validator/valnode/redis/consumer.go
@@ -3,6 +3,7 @@ package redis
import (
"context"
"fmt"
+ "runtime"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -22,8 +23,9 @@ type ValidationServer struct {
spawner validator.ValidationSpawner
// consumers stores moduleRoot to consumer mapping.
- consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState]
- streamTimeout time.Duration
+ consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState]
+
+ config *ValidationServerConfig
}
func NewValidationServer(cfg *ValidationServerConfig, spawner validator.ValidationSpawner) (*ValidationServer, error) {
@@ -44,9 +46,9 @@ func NewValidationServer(cfg *ValidationServerConfig, spawner validator.Validati
consumers[mr] = c
}
return &ValidationServer{
- consumers: consumers,
- spawner: spawner,
- streamTimeout: cfg.StreamTimeout,
+ consumers: consumers,
+ spawner: spawner,
+ config: cfg,
}, nil
}
@@ -54,6 +56,23 @@ func (s *ValidationServer) Start(ctx_in context.Context) {
s.StopWaiter.Start(ctx_in, s)
// Channel that all consumers use to indicate their readiness.
readyStreams := make(chan struct{}, len(s.consumers))
+ type workUnit struct {
+ req *pubsub.Message[*validator.ValidationInput]
+ moduleRoot common.Hash
+ }
+ workers := s.config.Workers
+ if workers == 0 {
+ workers = runtime.NumCPU()
+ }
+ workQueue := make(chan workUnit, workers)
+ tokensCount := workers
+ if s.config.BufferReads {
+ tokensCount += workers
+ }
+ requestTokenQueue := make(chan struct{}, tokensCount)
+ for i := 0; i < tokensCount; i++ {
+ requestTokenQueue <- struct{}{}
+ }
for moduleRoot, c := range s.consumers {
c := c
moduleRoot := moduleRoot
@@ -84,26 +103,31 @@ func (s *ValidationServer) Start(ctx_in context.Context) {
case <-ready: // Wait until the stream exists and start consuming iteratively.
}
s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration {
+ log.Debug("waiting for request token", "cid", c.Id())
+ select {
+ case <-ctx.Done():
+ return 0
+ case <-requestTokenQueue:
+ }
+ log.Debug("got request token", "cid", c.Id())
req, err := c.Consume(ctx)
if err != nil {
log.Error("Consuming request", "error", err)
+ requestTokenQueue <- struct{}{}
return 0
}
if req == nil {
- // There's nothing in the queue.
+ log.Debug("consumed nil", "cid", c.Id())
+ // There's nothing in the queue
+ requestTokenQueue <- struct{}{}
return time.Second
}
- valRun := s.spawner.Launch(req.Value, moduleRoot)
- res, err := valRun.Await(ctx)
- if err != nil {
- log.Error("Error validating", "request value", req.Value, "error", err)
- return 0
- }
- if err := c.SetResult(ctx, req.ID, res); err != nil {
- log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err)
- return 0
+ log.Debug("forwarding work", "cid", c.Id(), "workid", req.ID)
+ select {
+ case <-ctx.Done():
+ case workQueue <- workUnit{req, moduleRoot}:
}
- return time.Second
+ return 0
})
})
}
@@ -111,9 +135,9 @@ func (s *ValidationServer) Start(ctx_in context.Context) {
for {
select {
case <-readyStreams:
- log.Trace("At least one stream is ready")
+ log.Debug("At least one stream is ready")
return // Don't block Start if at least one of the stream is ready.
- case <-time.After(s.streamTimeout):
+ case <-time.After(s.config.StreamTimeout):
log.Error("Waiting for redis streams timed out")
case <-ctx.Done():
log.Info("Context done while waiting redis streams to be ready, failed to start")
@@ -121,6 +145,37 @@ func (s *ValidationServer) Start(ctx_in context.Context) {
}
}
})
+ for i := 0; i < workers; i++ {
+ i := i
+ s.StopWaiter.LaunchThread(func(ctx context.Context) {
+ for {
+ log.Debug("waiting for work", "thread", i)
+ var work workUnit
+ select {
+ case <-ctx.Done():
+ return
+ case work = <-workQueue:
+ }
+ log.Debug("got work", "thread", i, "workid", work.req.ID)
+ valRun := s.spawner.Launch(work.req.Value, work.moduleRoot)
+ res, err := valRun.Await(ctx)
+ if err != nil {
+ log.Error("Error validating", "request value", work.req.Value, "error", err)
+ } else {
+ log.Debug("done work", "thread", i, "workid", work.req.ID)
+ if err := s.consumers[work.moduleRoot].SetResult(ctx, work.req.ID, res); err != nil {
+ log.Error("Error setting result for request", "id", work.req.ID, "result", res, "error", err)
+ }
+ log.Debug("set result", "thread", i, "workid", work.req.ID)
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case requestTokenQueue <- struct{}{}:
+ }
+ }
+ })
+ }
}
type ValidationServerConfig struct {
@@ -131,6 +186,8 @@ type ValidationServerConfig struct {
// Timeout on polling for existence of each redis stream.
StreamTimeout time.Duration `koanf:"stream-timeout"`
StreamPrefix string `koanf:"stream-prefix"`
+ Workers int `koanf:"workers"`
+ BufferReads bool `koanf:"buffer-reads"`
}
var DefaultValidationServerConfig = ValidationServerConfig{
@@ -139,6 +196,8 @@ var DefaultValidationServerConfig = ValidationServerConfig{
ConsumerConfig: pubsub.DefaultConsumerConfig,
ModuleRoots: []string{},
StreamTimeout: 10 * time.Minute,
+ Workers: 0,
+ BufferReads: true,
}
var TestValidationServerConfig = ValidationServerConfig{
@@ -147,6 +206,8 @@ var TestValidationServerConfig = ValidationServerConfig{
ConsumerConfig: pubsub.TestConsumerConfig,
ModuleRoots: []string{},
StreamTimeout: time.Minute,
+ Workers: 1,
+ BufferReads: true,
}
func ValidationServerConfigAddOptions(prefix string, f *pflag.FlagSet) {
@@ -155,6 +216,8 @@ func ValidationServerConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.String(prefix+".redis-url", DefaultValidationServerConfig.RedisURL, "url of redis server")
f.String(prefix+".stream-prefix", DefaultValidationServerConfig.StreamPrefix, "prefix for stream name")
f.Duration(prefix+".stream-timeout", DefaultValidationServerConfig.StreamTimeout, "Timeout on polling for existence of redis streams")
+ f.Int(prefix+".workers", DefaultValidationServerConfig.Workers, "number of validation threads (0 to use number of CPUs)")
+ f.Bool(prefix+".buffer-reads", DefaultValidationServerConfig.BufferReads, "buffer reads (read next while working)")
}
func (cfg *ValidationServerConfig) Enabled() bool {
diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go
index 6245ffc5e3..ef3e1b2c49 100644
--- a/validator/valnode/validation_api.go
+++ b/validator/valnode/validation_api.go
@@ -12,6 +12,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/offchainlabs/nitro/util/stopwaiter"
"github.com/offchainlabs/nitro/validator"
@@ -44,7 +45,7 @@ func (a *ValidationServerAPI) WasmModuleRoots() ([]common.Hash, error) {
return a.spawner.WasmModuleRoots()
}
-func (a *ValidationServerAPI) StylusArchs() ([]string, error) {
+func (a *ValidationServerAPI) StylusArchs() ([]ethdb.WasmTarget, error) {
return a.spawner.StylusArchs(), nil
}
@@ -117,15 +118,6 @@ func (a *ExecServerAPI) Start(ctx_in context.Context) {
a.CallIteratively(a.removeOldRuns)
}
-func (a *ExecServerAPI) WriteToFile(ctx context.Context, jsonInput *server_api.InputJSON, expOut validator.GoGlobalState, moduleRoot common.Hash) error {
- input, err := server_api.ValidationInputFromJson(jsonInput)
- if err != nil {
- return err
- }
- _, err = a.execSpawner.WriteToFile(input, expOut, moduleRoot).Await(ctx)
- return err
-}
-
var errRunNotFound error = errors.New("run not found")
func (a *ExecServerAPI) getRun(id uint64) (validator.ExecutionRun, error) {
diff --git a/wavmio/stub.go b/wavmio/stub.go
index 7fd29e2062..0c82506ff3 100644
--- a/wavmio/stub.go
+++ b/wavmio/stub.go
@@ -60,13 +60,14 @@ func parsePreimageBytes(path string) {
if read != len(lenBuf) {
panic(fmt.Sprintf("missing bytes reading len got %d", read))
}
- fieldSize := int(binary.LittleEndian.Uint64(lenBuf))
+ fieldSize := binary.LittleEndian.Uint64(lenBuf)
dataBuf := make([]byte, fieldSize)
read, err = file.Read(dataBuf)
if err != nil {
panic(err)
}
- if read != fieldSize {
+ // #nosec G115
+ if uint64(read) != fieldSize {
panic("missing bytes reading data")
}
hash := crypto.Keccak256Hash(dataBuf)
@@ -77,18 +78,18 @@ func parsePreimageBytes(path string) {
func StubInit() {
preimages = make(map[common.Hash][]byte)
var delayedMsgPath arrayFlags
- seqMsgPosFlag := flag.Int("inbox-position", 0, "position for sequencer inbox message")
- posWithinMsgFlag := flag.Int("position-within-message", 0, "position inside sequencer inbox message")
- delayedPositionFlag := flag.Int("delayed-inbox-position", 0, "position for first delayed inbox message")
+ seqMsgPosFlag := flag.Uint64("inbox-position", 0, "position for sequencer inbox message")
+ posWithinMsgFlag := flag.Uint64("position-within-message", 0, "position inside sequencer inbox message")
+ delayedPositionFlag := flag.Uint64("delayed-inbox-position", 0, "position for first delayed inbox message")
lastBlockFlag := flag.String("last-block-hash", "0000000000000000000000000000000000000000000000000000000000000000", "lastBlockHash")
flag.Var(&delayedMsgPath, "delayed-inbox", "delayed inbox messages (multiple values)")
inboxPath := flag.String("inbox", "", "file to load sequencer message")
preimagesPath := flag.String("preimages", "", "file to load preimages from")
flag.Parse()
- seqMsgPos = uint64(*seqMsgPosFlag)
- posWithinMsg = uint64(*posWithinMsgFlag)
- delayedMsgFirstPos = uint64(*delayedPositionFlag)
+ seqMsgPos = *seqMsgPosFlag
+ posWithinMsg = *posWithinMsgFlag
+ delayedMsgFirstPos = *delayedPositionFlag
lastBlockHash = common.HexToHash(*lastBlockFlag)
for _, path := range delayedMsgPath {
msg, err := os.ReadFile(path)
@@ -125,7 +126,7 @@ func ReadInboxMessage(msgNum uint64) []byte {
}
func ReadDelayedInboxMessage(seqNum uint64) []byte {
- if seqNum < delayedMsgFirstPos || (int(seqNum-delayedMsgFirstPos) > len(delayedMsgs)) {
+ if seqNum < delayedMsgFirstPos || (seqNum-delayedMsgFirstPos > uint64(len(delayedMsgs))) {
panic(fmt.Sprintf("trying to read bad delayed msg %d", seqNum))
}
return delayedMsgs[seqNum-delayedMsgFirstPos]
diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go
index 16a8f64daf..00ae0f0dcf 100644
--- a/wsbroadcastserver/clientconnection.go
+++ b/wsbroadcastserver/clientconnection.go
@@ -135,6 +135,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba
msgs := prevSegment.Messages()
if isFirstSegment && prevSegment.Contains(uint64(cc.requestedSeqNum)) {
+ // #nosec G115
requestedIdx := int(cc.requestedSeqNum) - int(prevSegment.Start())
// This might be false if messages were added after we fetched the segment's messages
if len(msgs) >= requestedIdx {