diff --git a/.dockerignore b/.dockerignore index d41a52fa4c2..59626a5bde9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,4 +10,3 @@ cmd/prometheus vendor cache.db -.git diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml index 472ef416b9e..afdb7a86cc4 100644 --- a/.github/workflows/ci_zkevm.yml +++ b/.github/workflows/ci_zkevm.yml @@ -13,6 +13,7 @@ on: - reopened - synchronize - ready_for_review + workflow_dispatch: concurrency: group: ${{ github.ref }} @@ -92,18 +93,27 @@ jobs: working-directory: ./cdk-erigon run: docker build -t cdk-erigon:local --file Dockerfile . + - name: Remove unused flags + working-directory: ./kurtosis-cdk + run: | + sed -i '/zkevm.sequencer-batch-seal-time:/d' templates/cdk-erigon/config.yml + - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | /usr/local/bin/yq -i '.args.data_availability_mode = "rollup"' cdk-erigon-sequencer-params.yml /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "cdk-erigon:local"' cdk-erigon-sequencer-params.yml /usr/local/bin/yq -i '.args.zkevm_bridge_service_image = "hermeznetwork/zkevm-bridge-service:v0.5.0-RC8"' cdk-erigon-sequencer-params.yml - sed -i '/zkevm\.sequencer-initial-fork-id/d' ./templates/cdk-erigon/config-sequencer.yaml - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk run: kurtosis run --enclave cdk-v1 --args-file cdk-erigon-sequencer-params.yml --image-download always . 
+ - name: Override gas limit for test transactions + working-directory: ./kurtosis-cdk + run: | + sed -i 's/--gas-limit [0-9]*/--gas-limit 100000/gi' .github/actions/monitor-cdk-verified-batches/batch_verification_monitor.sh + - name: Monitor verified batches working-directory: ./kurtosis-cdk shell: bash @@ -253,4 +263,4 @@ jobs: uses: actions/upload-artifact@v3 with: name: logs_${{ github.run_id }} - path: ./kurtosis-cdk/ci_logs \ No newline at end of file + path: ./kurtosis-cdk/ci_logs diff --git a/.github/workflows/nightly-ansible.yml b/.github/workflows/nightly-ansible.yml index a74367a7988..d5b55d307a9 100644 --- a/.github/workflows/nightly-ansible.yml +++ b/.github/workflows/nightly-ansible.yml @@ -12,10 +12,10 @@ jobs: strategy: matrix: include: - - name: "integration-5" - rpc_url: "http://34.175.214.161:8500" - eth_address: "0x41BB6960a5156aC29d6e9E04273837AD19d6691A" - secret_name: "NETWORK5_PRIVATE_KEY" + - name: "integration-8" + rpc_url: "http://34.175.214.161:18124" + eth_address: "0x126240A22FF66101131b0BcE1D6c27498ace7D41" + secret_name: "NETWORK5_PRIVATE_KEY_0X126" - name: "cdk-1" rpc_url: "http://34.175.214.161:8020" eth_address: "0x673df8221611aD1f714053b82c4F9E2b0867CcC6" diff --git a/.github/workflows/nightly-bridge-erc20.yml b/.github/workflows/nightly-bridge-erc20.yml index 238c9c19c93..37cb9810a62 100644 --- a/.github/workflows/nightly-bridge-erc20.yml +++ b/.github/workflows/nightly-bridge-erc20.yml @@ -11,14 +11,14 @@ jobs: strategy: matrix: include: - - profile_name: "network5" + - profile_name: "network8" l1_ep: "https://rpc.eu-central-1.gateway.fm/v4/ethereum/non-archival/sepolia" - l2_ep: "http://34.175.214.161:8500" - bridge_ep: "http://34.175.214.161:8085" + l2_ep: "http://34.175.214.161:18124" + bridge_ep: "http://34.175.214.161:18080" l1_pk_secret_name: "L1_SEPOLIA_FUNDED_PRIVATE_KEY" - l2_pk_secret_name: "NETWORK5_PRIVATE_KEY" - bridge_addr: "0xb566BE17B22404BD46F389030ec5592F8ffAde12" - + l2_pk_secret_name: 
"NETWORK5_PRIVATE_KEY_0X126" + bridge_addr: "0x27DAeD0badd500740762d1792F3277a7F3DAdd79" + steps: - name: Clone bridge repository run: git clone --recurse-submodules -j8 https://github.com/0xPolygonHermez/zkevm-bridge-service.git -b feature/test_bridge_messages_real_network-v2 bridge diff --git a/.github/workflows/nightly-bridge-msg.yml b/.github/workflows/nightly-bridge-msg.yml index 949874e1493..9c4d71bf0c7 100644 --- a/.github/workflows/nightly-bridge-msg.yml +++ b/.github/workflows/nightly-bridge-msg.yml @@ -11,14 +11,14 @@ jobs: strategy: matrix: include: - - profile_name: "network5" + - profile_name: "network8" l1_ep: "https://rpc.eu-central-1.gateway.fm/v4/ethereum/non-archival/sepolia" - l2_ep: "http://34.175.214.161:8500" - bridge_ep: "http://34.175.214.161:8085" + l2_ep: "http://34.175.214.161:18124" + bridge_ep: "http://34.175.214.161:18080" l1_pk_secret_name: "L1_SEPOLIA_FUNDED_PRIVATE_KEY" - l2_pk_secret_name: "NETWORK5_PRIVATE_KEY" - bridge_addr: "0xb566BE17B22404BD46F389030ec5592F8ffAde12" - + l2_pk_secret_name: "NETWORK5_PRIVATE_KEY_0X126" + bridge_addr: "0x27DAeD0badd500740762d1792F3277a7F3DAdd79" + steps: - name: Clone bridge repository run: git clone --recurse-submodules -j8 https://github.com/0xPolygonHermez/zkevm-bridge-service.git -b develop bridge diff --git a/.github/workflows/nightly-eth-bench.yml b/.github/workflows/nightly-eth-bench.yml index 670880aa2ef..708608e4b16 100644 --- a/.github/workflows/nightly-eth-bench.yml +++ b/.github/workflows/nightly-eth-bench.yml @@ -11,13 +11,13 @@ jobs: strategy: matrix: include: - - profile_name: "network5" + - profile_name: "network8" l1_ep: "https://rpc.eu-central-1.gateway.fm/v4/ethereum/non-archival/sepolia" - l2_ep: "http://34.175.214.161:8500" - bridge_ep: "http://34.175.214.161:8085" + l2_ep: "http://34.175.214.161:18124" + bridge_ep: "http://34.175.214.161:18080" l1_pk_secret_name: "L1_SEPOLIA_FUNDED_PRIVATE_KEY" l2_pk_secret_name: "NETWORK5_PRIVATE_KEY_0X126" - bridge_addr: 
"0xb566BE17B22404BD46F389030ec5592F8ffAde12" + bridge_addr: "0x27DAeD0badd500740762d1792F3277a7F3DAdd79" steps: - name: Checkout current repository @@ -70,7 +70,7 @@ jobs: --env PROFILE=${PROFILE} \ --workdir /app \ eth-bench-temp-${{ matrix.profile_name}} \ - python bench.py -p "$PROFILE" -c 5 -t 12 --all --recover + python bench.py -p "$PROFILE" -c 5 -t 12 --confirmed --allconfirmed --unconfirmed --erc20 --uniswap --precompileds --pairings --eventminter --recover docker run --rm \ --volume "$(pwd):/app" \ diff --git a/.github/workflows/nightly-l1-recovery.yml b/.github/workflows/nightly-l1-recovery.yml index b521ad14373..f3a500305f5 100644 --- a/.github/workflows/nightly-l1-recovery.yml +++ b/.github/workflows/nightly-l1-recovery.yml @@ -21,6 +21,6 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v2 - - name: Run Docker Compose + - name: Run Network 8 Recovery working-directory: ./zk/tests/nightly-l1-recovery - run: docker compose build --quiet && docker compose up --exit-code-from=block-checker + run: docker compose -f docker-compose-8.yml build --quiet && docker compose -f docker-compose-8.yml up --exit-code-from=block-checker diff --git a/.github/workflows/nightly-node-compare.yml b/.github/workflows/nightly-node-compare.yml index b0c45a21754..a83e4a609c8 100644 --- a/.github/workflows/nightly-node-compare.yml +++ b/.github/workflows/nightly-node-compare.yml @@ -12,10 +12,10 @@ jobs: strategy: matrix: nodes: - - name: "Integration 5" - erigon: "http://34.175.214.161:8500" - zkevm: "http://34.175.214.161:8505" - sequencer: "http://34.175.214.161:8005" + - name: "Integration 8" + erigon: "http://34.175.214.161:18124" + zkevm: "http://34.175.214.161:18505" + sequencer: "http://34.175.214.161:18123" compare-blocks: 1000 allowed-block-diff: 2000 diff --git a/.github/workflows/nightly-rpc-batch-compare.yml b/.github/workflows/nightly-rpc-batch-compare.yml index e79cc817e01..d1192a3cef1 100644 --- a/.github/workflows/nightly-rpc-batch-compare.yml +++ 
b/.github/workflows/nightly-rpc-batch-compare.yml @@ -16,9 +16,9 @@ jobs: - name: bali legacy: "https://rpc.zkevm-internal.com" erigon: "https://rpc.internal.zkevm-rpc.com" - - name: integration5 - legacy: "http://34.175.214.161:8505" - erigon: "http://34.175.214.161:8500" + - name: integration8 + legacy: "http://34.175.214.161:18505" + erigon: "http://34.175.214.161:18124" steps: - name: Checkout repository diff --git a/.gitignore b/.gitignore index fccb0616825..8b2987eaec7 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,9 @@ profile.cov # VS Code .vscode +# PYTHON ENV +.python-env + # dashboard /dashboard/assets/flow-typed /dashboard/assets/node_modules diff --git a/README.md b/README.md index ff0869dde7e..321e678be54 100644 --- a/README.md +++ b/README.md @@ -4,16 +4,22 @@ cdk-erigon is a fork of Erigon, currently in Alpha, optimized for syncing with t *** ## Release Roadmap -- **v0.9.x**: Support for Cardona testnet -- **v1.x.x**: Support for Mainnet +- **v1.1.x**: RPC (full support) +- **v2.x.x**: Sequencer (full support) - **v3.x.x**: Erigon 3 based (snapshot support) *** +## Hardware requirements + +* A Linux-based OS (e.g., Ubuntu Server 22.04 LTS). +* At least 32GB RAM with a 4-core CPU. +* Both Apple Silicon and AMD64 are supported. + ## Chain/Fork Support Current status of cdk-erigon's support for running various chains and fork ids: -- zkEVM Cardona testnet — beta support +- zkEVM Cardona testnet — full support - zkEVM mainnet — beta support - CDK Chains - beta support (forkid.9 and above) @@ -58,6 +64,16 @@ In order to retrieve data from the L1, the L1 syncer must be configured to know - `zkevm.l1-highest-block-type` which defaults to retrieving the 'finalized' block, however there are cases where you may wish to pass 'safe' or 'latest'. +### L1 Cache +The node can cache the L1 requests/responses to speed up the sync and enable quicker responses to RPC requests requiring for example OldAccInputHash from the L1. 
This is enabled by default, +but can be controlled via the following flags: + +- `zkevm.l1-cache-enabled` - defaults to true, set to false to disable the cache +- `zkevm.l1-cache-port` - the port the cache server will run on, defaults to 6969 + +To transplant the cache between datadirs, the `l1cache` dir can be copied. To use an upstream cdk-erigon node's L1 cache, the zkevm.l1-cache-enabled can be set to false, and the node provided the endpoint of the cache, +instead of a regular L1 URL. e.g. `zkevm.l1-rpc-url=http://myerigonnode:6969?endpoint=http%3A%2F%2Fsepolia-rpc.com&chainid=2440`. NB: this node must be syncing the same network for any benefit! + ## Sequencer (WIP) Enable Sequencer: `CDK_ERIGON_SEQUENCER=1 ./build/bin/cdk-erigon ` diff --git a/cmd/utils/customflags_zkevm.go b/cmd/utils/customflags_zkevm.go new file mode 100644 index 00000000000..b56db836aa8 --- /dev/null +++ b/cmd/utils/customflags_zkevm.go @@ -0,0 +1,87 @@ +package utils + +import ( + "flag" + "fmt" + + "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon/cmd/utils/flags" + "github.com/urfave/cli/v2" ) + +// DatasizeFlag is a custom cli.Flag type which holds a datasize.ByteSize value +// parsed from a human-readable string, e.g. "2GB". +type DatasizeFlag struct { + Name string + + Category string + DefaultText string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value datasizeFlagValue + + Aliases []string +} + +func (f *DatasizeFlag) Names() []string { return append([]string{f.Name}, f.Aliases...) } +func (f *DatasizeFlag) IsSet() bool { return f.HasBeenSet } +func (f *DatasizeFlag) String() string { return cli.FlagStringer(f) } + +// called by cli library, grabs variable from environment (if in env) +// and adds variable to flag set for parsing. 
+func (f *DatasizeFlag) Apply(set *flag.FlagSet) error { + flags.EachName(f, func(name string) { + set.Var(&f.Value, f.Name, f.Usage) + }) + return nil +} + +func (f *DatasizeFlag) IsRequired() bool { return f.Required } + +func (f *DatasizeFlag) IsVisible() bool { return !f.Hidden } + +func (f *DatasizeFlag) GetCategory() string { return f.Category } + +func (f *DatasizeFlag) TakesValue() bool { return true } +func (f *DatasizeFlag) GetUsage() string { return f.Usage } +func (f *DatasizeFlag) GetValue() string { return f.Value.String() } +func (f *DatasizeFlag) GetEnvVars() []string { return nil } // env not supported + +func (f *DatasizeFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +type datasizeFlagValue datasize.ByteSize + +func (b *datasizeFlagValue) String() string { + if b == nil { + return "" + } + a := datasize.ByteSize(*b) + return a.String() +} + +func (b *datasizeFlagValue) Set(s string) error { + val, err := datasize.ParseString(s) + if err != nil { + return fmt.Errorf("parse datasize: %v", err) + } + *b = datasizeFlagValue(val) + return nil +} + +// DatasizeFlagValue returns the value of a DatasizeFlag from the flag set. 
+func DatasizeFlagValue(ctx *cli.Context, name string) *datasize.ByteSize { + val := ctx.Generic(name) + if val == nil { + return nil + } + return (*datasize.ByteSize)(val.(*datasizeFlagValue)) +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ed199147df1..16aaf56413e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -430,6 +430,16 @@ var ( Usage: "Ethereum L1 RPC endpoint", Value: "", } + L1CacheEnabledFlag = cli.BoolFlag{ + Name: "zkevm.l1-cache-enabled", + Usage: "Enable the L1 cache", + Value: true, + } + L1CachePortFlag = cli.UintFlag{ + Name: "zkevm.l1-cache-port", + Usage: "The port used for the L1 cache", + Value: 6969, + } AddressSequencerFlag = cli.StringFlag{ Name: "zkevm.address-sequencer", Usage: "Sequencer address", @@ -516,6 +526,11 @@ var ( Usage: "Batch seal time. Defaults to 3s", Value: "3s", } + SequencerHaltOnBatchNumber = cli.Uint64Flag{ + Name: "zkevm.sequencer-halt-on-batch-number", + Usage: "Halt the sequencer on this batch number", + Value: 0, + } ExecutorUrls = cli.StringFlag{ Name: "zkevm.executor-urls", Usage: "A comma separated list of grpc addresses that host executors", @@ -536,6 +551,12 @@ var ( Usage: "The timeout for the executor request", Value: 500 * time.Millisecond, } + + WitnessMemdbSize = DatasizeFlag{ + Name: "zkevm.witness-memdb-size", + Usage: "A size of the memdb used on witness generation in format \"2GB\". 
Might fail generation for older batches if not enough for the unwind.", + Value: datasizeFlagValue(2 * datasize.GB), + } ExecutorMaxConcurrentRequests = cli.IntFlag{ Name: "zkevm.executor-max-concurrent-requests", Usage: "The maximum number of concurrent requests to the executor", @@ -1309,6 +1330,11 @@ var ( Usage: "Enable speed test", Value: false, } + YieldSizeFlag = cli.Uint64Flag{ + Name: "yieldsize", + Usage: "transaction count fetched from txpool each time", + Value: 1000, + } ) var MetricFlags = []cli.Flag{&MetricsEnabledFlag, &MetricsHTTPFlag, &MetricsPortFlag, &DiagDisabledFlag, &DiagEndpointAddrFlag, &DiagEndpointPortFlag, &DiagSpeedTestFlag} @@ -2091,6 +2117,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C setTxPool(ctx, cfg) cfg.TxPool = ethconfig.DefaultTxPool2Config(cfg) cfg.TxPool.DBDir = nodeConfig.Dirs.TxPool + cfg.YieldSize = ctx.Uint64(YieldSizeFlag.Name) setEthash(ctx, nodeConfig.Dirs.DataDir, cfg) setClique(ctx, &cfg.Clique, nodeConfig.Dirs.DataDir) diff --git a/cmd/utils/flags/flags.go b/cmd/utils/flags/flags.go index be0a8a396b7..3dc48d116f9 100644 --- a/cmd/utils/flags/flags.go +++ b/cmd/utils/flags/flags.go @@ -85,7 +85,7 @@ func (f *DirectoryFlag) String() string { return cli.FlagStringer(f) } // Apply called by cli library, grabs variable from environment (if in env) // and adds variable to flag set for parsing. 
func (f *DirectoryFlag) Apply(set *flag.FlagSet) error { - eachName(f, func(name string) { + EachName(f, func(name string) { set.Var(&f.Value, f.Name, f.Usage) }) return nil @@ -171,7 +171,7 @@ func (f *TextMarshalerFlag) IsSet() bool { return f.HasBeenSet } func (f *TextMarshalerFlag) String() string { return cli.FlagStringer(f) } func (f *TextMarshalerFlag) Apply(set *flag.FlagSet) error { - eachName(f, func(name string) { + EachName(f, func(name string) { set.Var(textMarshalerVal{f.Value}, f.Name, f.Usage) }) return nil @@ -252,7 +252,7 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet } func (f *BigFlag) String() string { return cli.FlagStringer(f) } func (f *BigFlag) Apply(set *flag.FlagSet) error { - eachName(f, func(name string) { + EachName(f, func(name string) { f.Value = new(big.Int) set.Var((*bigValue)(f.Value), f.Name, f.Usage) }) @@ -346,7 +346,7 @@ func HomeDir() string { return "" } -func eachName(f cli.Flag, fn func(string)) { +func EachName(f cli.Flag, fn func(string)) { for _, name := range f.Names() { name = strings.Trim(name, " ") fn(name) diff --git a/core/state/intra_block_state_zkevm.go b/core/state/intra_block_state_zkevm.go index c37575eb8d0..dc63d421908 100644 --- a/core/state/intra_block_state_zkevm.go +++ b/core/state/intra_block_state_zkevm.go @@ -152,14 +152,14 @@ func (sdb *IntraBlockState) scalableSetBlockHash(blockNum uint64, blockHash *lib sdb.SetState(ADDRESS_SCALABLE_L2, &mkh, *hashAsBigU) } -func (sdb *IntraBlockState) GetBlockStateRoot(blockNum uint64) libcommon.Hash { - d1 := common.LeftPadBytes(uint256.NewInt(blockNum).Bytes(), 32) +func (sdb *IntraBlockState) GetBlockStateRoot(blockNum *uint256.Int) *uint256.Int { + d1 := common.LeftPadBytes(blockNum.Bytes(), 32) d2 := common.LeftPadBytes(STATE_ROOT_STORAGE_POS.Bytes(), 32) mapKey := keccak256.Hash(d1, d2) mkh := libcommon.BytesToHash(mapKey) hash := uint256.NewInt(0) sdb.GetState(ADDRESS_SCALABLE_L2, &mkh, hash) - return libcommon.BytesToHash(hash.Bytes()) + return 
hash } func (sdb *IntraBlockState) ScalableSetSmtRootHash(roHermezDb ReadOnlyHermezDb) error { diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go index 88037257aec..9e05f0d86d9 100644 --- a/core/vm/evmtypes/evmtypes.go +++ b/core/vm/evmtypes/evmtypes.go @@ -116,6 +116,6 @@ type IntraBlockState interface { AddLog(*types.Log) AddLog_zkEvm(*types.Log) GetLogs(hash common.Hash) []*types.Log - GetBlockStateRoot(blockNum uint64) common.Hash + GetBlockStateRoot(blockNum *uint256.Int) *uint256.Int GetBlockNumber() *uint256.Int } diff --git a/core/vm/instructions_zkevm.go b/core/vm/instructions_zkevm.go index dae26a2b685..ca24b40ec79 100644 --- a/core/vm/instructions_zkevm.go +++ b/core/vm/instructions_zkevm.go @@ -60,16 +60,9 @@ func opExtCodeHash_zkevm(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCo func opBlockhash_zkevm(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { num := scope.Stack.Peek() - num64, overflow := num.Uint64WithOverflow() - if overflow { - num.Clear() - return nil, nil - } ibs := interpreter.evm.IntraBlockState() - hash := ibs.GetBlockStateRoot(num64) - - num.SetFromBig(hash.Big()) + num.Set(ibs.GetBlockStateRoot(num)) return nil, nil } diff --git a/core/vm/instructions_zkevm_test.go b/core/vm/instructions_zkevm_test.go index 3f28ade4240..33ee754574e 100644 --- a/core/vm/instructions_zkevm_test.go +++ b/core/vm/instructions_zkevm_test.go @@ -144,8 +144,8 @@ func (ibs TestIntraBlockState) GetLogs(hash libcommon.Hash) []*types.Log { panic("implement me") } -func (ibs TestIntraBlockState) GetBlockStateRoot(blockNum uint64) libcommon.Hash { - return libcommon.BigToHash(new(big.Int).SetUint64(blockNum)) +func (ibs TestIntraBlockState) GetBlockStateRoot(blockNum *uint256.Int) *uint256.Int { + return uint256.NewInt(0).Set(blockNum) } func (ibs TestIntraBlockState) GetBlockNumber() *uint256.Int { diff --git a/core/vm/interpreter_zkevm.go b/core/vm/interpreter_zkevm.go index a8a321eb638..e297df5b16e 
100644 --- a/core/vm/interpreter_zkevm.go +++ b/core/vm/interpreter_zkevm.go @@ -44,6 +44,39 @@ func getJumpTable(cr *chain.Rules) *JumpTable { return jt } +func shouldExecuteLastOpCode(op OpCode) bool { + switch op { + case BLOCKHASH: + fallthrough + case CODESIZE: + fallthrough + case EXTCODESIZE: + fallthrough + case EXTCODECOPY: + fallthrough + case EXTCODEHASH: + fallthrough + case SELFBALANCE: + fallthrough + case BALANCE: + fallthrough + case CREATE: + fallthrough + case RETURN: + fallthrough + case CREATE2: + fallthrough + case SENDALL: + fallthrough + case SLOAD: + fallthrough + case SSTORE: + return true + } + + return false +} + // NewZKEVMInterpreter returns a new instance of the Interpreter. func NewZKEVMInterpreter(evm *EVM, cfg ZkConfig) *EVMInterpreter { jt := getJumpTable(evm.ChainRules()) @@ -138,10 +171,45 @@ func (in *EVMInterpreter) RunZk(contract *Contract, input []byte, readOnly bool) return } - // execute the operation in case of SLOAD | SSTORE + /* + The code below this line is executed in case of an error => it is reverted. + The single side-effect of this execution (which is reverted anyway) is accounts that are "touched", because "touches" are not reverted and they are needed during witness generation. + */ + + /* + Zkevm detects errors during execution of an opcode. + Cdk-erigon detects some errors (listed below) before execution of an opcode. + => zkevm may execute (partially) 1 additional opcode compared to cdk-erigon because cdk-erigon detects the errors before trying to execute an opcode. + + In terms of execution - everything is fine because there is an error and everything will be reverted. + In terms of "touched" accounts - there could be some accounts that are not "touched" because the 1 additional opcode is not executed (even partially). + => The additional opcode execution (even partially) could touch more accounts than cdk-erigon + + That's why we must execute the last opcode in order to mimic the zkevm logic. 
+ During this execution (that will be reverted anyway) we may detect panic but instead of stopping the node just ignore the panic. + By ignoring the panic we ensure that we've executed as much as possible of the additional 1 opcode. + */ + + // execute the operation in case of a list of opcodes + executeBecauseOfSpecificOpCodes := shouldExecuteLastOpCode(op) + + // execute the operation in case of early error detection + // _, errorIsUnderflow := err.(*ErrStackUnderflow) + // _, errorIsOverflow := err.(*ErrStackOverflow) + // executeBecauseOfEarlyErrorDetection := errors.Is(err, ErrOutOfGas) || errors.Is(err, ErrGasUintOverflow) || errorIsUnderflow || errorIsOverflow + + // uncomment the line above in order to enable execution based on error types in addition to opcode list + executeBecauseOfEarlyErrorDetection := false + // the actual result of this operation does not matter because it will be reverted anyway, because err != nil // we implement it this way in order execution to be identical to tracing - if op == SLOAD || op == SSTORE { + if executeBecauseOfSpecificOpCodes || executeBecauseOfEarlyErrorDetection { + defer func() { + // the goal of this recover is to catch a panic that could have happened during the execution of "in.jt[op].execute" below + // by ignoring the panic we are effectively executing as much as possible instructions of the last opcode before the error + recover() + }() + // we can safely use pc here instead of pcCopy, // because pc and pcCopy can be different only if the main loop finishes normally without error // but is it finishes normally without error then "ret" != nil and the .execute below will never be invoked at all diff --git a/core/vm/zk_batch_counters.go b/core/vm/zk_batch_counters.go index 5640ef2c93a..49bd91261e8 100644 --- a/core/vm/zk_batch_counters.go +++ b/core/vm/zk_batch_counters.go @@ -18,12 +18,20 @@ type BatchCounterCollector struct { forkId uint16 unlimitedCounters bool addonCounters *Counters + + rlpCombinedCounters 
Counters + executionCombinedCounters Counters + processingCombinedCounters Counters + + rlpCombinedCountersCache Counters + executionCombinedCountersCache Counters + processingCombinedCountersCache Counters } func NewBatchCounterCollector(smtMaxLevel int, forkId uint16, mcpReduction float64, unlimitedCounters bool, addonCounters *Counters) *BatchCounterCollector { smtLevels := calculateSmtLevels(smtMaxLevel, 0, mcpReduction) smtLevelsForTransaction := calculateSmtLevels(smtMaxLevel, 32, mcpReduction) - return &BatchCounterCollector{ + bcc := BatchCounterCollector{ transactions: []*TransactionCounter{}, smtLevels: smtLevels, smtLevelsForTransaction: smtLevelsForTransaction, @@ -32,6 +40,12 @@ func NewBatchCounterCollector(smtMaxLevel int, forkId uint16, mcpReduction float unlimitedCounters: unlimitedCounters, addonCounters: addonCounters, } + + bcc.rlpCombinedCounters = bcc.NewCounters() + bcc.executionCombinedCounters = bcc.NewCounters() + bcc.processingCombinedCounters = bcc.NewCounters() + + return &bcc } func (bcc *BatchCounterCollector) Clone() *BatchCounterCollector { @@ -55,6 +69,10 @@ func (bcc *BatchCounterCollector) Clone() *BatchCounterCollector { blockCount: bcc.blockCount, forkId: bcc.forkId, unlimitedCounters: bcc.unlimitedCounters, + + rlpCombinedCounters: bcc.rlpCombinedCounters.Clone(), + executionCombinedCounters: bcc.executionCombinedCounters.Clone(), + processingCombinedCounters: bcc.processingCombinedCounters.Clone(), } } @@ -69,6 +87,7 @@ func (bcc *BatchCounterCollector) AddNewTransactionCounters(txCounters *Transact } bcc.transactions = append(bcc.transactions, txCounters) + bcc.UpdateRlpCountersCache(txCounters) return bcc.CheckForOverflow(false) //no need to calculate the merkle proof here } @@ -202,19 +221,10 @@ func (bcc *BatchCounterCollector) CombineCollectors(verifyMerkleProof bool) (Cou } } - for _, tx := range bcc.transactions { - for k, v := range tx.rlpCounters.counters { - combined[k].used += v.used - combined[k].remaining -= 
v.used - } - for k, v := range tx.executionCounters.counters { - combined[k].used += v.used - combined[k].remaining -= v.used - } - for k, v := range tx.processingCounters.counters { - combined[k].used += v.used - combined[k].remaining -= v.used - } + for k, _ := range combined { + val := bcc.rlpCombinedCounters[k].used + bcc.executionCombinedCounters[k].used + bcc.processingCombinedCounters[k].used + combined[k].used += val + combined[k].remaining -= val } return combined, nil @@ -260,3 +270,19 @@ func (bcc *BatchCounterCollector) CombineCollectorsNoChanges(verifyMerkleProof b return combined } + +func (bcc *BatchCounterCollector) UpdateRlpCountersCache(txCounters *TransactionCounter) { + for k, v := range txCounters.rlpCounters.counters { + bcc.rlpCombinedCounters[k].used += v.used + } +} + +func (bcc *BatchCounterCollector) UpdateExecutionAndProcessingCountersCache(txCounters *TransactionCounter) { + for k, v := range txCounters.executionCounters.counters { + bcc.executionCombinedCounters[k].used += v.used + } + + for k, v := range txCounters.processingCounters.counters { + bcc.processingCombinedCounters[k].used += v.used + } +} diff --git a/core/vm/zk_counters.go b/core/vm/zk_counters.go index d67db2497aa..953e0b109fd 100644 --- a/core/vm/zk_counters.go +++ b/core/vm/zk_counters.go @@ -134,6 +134,16 @@ func (c *Counters) GetPoseidonPaddings() *Counter { return (*c)[D] } +func (cc Counters) Clone() Counters { + var clonedCounters Counters = Counters{} + + for k, v := range cc { + clonedCounters[k] = v.Clone() + } + + return clonedCounters +} + type CounterKey string var ( @@ -154,6 +164,15 @@ type CounterCollector struct { transaction types.Transaction } +func (cc *CounterCollector) isOverflown() bool { + for _, v := range cc.counters { + if v.remaining < 0 { + return true + } + } + return false +} + func calculateSmtLevels(smtMaxLevel int, minValue int, mcpReduction float64) int { binary := big.NewInt(0) base := big.NewInt(2) @@ -223,9 +242,8 @@ func (cc 
*CounterCollector) SetTransaction(transaction types.Transaction) { func WrapJumpTableWithZkCounters(originalTable *JumpTable, counterCalls *[256]executionFunc) *JumpTable { wrapper := func(original, counter executionFunc) executionFunc { return func(p *uint64, i *EVMInterpreter, s *ScopeContext) ([]byte, error) { - b, err := counter(p, i, s) - if err != nil { - return b, err + if _, err := counter(p, i, s); err != nil { + return nil, err } return original(p, i, s) } @@ -821,6 +839,15 @@ func (cc *CounterCollector) preModExpLoop() { func (cc *CounterCollector) multiCall(call func(), times int) { for i := 0; i < times; i++ { + // if there is a case with a huge amount of iterations + // it will overflow after several thousand iterations + // so we can just stop it early and not hang the node + // check each 1000 iterations so the overhead is not too much in the normal case + if i%1000 == 0 { + if cc.isOverflown() { + break + } + } call() } } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index 16b397736ae..9fac01e40ee 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -66,6 +66,26 @@ func NewMemoryBatch(tx kv.Tx, tmpDir string, logger log.Logger) *MemoryMutation } } +func NewMemoryBatchWithSize(tx kv.Tx, tmpDir string, mapSize datasize.ByteSize) *MemoryMutation { + tmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).MapSize(mapSize).MustOpen() + memTx, err := tmpDB.BeginRw(context.Background()) + if err != nil { + panic(err) + } + if err := initSequences(tx, memTx); err != nil { + return nil + } + + return &MemoryMutation{ + db: tx, + memDb: tmpDB, + memTx: memTx, + deletedEntries: make(map[string]map[string]struct{}), + deletedDups: map[string]map[string]map[string]struct{}{}, + clearedTables: make(map[string]struct{}), + } +} + func NewMemoryBatchWithCustomDB(tx kv.Tx, db kv.RwDB, uTx kv.RwTx, tmpDir string) *MemoryMutation { return 
&MemoryMutation{ db: tx, diff --git a/eth/backend.go b/eth/backend.go index f81ad126e21..2ee665908a7 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -134,6 +134,9 @@ import ( "github.com/ledgerwatch/erigon/zk/txpool/txpooluitl" "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/etherman" + "github.com/ledgerwatch/erigon/zk/l1_cache" + "net/url" + "path" ) // Config contains the configuration options of the ETH protocol. @@ -216,6 +219,7 @@ type Ethereum struct { dataStream *datastreamer.StreamServer l1Syncer *syncer.L1Syncer etherManClients []*etherman.Client + l1Cache *l1_cache.L1Cache preStartTasks *PreStartTasks @@ -938,11 +942,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger defer tx.Rollback() // create buckets - if err := hermez_db.CreateHermezBuckets(tx); err != nil { - return nil, err - } - - if err := db.CreateEriDbBuckets(tx); err != nil { + if err := createBuckets(tx); err != nil { return nil, err } @@ -988,6 +988,23 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.chainConfig.SupportGasless = cfg.Gasless l1Urls := strings.Split(cfg.L1RpcUrl, ",") + + if cfg.Zk.L1CacheEnabled { + l1Cache, err := l1_cache.NewL1Cache(ctx, path.Join(stack.DataDir(), "l1cache"), cfg.Zk.L1CachePort) + if err != nil { + return nil, err + } + backend.l1Cache = l1Cache + + var cacheL1Urls []string + for _, l1Url := range l1Urls { + encoded := url.QueryEscape(l1Url) + cacheL1Url := fmt.Sprintf("http://localhost:%d?endpoint=%s&chainid=%d", cfg.Zk.L1CachePort, encoded, cfg.L2ChainId) + cacheL1Urls = append(cacheL1Urls, cacheL1Url) + } + l1Urls = cacheL1Urls + } + backend.etherManClients = make([]*etherman.Client, len(l1Urls)) for i, url := range l1Urls { backend.etherManClients[i] = newEtherMan(cfg, chainConfig.ChainName, url) @@ -1031,6 +1048,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } seqVerSyncer := syncer.NewL1Syncer( + 
ctx, ethermanClients, seqAndVerifL1Contracts, seqAndVerifTopics, @@ -1040,6 +1058,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger ) backend.l1Syncer = syncer.NewL1Syncer( + ctx, ethermanClients, l1Contracts, l1Topics, @@ -1049,6 +1068,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger ) l1InfoTreeSyncer := syncer.NewL1Syncer( + ctx, ethermanClients, []libcommon.Address{cfg.AddressGerManager}, [][]libcommon.Hash{{contracts.UpdateL1InfoTreeTopic}}, @@ -1065,6 +1085,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.agg, backend.blockReader, backend.chainConfig, + backend.config.Zk, backend.engine, ) @@ -1101,7 +1122,14 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // we switch context from being an RPC node to a sequencer backend.txPool2.ForceUpdateLatestBlock(executionProgress) + // we need to start the pool before stage loop itself + // the pool holds the info about how execution stage should work - as regular or as limbo recovery + if err := backend.txPool2.StartIfNotStarted(ctx, backend.txPool2DB, tx); err != nil { + return nil, err + } + l1BlockSyncer := syncer.NewL1Syncer( + ctx, ethermanClients, []libcommon.Address{cfg.AddressZkevm, cfg.AddressRollup}, [][]libcommon.Hash{{ @@ -1186,6 +1214,22 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger return backend, nil } +func createBuckets(tx kv.RwTx) error { + if err := hermez_db.CreateHermezBuckets(tx); err != nil { + return err + } + + if err := db.CreateEriDbBuckets(tx); err != nil { + return err + } + + if err := txpool.CreateTxPoolBuckets(tx); err != nil { + return err + } + + return nil +} + // creates an EtherMan instance with default parameters func newEtherMan(cfg *ethconfig.Config, l2ChainName, url string) *etherman.Client { ethmanConf := etherman.Config{ @@ -1335,7 +1379,7 @@ func (s *Ethereum) PreStart() error { // so 
here we loop and take a brief pause waiting for it to be ready attempts := 0 for { - _, err = zkStages.CatchupDatastream("stream-catchup", tx, s.dataStream, s.chainConfig.ChainID.Uint64(), s.config.DatastreamVersion, s.config.HasExecutors()) + _, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, s.dataStream, s.chainConfig.ChainID.Uint64(), s.config.DatastreamVersion, s.config.HasExecutors()) if err != nil { if errors.Is(err, datastreamer.ErrAtomicOpNotAllowed) { attempts++ diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index f444ee19964..31693d0d63d 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -216,6 +216,8 @@ type Config struct { DeprecatedTxPool DeprecatedTxPoolConfig TxPool txpoolcfg.Config + YieldSize uint64 + // Gas Price Oracle options GPO gaspricecfg.Config diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index edd6bbcaa23..0a47d08b6be 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -3,6 +3,7 @@ package ethconfig import ( "time" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/common" ) @@ -26,15 +27,19 @@ type Zk struct { L1HighestBlockType string L1MaticContractAddress common.Address L1FirstBlock uint64 + L1CacheEnabled bool + L1CachePort uint RpcRateLimits int DatastreamVersion int SequencerBlockSealTime time.Duration SequencerBatchSealTime time.Duration SequencerNonEmptyBatchSealTime time.Duration + SequencerHaltOnBatchNumber uint64 ExecutorUrls []string ExecutorStrictMode bool ExecutorRequestTimeout time.Duration DatastreamNewBlockTimeout time.Duration + WitnessMemdbSize datasize.ByteSize ExecutorMaxConcurrentRequests int Limbo bool AllowFreeTransactions bool diff --git a/eth/tracers/logger/json_stream_zkevm.go b/eth/tracers/logger/json_stream_zkevm.go index 23ab39734f5..d27f410be5c 100644 --- a/eth/tracers/logger/json_stream_zkevm.go +++ b/eth/tracers/logger/json_stream_zkevm.go @@ -161,12 +161,10 @@ func (l 
*JsonStreamLogger_ZkEvm) writeOpSnapshot(pc uint64, op vm.OpCode, gas, c } func (l *JsonStreamLogger_ZkEvm) writeError(err error) { - if err == nil { + if err != nil { l.stream.WriteMore() l.stream.WriteObjectField("error") - l.stream.WriteObjectStart() - l.stream.WriteObjectEnd() - //l.stream.WriteString(err.Error()) + l.stream.WriteString(err.Error()) } } @@ -222,6 +220,9 @@ func (l *JsonStreamLogger_ZkEvm) writeMemory(memory *vm.Memory) { } } + if len(filteredByteLines) == 0 { + return + } l.stream.WriteMore() l.stream.WriteObjectField("memory") l.stream.WriteArrayStart() diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index da866627f3e..579a4ad6949 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -236,6 +236,10 @@ func (t *callTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libc } if value != nil { call.Value = value.ToBig() + } else { + if typ == vm.DELEGATECALL { + call.Value = t.callstack[len(t.callstack)-1].Value + } } t.callstack = append(t.callstack, call) } diff --git a/rpc/types_zkevm.go b/rpc/types_zkevm.go new file mode 100644 index 00000000000..f47cd91b164 --- /dev/null +++ b/rpc/types_zkevm.go @@ -0,0 +1,5 @@ +package rpc + +type RpcNumberArray struct { + Numbers []BlockNumber `json:"numbers"` +} diff --git a/smt/pkg/smt/smt.go b/smt/pkg/smt/smt.go index baee3f3ac31..0a2733b7103 100644 --- a/smt/pkg/smt/smt.go +++ b/smt/pkg/smt/smt.go @@ -294,7 +294,7 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old if newValH == [4]uint64{} { newValH, err = s.hashcalcAndSave(v.ToUintArray(), utils.BranchCapacity) } else { - newValH, err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) + err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) } if err != nil { return nil, err @@ -342,7 +342,7 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old if newValH == [4]uint64{} { newValH, err = 
s.hashcalcAndSave(v.ToUintArray(), utils.BranchCapacity) } else { - newValH, err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) + err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) } if err != nil { @@ -407,7 +407,7 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old if newValH == [4]uint64{} { newValH, err = s.hashcalcAndSave(v.ToUintArray(), utils.BranchCapacity) } else { - newValH, err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) + err = s.hashSave(v.ToUintArray(), utils.BranchCapacity, newValH) } if err != nil { return nil, err @@ -537,24 +537,27 @@ func (s *SMT) insert(k utils.NodeKey, v utils.NodeValue8, newValH [4]uint64, old return smtResponse, nil } -func (s *SMT) hashSave(in [8]uint64, capacity, h [4]uint64) ([4]uint64, error) { - if !s.noSaveOnInsert { - var sl []uint64 - sl = append(sl, in[:]...) - sl = append(sl, capacity[:]...) +func prepareHashValueForSave(in [8]uint64, capacity [4]uint64) utils.NodeValue12 { + var sl []uint64 + sl = append(sl, in[:]...) + sl = append(sl, capacity[:]...) 
- v := utils.NodeValue12{} - for i, val := range sl { - b := new(big.Int) - v[i] = b.SetUint64(val) - } + v := utils.NodeValue12{} + for i, val := range sl { + b := new(big.Int) + v[i] = b.SetUint64(val) + } - err := s.Db.Insert(h, v) - if err != nil { - return [4]uint64{}, err - } + return v +} + +func (s *SMT) hashSave(in [8]uint64, capacity, h [4]uint64) error { + if s.noSaveOnInsert { + return nil } - return h, nil + v := prepareHashValueForSave(in, capacity) + + return s.Db.Insert(h, v) } func (s *SMT) hashcalcAndSave(in [8]uint64, capacity [4]uint64) ([4]uint64, error) { @@ -563,11 +566,16 @@ func (s *SMT) hashcalcAndSave(in [8]uint64, capacity [4]uint64) ([4]uint64, erro return [4]uint64{}, err } - return s.hashSave(in, capacity, h) + return h, s.hashSave(in, capacity, h) } -func (s *SMT) hashcalc(in [8]uint64, capacity [4]uint64) ([4]uint64, error) { - return utils.Hash(in, capacity) +func hashCalcAndPrepareForSave(in [8]uint64, capacity [4]uint64) ([4]uint64, utils.NodeValue12, error) { + h, err := utils.Hash(in, capacity) + if err != nil { + return [4]uint64{}, utils.NodeValue12{}, err + } + + return h, prepareHashValueForSave(in, capacity), nil } func (s *RoSMT) getLastRoot() (utils.NodeKey, error) { diff --git a/smt/pkg/smt/smt_batch.go b/smt/pkg/smt/smt_batch.go index 33d69519f21..eb924c03a68 100644 --- a/smt/pkg/smt/smt_batch.go +++ b/smt/pkg/smt/smt_batch.go @@ -26,25 +26,21 @@ func (s *SMT) InsertBatch(ctx context.Context, logPrefix string, nodeKeys []*uti progressChan, stopProgressPrinter := zk.ProgressPrinterWithoutValues(fmt.Sprintf("[%s] SMT incremental progress", logPrefix), uint64(size)+preprocessStage+finalizeStage) defer stopProgressPrinter() - err = validateDataLengths(nodeKeys, nodeValues, &nodeValuesHashes) - if err != nil { + if err = validateDataLengths(nodeKeys, nodeValues, &nodeValuesHashes); err != nil { return nil, err } - err = removeDuplicateEntriesByKeys(&size, &nodeKeys, &nodeValues, &nodeValuesHashes) - if err != nil { + if err 
= removeDuplicateEntriesByKeys(&size, &nodeKeys, &nodeValues, &nodeValuesHashes); err != nil { return nil, err } - err = calculateNodeValueHashesIfMissing(s, nodeValues, &nodeValuesHashes) - if err != nil { + if err = calculateNodeValueHashesIfMissing(s, nodeValues, &nodeValuesHashes); err != nil { return nil, err } progressChan <- uint64(preprocessStage) - err = calculateRootNodeHashIfNil(s, &rootNodeHash) - if err != nil { + if err = calculateRootNodeHashIfNil(s, &rootNodeHash); err != nil { return nil, err } @@ -162,7 +158,7 @@ func (s *SMT) InsertBatch(ctx context.Context, logPrefix string, nodeKeys []*uti } for i, nodeValue := range nodeValues { if !nodeValue.IsZero() { - _, err = s.hashSave(nodeValue.ToUintArray(), utils.BranchCapacity, *nodeValuesHashes[i]) + err = s.hashSave(nodeValue.ToUintArray(), utils.BranchCapacity, *nodeValuesHashes[i]) if err != nil { return nil, err } @@ -172,7 +168,9 @@ func (s *SMT) InsertBatch(ctx context.Context, logPrefix string, nodeKeys []*uti if smtBatchNodeRoot == nil { rootNodeHash = &utils.NodeKey{0, 0, 0, 0} } else { - calculateAndSaveHashesDfs(s, smtBatchNodeRoot, make([]int, 256), 0) + if err := calculateAndSaveHashesDfs(s, smtBatchNodeRoot, make([]int, 256), 0); err != nil { + return nil, fmt.Errorf("calculating and saving hashes dfs: %w", err) + } rootNodeHash = (*utils.NodeKey)(smtBatchNodeRoot.hash) } if err := s.setLastRoot(*rootNodeHash); err != nil { @@ -256,7 +254,7 @@ func calculateNodeValueHashesIfMissing(s *SMT, nodeValues []*utils.NodeValue8, n if endIndex > size { endIndex = size } - err := calculateNodeValueHashesIfMissingInInterval(s, nodeValues, nodeValuesHashes, startIndex, endIndex) + err := calculateNodeValueHashesIfMissingInInterval(nodeValues, nodeValuesHashes, startIndex, endIndex) if err != nil { globalError = err } @@ -265,19 +263,19 @@ func calculateNodeValueHashesIfMissing(s *SMT, nodeValues []*utils.NodeValue8, n wg.Wait() } else { - globalError = 
calculateNodeValueHashesIfMissingInInterval(s, nodeValues, nodeValuesHashes, 0, len(nodeValues)) + globalError = calculateNodeValueHashesIfMissingInInterval(nodeValues, nodeValuesHashes, 0, len(nodeValues)) } return globalError } -func calculateNodeValueHashesIfMissingInInterval(s *SMT, nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64, startIndex, endIndex int) error { +func calculateNodeValueHashesIfMissingInInterval(nodeValues []*utils.NodeValue8, nodeValuesHashes *[]*[4]uint64, startIndex, endIndex int) error { for i := startIndex; i < endIndex; i++ { if (*nodeValuesHashes)[i] != nil { continue } - nodeValueHashObj, err := s.hashcalc(nodeValues[i].ToUintArray(), utils.BranchCapacity) + nodeValueHashObj, err := utils.Hash(nodeValues[i].ToUintArray(), utils.BranchCapacity) if err != nil { return err } @@ -386,13 +384,16 @@ func calculateAndSaveHashesDfs(s *SMT, smtBatchNode *smtBatchNode, path []int, l if smtBatchNode.isLeaf() { hashObj, err := s.hashcalcAndSave(utils.ConcatArrays4(*smtBatchNode.nodeLeftHashOrRemainingKey, *smtBatchNode.nodeRightHashOrValueHash), utils.LeafCapacity) if err != nil { - return err + return fmt.Errorf("hashing leaf: %w", err) } + smtBatchNode.hash = &hashObj + nodeKey := utils.JoinKey(path[:level], *smtBatchNode.nodeLeftHashOrRemainingKey) - s.Db.InsertHashKey(hashObj, *nodeKey) + if err := s.Db.InsertHashKey(hashObj, *nodeKey); err != nil { + return fmt.Errorf("inserting hash key: %w", err) + } - smtBatchNode.hash = &hashObj return nil } @@ -400,18 +401,30 @@ func calculateAndSaveHashesDfs(s *SMT, smtBatchNode *smtBatchNode, path []int, l if smtBatchNode.leftNode != nil { path[level] = 0 - calculateAndSaveHashesDfs(s, smtBatchNode.leftNode, path, level+1) - totalHash.SetHalfValue(*smtBatchNode.leftNode.hash, 0) + if err := calculateAndSaveHashesDfs(s, smtBatchNode.leftNode, path, level+1); err != nil { + return err + } + if err := totalHash.SetHalfValue(*smtBatchNode.leftNode.hash, 0); err != nil { + return err + } } 
else { - totalHash.SetHalfValue(*smtBatchNode.nodeLeftHashOrRemainingKey, 0) + if err := totalHash.SetHalfValue(*smtBatchNode.nodeLeftHashOrRemainingKey, 0); err != nil { + return err + } } if smtBatchNode.rightNode != nil { path[level] = 1 - calculateAndSaveHashesDfs(s, smtBatchNode.rightNode, path, level+1) - totalHash.SetHalfValue(*smtBatchNode.rightNode.hash, 1) + if err := calculateAndSaveHashesDfs(s, smtBatchNode.rightNode, path, level+1); err != nil { + return err + } + if err := totalHash.SetHalfValue(*smtBatchNode.rightNode.hash, 1); err != nil { + return err + } } else { - totalHash.SetHalfValue(*smtBatchNode.nodeRightHashOrValueHash, 1) + if err := totalHash.SetHalfValue(*smtBatchNode.nodeRightHashOrValueHash, 1); err != nil { + return err + } } hashObj, err := s.hashcalcAndSave(totalHash.ToUintArray(), utils.BranchCapacity) diff --git a/smt/pkg/smt/smt_batch_test.go b/smt/pkg/smt/smt_batch_test.go index 15de3499ecc..cb4a84790af 100644 --- a/smt/pkg/smt/smt_batch_test.go +++ b/smt/pkg/smt/smt_batch_test.go @@ -565,7 +565,7 @@ func TestCompareAllTreesInsertTimesAndFinalHashesUsingInMemoryDb(t *testing.T) { func compareAllTreesInsertTimesAndFinalHashes(t *testing.T, smtIncremental, smtBulk, smtBatch *smt.SMT) { batchInsertDataHolders, totalInserts := prepareData() - + ctx := context.Background() var incrementalError error accChanges := make(map[libcommon.Address]*accounts.Account) @@ -601,7 +601,7 @@ func compareAllTreesInsertTimesAndFinalHashes(t *testing.T, smtIncremental, smtB t.Logf("Incremental insert %d values in %v\n", totalInserts, time.Since(startTime)) startTime = time.Now() - keyPointers, valuePointers, err := smtBatch.SetStorage(context.Background(), "", accChanges, codeChanges, storageChanges) + keyPointers, valuePointers, err := smtBatch.SetStorage(ctx, "", accChanges, codeChanges, storageChanges) assert.NilError(t, err) t.Logf("Batch insert %d values in %v\n", totalInserts, time.Since(startTime)) @@ -614,7 +614,7 @@ func 
compareAllTreesInsertTimesAndFinalHashes(t *testing.T, smtIncremental, smtB } } startTime = time.Now() - smtBulk.GenerateFromKVBulk("", keys) + smtBulk.GenerateFromKVBulk(ctx, "", keys) t.Logf("Bulk insert %d values in %v\n", totalInserts, time.Since(startTime)) smtIncrementalRootHash, _ := smtIncremental.Db.GetLastRoot() diff --git a/smt/pkg/smt/smt_create.go b/smt/pkg/smt/smt_create.go index f8026f66a24..f204a9df29d 100644 --- a/smt/pkg/smt/smt_create.go +++ b/smt/pkg/smt/smt_create.go @@ -1,7 +1,9 @@ package smt import ( + "context" "fmt" + "sync" "time" "github.com/ledgerwatch/erigon/smt/pkg/utils" @@ -28,21 +30,12 @@ import ( // this makes it so the left part of a node can be deleted once it's right part is inserted // this is because the left part is at its final spot // when deleting nodes, go down to the leaf and create and save hashes in the SMT -func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4]uint64, error) { +func (s *SMT) GenerateFromKVBulk(ctx context.Context, logPrefix string, nodeKeys []utils.NodeKey) ([4]uint64, error) { s.clearUpMutex.Lock() defer s.clearUpMutex.Unlock() log.Info(fmt.Sprintf("[%s] Building temp binary tree started", logPrefix)) - // get nodeKeys and sort them bitwise - // nodeKeys := []utils.NodeKey{} - // for k := range kvMap { - // v := kvMap[k] - // if v.IsZero() { - // continue - // } - // nodeKeys = append(nodeKeys, k) - // } totalKeysCount := len(nodeKeys) log.Info(fmt.Sprintf("[%s] Total values to insert: %d", logPrefix, totalKeysCount)) @@ -71,10 +64,29 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 maxReachedLevel := 0 + deletesWorker := utils.NewWorker(ctx, "smt_save_finished", 1000) + + // start a worker to delete finished parts of the tree and return values to save to the db + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + deletesWorker.DoWork() + wg.Done() + }() + tempTreeBuildStart := time.Now() + leafValueMap := sync.Map{} + + var err error for 
_, k := range nodeKeys { // split the key keys := k.GetPath() + v, err := s.Db.GetAccountValue(k) + if err != nil { + return [4]uint64{}, err + } + leafValueMap.Store(k, v) + // find last node siblings, level := rootNode.findLastNode(keys) @@ -139,24 +151,26 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 rKey: keys[level+level2+1:], } + nodeToDelFrom := siblings[len(siblings)-1] + pathToDeleteFrom := make([]int, level+level2+1, 256) + copy(pathToDeleteFrom, keys[:level+level2]) + pathToDeleteFrom[level+level2] = 0 + + jobResult := utils.NewCalcAndPrepareJobResult(s.Db) //hash, save and delete left leaf - deleteFunc := func() error { - nodeToDelFrom := siblings[len(siblings)-1] - pathToDeleteFrom := make([]int, level+level2+1) - copy(pathToDeleteFrom, keys[:level+level2]) - pathToDeleteFrom[level+level2] = 0 - _, leftHash, err := nodeToDelFrom.node0.deleteTree(pathToDeleteFrom, s) + deleteFunc := func() utils.JobResult { + leftHash, err := nodeToDelFrom.node0.deleteTreeNoSave(pathToDeleteFrom, &leafValueMap, jobResult.KvMap, jobResult.LeafsKvMap) if err != nil { - return err + jobResult.Err = err + return jobResult } nodeToDelFrom.leftHash = leftHash nodeToDelFrom.node0 = nil - return nil + return jobResult } - deleteFunc() - // deletesQueue.AddJob(utils.Job{Action: deleteFunc}) + deletesWorker.AddJob(deleteFunc) if maxReachedLevel < level+level2+1 { maxReachedLevel = level + level2 + 1 @@ -204,22 +218,28 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 //hash, save and delete left leaf if upperNode.node0 != nil { - deleteFunc := func() error { - nodeToDelFrom := upperNode - pathToDeleteFrom := make([]int, level+1) - copy(pathToDeleteFrom, keys[:level]) - pathToDeleteFrom[level] = 0 - _, leftHash, err := nodeToDelFrom.node0.deleteTree(pathToDeleteFrom, s) + nodeToDelFrom := upperNode + pathToDeleteFrom := make([]int, level+1, 256) + copy(pathToDeleteFrom, keys[:level]) + pathToDeleteFrom[level] 
= 0 + + jobResult := utils.NewCalcAndPrepareJobResult(s.Db) + + // get all leaf keys so we can then get all needed values and pass them + // this is needed because w can't read from the db in another routine + deleteFunc := func() utils.JobResult { + leftHash, err := nodeToDelFrom.node0.deleteTreeNoSave(pathToDeleteFrom, &leafValueMap, jobResult.KvMap, jobResult.LeafsKvMap) + if err != nil { - return err + jobResult.Err = err + return jobResult } nodeToDelFrom.leftHash = leftHash nodeToDelFrom.node0 = nil - return nil + return jobResult } - deleteFunc() - // deletesQueue.AddJob(utils.Job{Action: deleteFunc}) + deletesWorker.AddJob(deleteFunc) } } @@ -228,9 +248,21 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 } } + if err := runSaveLoop(deletesWorker.GetJobResultsChannel()); err != nil { + return [4]uint64{}, err + } + insertedKeysCount++ progressChan <- uint64(totalKeysCount) + insertedKeysCount } + deletesWorker.Stop() + + wg.Wait() + + // wait and save all jobs + if err := runSaveLoop(deletesWorker.GetJobResultsChannel()); err != nil { + return [4]uint64{}, err + } s.updateDepth(maxReachedLevel) @@ -263,7 +295,7 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 rootNode.rKey = newRkey } - _, finalRoot, err := rootNode.deleteTree(pathToDeleteFrom, s) + finalRoot, err := rootNode.deleteTree(pathToDeleteFrom, s, &leafValueMap) if err != nil { return [4]uint64{}, err } @@ -275,6 +307,23 @@ func (s *SMT) GenerateFromKVBulk(logPrefix string, nodeKeys []utils.NodeKey) ([4 return finalRoot, nil } +func runSaveLoop(jobResultsChannel chan utils.JobResult) error { + for { + select { + case result := <-jobResultsChannel: + if result.GetError() != nil { + return result.GetError() + } + + if err := result.Save(); err != nil { + return err + } + default: + return nil + } + } +} + type SmtNode struct { rKey []int leftHash [4]uint64 @@ -320,84 +369,95 @@ func (n *SmtNode) findLastNode(keys []int) 
([]*SmtNode, int) { return siblings, level } -func (n *SmtNode) deleteTree(keyPath []int, s *SMT) ([]utils.NodeKey, [4]uint64, error) { - deletedKeys := []utils.NodeKey{} - +func (n *SmtNode) deleteTreeNoSave(keyPath []int, leafValueMap *sync.Map, kvMapOfValuesToSave map[[4]uint64]utils.NodeValue12, kvMapOfLeafValuesToSave map[[4]uint64][4]uint64) ([4]uint64, error) { if n.isLeaf() { fullKey := append(keyPath, n.rKey...) k, err := utils.NodeKeyFromPath(fullKey) if err != nil { - return nil, [4]uint64{}, err - } - v, err := s.Db.GetAccountValue(k) - if err != nil { - return nil, [4]uint64{}, err + return [4]uint64{}, err } - // deletedKeys = append(deletedKeys, k) + v, ok := leafValueMap.LoadAndDelete(k) + if !ok { + return [4]uint64{}, fmt.Errorf("value not found for key %v", k) + } + accoutnValue := v.(utils.NodeValue8) newKey := utils.RemoveKeyBits(k, len(keyPath)) //hash and save leaf - newLeafHash, err := s.createNewLeaf(k, newKey, v) + newValH, newValHV, newLeafHash, newLeafHashV, err := createNewLeafNoSave(newKey, &accoutnValue) if err != nil { - return nil, [4]uint64{}, err + return [4]uint64{}, err } - return deletedKeys, newLeafHash, nil + kvMapOfValuesToSave[newValH] = newValHV + kvMapOfValuesToSave[newLeafHash] = newLeafHashV + kvMapOfLeafValuesToSave[newLeafHash] = k + + return newLeafHash, nil } var totalHash utils.NodeValue8 if n.node0 != nil { if !utils.IsArrayUint64Empty(n.leftHash[:]) { - return nil, [4]uint64{}, fmt.Errorf("node has previously deleted left part") + return [4]uint64{}, fmt.Errorf("node has previously deleted left part") } localKeyPath := append(keyPath, 0) - _, leftHash, err := n.node0.deleteTree(localKeyPath, s) + leftHash, err := n.node0.deleteTreeNoSave(localKeyPath, leafValueMap, kvMapOfValuesToSave, kvMapOfLeafValuesToSave) if err != nil { - return nil, [4]uint64{}, err + return [4]uint64{}, err } n.leftHash = leftHash - // deletedKeys = append(deletedKeys, keysFromBelow...) 
n.node0 = nil } if n.node1 != nil { localKeyPath := append(keyPath, 1) - _, rightHash, err := n.node1.deleteTree(localKeyPath, s) + rightHash, err := n.node1.deleteTreeNoSave(localKeyPath, leafValueMap, kvMapOfValuesToSave, kvMapOfLeafValuesToSave) if err != nil { - return nil, [4]uint64{}, err + return [4]uint64{}, err } totalHash.SetHalfValue(rightHash, 1) - // deletedKeys = append(deletedKeys, keysFromBelow...) n.node1 = nil } totalHash.SetHalfValue(n.leftHash, 0) - newRoot, err := s.hashcalcAndSave(totalHash.ToUintArray(), utils.BranchCapacity) + newRoot, v, err := hashCalcAndPrepareForSave(totalHash.ToUintArray(), utils.BranchCapacity) if err != nil { - return nil, [4]uint64{}, err + return [4]uint64{}, err } - return deletedKeys, newRoot, nil + kvMapOfValuesToSave[newRoot] = v + + return newRoot, nil } -func (s *SMT) createNewLeaf(k, rkey utils.NodeKey, v utils.NodeValue8) ([4]uint64, error) { - //hash and save leaf - newValH, err := s.hashcalcAndSave(v.ToUintArray(), utils.BranchCapacity) - if err != nil { +func (n *SmtNode) deleteTree(keyPath []int, s *SMT, leafValueMap *sync.Map) (newRoot [4]uint64, err error) { + jobResult := utils.NewCalcAndPrepareJobResult(s.Db) + + if newRoot, err = n.deleteTreeNoSave(keyPath, leafValueMap, jobResult.KvMap, jobResult.LeafsKvMap); err != nil { return [4]uint64{}, err } - newLeafHash, err := s.hashcalcAndSave(utils.ConcatArrays4(rkey, newValH), utils.LeafCapacity) + jobResult.Save() + + return newRoot, nil +} - s.Db.InsertHashKey(newLeafHash, k) +func createNewLeafNoSave(rkey utils.NodeKey, v *utils.NodeValue8) (newValH [4]uint64, newValHV utils.NodeValue12, newLeafHash [4]uint64, newLeafHashV utils.NodeValue12, err error) { + //hash and save leaf + newValH, newValHV, err = hashCalcAndPrepareForSave(v.ToUintArray(), utils.BranchCapacity) + if err != nil { + return [4]uint64{}, utils.NodeValue12{}, [4]uint64{}, utils.NodeValue12{}, err + } + newLeafHash, newLeafHashV, err = 
hashCalcAndPrepareForSave(utils.ConcatArrays4(rkey, newValH), utils.LeafCapacity) if err != nil { - return [4]uint64{}, err + return [4]uint64{}, utils.NodeValue12{}, [4]uint64{}, utils.NodeValue12{}, err } - return newLeafHash, nil + return newValH, newValHV, newLeafHash, newLeafHashV, nil } diff --git a/smt/pkg/smt/smt_create_test.go b/smt/pkg/smt/smt_create_test.go index b5d45b13910..90d6c6d0f08 100644 --- a/smt/pkg/smt/smt_create_test.go +++ b/smt/pkg/smt/smt_create_test.go @@ -1,6 +1,7 @@ package smt import ( + "context" "fmt" "math/big" "math/rand" @@ -56,7 +57,7 @@ func TestSMT_Create_Insert(t *testing.T) { "0xfa2d3062e11e44668ab79c595c0c916a82036a017408377419d74523569858ea", }, } - + ctx := context.Background() for _, scenario := range testCases { t.Run(scenario.name, func(t *testing.T) { s := NewSMT(nil, false) @@ -68,7 +69,7 @@ func TestSMT_Create_Insert(t *testing.T) { } } // set scenario old root if fail - newRoot, err := s.GenerateFromKVBulk("", keys) + newRoot, err := s.GenerateFromKVBulk(ctx, "", keys) if err != nil { t.Errorf("Insert failed: %v", err) } @@ -83,6 +84,7 @@ func TestSMT_Create_Insert(t *testing.T) { func TestSMT_Create_CompareWithRandomData(t *testing.T) { limit := 5000 + ctx := context.Background() kvMap := map[utils.NodeKey]utils.NodeValue8{} for i := 1; i <= limit; i++ { @@ -120,7 +122,7 @@ func TestSMT_Create_CompareWithRandomData(t *testing.T) { } } // set scenario old root if fail - root2, err := s2.GenerateFromKVBulk("", keys) + root2, err := s2.GenerateFromKVBulk(ctx, "", keys) if err != nil { t.Errorf("Insert failed: %v", err) } @@ -139,7 +141,8 @@ func TestSMT_Create_CompareWithRandomData(t *testing.T) { } func TestSMT_Create_Benchmark(t *testing.T) { - limit := 500000 + limit := 100000 + ctx := context.Background() kvMap := map[utils.NodeKey]utils.NodeValue8{} for i := 1; i <= limit; i++ { @@ -160,7 +163,7 @@ func TestSMT_Create_Benchmark(t *testing.T) { } } - _, err := s.GenerateFromKVBulk("", keys) + _, err := 
s.GenerateFromKVBulk(ctx, "", keys) if err != nil { t.Errorf("Insert failed: %v", err) } diff --git a/smt/pkg/utils/job_queue.go b/smt/pkg/utils/job_queue.go index fe881d67a6a..4d246df4d98 100644 --- a/smt/pkg/utils/job_queue.go +++ b/smt/pkg/utils/job_queue.go @@ -2,90 +2,101 @@ package utils import ( "context" + "sync/atomic" + "time" ) -// Job - holds logic to perform some operations during queue execution. -type Job struct { - Action func() error // A function that should be executed when the job is running. +type DB interface { + InsertHashKey(key NodeKey, value NodeKey) error + Insert(key NodeKey, value NodeValue12) error } -// Run performs job execution. -func (j Job) Run() error { - err := j.Action() - if err != nil { - return err - } - - return nil +type JobResult interface { + GetError() error + Save() error } -// Queue holds name, list of jobs and context with cancel. -type Queue struct { - jobs chan Job - ctx context.Context - cancel context.CancelFunc +type CalcAndPrepareJobResult struct { + db DB + Err error + KvMap map[[4]uint64]NodeValue12 + LeafsKvMap map[[4]uint64][4]uint64 } -// NewQueue instantiates new queue. -func NewQueue(size int) *Queue { - ctx, cancel := context.WithCancel(context.Background()) - - return &Queue{ - jobs: make(chan Job, size), - ctx: ctx, - cancel: cancel, +func NewCalcAndPrepareJobResult(db DB) *CalcAndPrepareJobResult { + return &CalcAndPrepareJobResult{ + db: db, + KvMap: make(map[[4]uint64]NodeValue12), + LeafsKvMap: make(map[[4]uint64][4]uint64), } } -// AddJob sends job to the channel. 
-func (q *Queue) AddJob(job Job) { - q.jobs <- job +func (r *CalcAndPrepareJobResult) GetError() error { + return r.Err } -func (q *Queue) Stop() { - q.cancel() +func (r *CalcAndPrepareJobResult) Save() error { + for key, value := range r.LeafsKvMap { + if err := r.db.InsertHashKey(key, value); err != nil { + return err + } + } + for key, value := range r.KvMap { + if err := r.db.Insert(key, value); err != nil { + return err + } + } + return nil } // Worker responsible for queue serving. type Worker struct { - name string - errChan chan error - queue *Queue + ctx context.Context + name string + jobs chan func() JobResult + jobResults chan JobResult + stopped atomic.Bool } // NewWorker initializes a new Worker. -func NewWorker(name string, errChan chan error, queue *Queue) *Worker { +func NewWorker(ctx context.Context, name string, jobQueueSize int) *Worker { return &Worker{ + ctx, name, - errChan, - queue, + make(chan func() JobResult, jobQueueSize), + make(chan JobResult, jobQueueSize), + atomic.Bool{}, } } +func (w *Worker) AddJob(job func() JobResult) { + w.jobs <- job +} + +func (w *Worker) GetJobResultsChannel() chan JobResult { + return w.jobResults +} + +func (w *Worker) Stop() { + w.stopped.Store(true) +} + // DoWork processes jobs from the queue (jobs channel). -func (w *Worker) DoWork() bool { - finish := false +func (w *Worker) DoWork() { +LOOP: for { select { - case <-w.queue.ctx.Done(): - finish = true - default: - finish = false - } - - select { + case <-w.ctx.Done(): + break LOOP // if job received. - case job := <-w.queue.jobs: - err := job.Run() - if err != nil { - w.errChan <- err - return false - // if context was canceled. 
- } + case job := <-w.jobs: + jobRes := job() + w.jobResults <- jobRes default: - if finish { - return true + if w.stopped.Load() { + break LOOP } + time.Sleep(1 * time.Millisecond) } } } diff --git a/smt/pkg/utils/util_test.go b/smt/pkg/utils/util_test.go index a1b718b3a87..8b155152a0a 100644 --- a/smt/pkg/utils/util_test.go +++ b/smt/pkg/utils/util_test.go @@ -42,6 +42,13 @@ func TestBinaryStringToInt64(t *testing.T) { } } +func BenchmarkConvertBigIntToHex(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + ConvertBigIntToHex(big.NewInt(int64(n))) + } +} + func TestConvertBigIntToHex(t *testing.T) { testCases := []struct { name string diff --git a/smt/pkg/utils/utils.go b/smt/pkg/utils/utils.go index 2f5c7ad2c07..f932d278c9f 100644 --- a/smt/pkg/utils/utils.go +++ b/smt/pkg/utils/utils.go @@ -241,6 +241,7 @@ func (nv *NodeValue12) IsFinalNode() bool { return nv[8].Cmp(big.NewInt(1)) == 0 } +// 7 times more efficient than sprintf func ConvertBigIntToHex(n *big.Int) string { return "0x" + n.Text(16) } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 2eddc45f84e..5095f732434 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -167,6 +167,7 @@ var DefaultFlags = []cli.Flag{ &utils.LightClientDiscoveryTCPPortFlag, &utils.SentinelAddrFlag, &utils.SentinelPortFlag, + &utils.YieldSizeFlag, &utils.L2ChainIdFlag, &utils.L2RpcUrlFlag, @@ -176,6 +177,8 @@ var DefaultFlags = []cli.Flag{ &utils.L1SyncStopBatch, &utils.L1ChainIdFlag, &utils.L1RpcUrlFlag, + &utils.L1CacheEnabledFlag, + &utils.L1CachePortFlag, &utils.AddressSequencerFlag, &utils.AddressAdminFlag, &utils.AddressRollupFlag, @@ -195,10 +198,12 @@ var DefaultFlags = []cli.Flag{ &utils.SequencerBlockSealTime, &utils.SequencerBatchSealTime, &utils.SequencerNonEmptyBatchSealTime, + &utils.SequencerHaltOnBatchNumber, &utils.ExecutorUrls, &utils.ExecutorStrictMode, &utils.ExecutorRequestTimeout, &utils.DatastreamNewBlockTimeout, + &utils.WitnessMemdbSize, 
&utils.ExecutorMaxConcurrentRequests, &utils.Limbo, &utils.AllowFreeTransactions, diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 9b10373b1f0..26d672790c0 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -99,6 +99,8 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { panic("Effective gas price for contract deployment must be in interval [0; 1]") } + witnessMemSize := utils.DatasizeFlagValue(ctx, utils.WitnessMemdbSize.Name) + cfg.Zk = ðconfig.Zk{ L2ChainId: ctx.Uint64(utils.L2ChainIdFlag.Name), L2RpcUrl: ctx.String(utils.L2RpcUrlFlag.Name), @@ -108,6 +110,8 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { L1SyncStopBatch: ctx.Uint64(utils.L1SyncStopBatch.Name), L1ChainId: ctx.Uint64(utils.L1ChainIdFlag.Name), L1RpcUrl: ctx.String(utils.L1RpcUrlFlag.Name), + L1CacheEnabled: ctx.Bool(utils.L1CacheEnabledFlag.Name), + L1CachePort: ctx.Uint(utils.L1CachePortFlag.Name), AddressSequencer: libcommon.HexToAddress(ctx.String(utils.AddressSequencerFlag.Name)), AddressAdmin: libcommon.HexToAddress(ctx.String(utils.AddressAdminFlag.Name)), AddressRollup: libcommon.HexToAddress(ctx.String(utils.AddressRollupFlag.Name)), @@ -127,10 +131,12 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { SequencerBlockSealTime: sequencerBlockSealTime, SequencerBatchSealTime: sequencerBatchSealTime, SequencerNonEmptyBatchSealTime: sequencerNonEmptyBatchSealTime, - ExecutorUrls: strings.Split(ctx.String(utils.ExecutorUrls.Name), ","), + SequencerHaltOnBatchNumber: ctx.Uint64(utils.SequencerHaltOnBatchNumber.Name), + ExecutorUrls: strings.Split(strings.ReplaceAll(ctx.String(utils.ExecutorUrls.Name), " ", ""), ","), ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name), ExecutorRequestTimeout: ctx.Duration(utils.ExecutorRequestTimeout.Name), DatastreamNewBlockTimeout: ctx.Duration(utils.DatastreamNewBlockTimeout.Name), + WitnessMemdbSize: *witnessMemSize, 
ExecutorMaxConcurrentRequests: ctx.Int(utils.ExecutorMaxConcurrentRequests.Name), Limbo: ctx.Bool(utils.Limbo.Name), AllowFreeTransactions: ctx.Bool(utils.AllowFreeTransactions.Name), diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 421da286d9d..683ff16b28c 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -290,7 +290,16 @@ func RaiseFdLimit() { } var ( - metricsConfigs = []string{metricsEnabledFlag.Name, metricsAddrFlag.Name, metricsPortFlag.Name} + metricsConfigs = []string{ + metricsEnabledFlag.Name, + metricsAddrFlag.Name, + metricsPortFlag.Name, + traceFlag.Name, + cpuprofileFlag.Name, + pprofFlag.Name, + pprofAddrFlag.Name, + pprofPortFlag.Name, + } ) func SetFlagsFromConfigFile(ctx *cli.Context) error { diff --git a/turbo/jsonrpc/eth_txs.go b/turbo/jsonrpc/eth_txs.go index df647297138..0bd4e373184 100644 --- a/turbo/jsonrpc/eth_txs.go +++ b/turbo/jsonrpc/eth_txs.go @@ -18,6 +18,7 @@ import ( types2 "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/zk/sequencer" ) // GetTransactionByHash implements eth_getTransactionByHash. Returns information about a transaction given the transaction's hash. 
@@ -81,6 +82,10 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, txnHash common.Has return NewRPCTransaction(txn, blockHash, blockNum, txnIndex, baseFee), nil } + if !sequencer.IsSequencer() { + return nil, nil + } + curHeader := rawdb.ReadCurrentHeader(tx) if curHeader == nil { return nil, nil diff --git a/turbo/jsonrpc/send_transaction.go b/turbo/jsonrpc/send_transaction.go index a899a471dda..06a9b01f985 100644 --- a/turbo/jsonrpc/send_transaction.go +++ b/turbo/jsonrpc/send_transaction.go @@ -9,11 +9,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/zk/utils" ) @@ -90,33 +87,6 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutility return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) } - // Print a log with full txn details for manual investigations and interventions - blockNum := rawdb.ReadCurrentBlockNumber(tx) - if blockNum == nil { - return common.Hash{}, err - } - - txnChainId := txn.GetChainID() - - if txn.Protected() { - if chainId.Cmp(txnChainId.ToBig()) != 0 { - return common.Hash{}, fmt.Errorf("invalid chain id, expected: %d got: %d", chainId, *txnChainId) - } - } - - signer := types.MakeSigner(cc, *blockNum, 0) - from, err := txn.Sender(*signer) - if err != nil { - return common.Hash{}, err - } - - if txn.GetTo() == nil { - addr := crypto.CreateAddress(from, txn.GetNonce()) - log.Debug("Submitted contract creation", "hash", txn.Hash().Hex(), "from", from, "nonce", txn.GetNonce(), "contract", addr.Hex(), "value", txn.GetValue()) - } else { - log.Debug("Submitted transaction", "hash", txn.Hash().Hex(), 
"from", from, "nonce", txn.GetNonce(), "recipient", txn.GetTo(), "value", txn.GetValue()) - } - return txn.Hash(), nil } diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 3642b440f03..25e6f903ab1 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -62,6 +62,7 @@ type ZkEvmAPI interface { EstimateCounters(ctx context.Context, argsOrNil *zkevmRPCTransaction) (json.RawMessage, error) TraceTransactionCounters(ctx context.Context, hash common.Hash, config *tracers.TraceConfig_ZkEvm, stream *jsoniter.Stream) error GetBatchCountersByNumber(ctx context.Context, batchNumRpc rpc.BlockNumber) (res json.RawMessage, err error) + GetExitRootTable(ctx context.Context) ([]l1InfoTreeData, error) } // APIImpl is implementation of the ZkEvmAPI interface based on remote Db access @@ -235,7 +236,7 @@ func (api *ZkEvmAPIImpl) VerifiedBatchNumber(ctx context.Context) (hexutil.Uint6 } // GetBatchDataByNumbers returns the batch data for the given batch numbers -func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers []rpc.BlockNumber) (json.RawMessage, error) { +func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers rpc.RpcNumberArray) (json.RawMessage, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return nil, err @@ -269,9 +270,9 @@ func (api *ZkEvmAPIImpl) GetBatchDataByNumbers(ctx context.Context, batchNumbers highestBatchNo, err = hermezDb.GetBatchNoByL2Block(uint64(bn.(hexutil.Uint64))) } - bds := make([]*types.BatchDataSlim, 0, len(batchNumbers)) + bds := make([]*types.BatchDataSlim, 0, len(batchNumbers.Numbers)) - for _, batchNumber := range batchNumbers { + for _, batchNumber := range batchNumbers.Numbers { bd := &types.BatchDataSlim{ Number: uint64(batchNumber.Int64()), Empty: false, @@ -466,9 +467,6 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B batch.Coinbase = block.Coinbase() batch.StateRoot = block.Root() - // TODO: this logic is 
wrong it is the L1 verification timestamp we need - batch.Timestamp = types.ArgUint64(block.Time()) - // block numbers in batch blocksInBatch, err := hermezDb.GetL2BlockNosByBatch(batchNo) if err != nil { @@ -580,13 +578,19 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B batch.GlobalExitRoot = batchGer // sequence - seq, err := hermezDb.GetSequenceByBatchNo(batchNo) + seq, err := hermezDb.GetSequenceByBatchNoOrHighest(batchNo) if err != nil { return nil, err } if seq != nil { batch.SendSequencesTxHash = &seq.L1TxHash } + + // timestamp - ts of highest block in the batch always + if block != nil { + batch.Timestamp = types.ArgUint64(block.Time()) + } + _, found, err = hermezDb.GetLowestBlockInBatch(batchNo + 1) if err != nil { return nil, err @@ -594,23 +598,37 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B // sequenced, genesis or injected batch 1 - special batches 0,1 will always be closed, if next batch has blocks, bn must be closed batch.Closed = seq != nil || batchNo == 0 || batchNo == 1 || found - // verification - ver, err := hermezDb.GetVerificationByBatchNo(batchNo) + // verification - if we can't find one, maybe this batch was verified along with a higher batch number + ver, err := hermezDb.GetVerificationByBatchNoOrHighest(batchNo) if err != nil { return nil, err } + if ver == nil { + // TODO: this is the actual unverified batch behaviour probably set 0x00 + } if ver != nil { batch.VerifyBatchTxHash = &ver.L1TxHash - } - // exit roots (MainnetExitRoot, RollupExitRoot) - infoTreeUpdate, err := hermezDb.GetL1InfoTreeUpdateByGer(batchGer) - if err != nil { - return nil, err - } - if infoTreeUpdate != nil { - batch.MainnetExitRoot = infoTreeUpdate.MainnetExitRoot - batch.RollupExitRoot = infoTreeUpdate.RollupExitRoot + verificationBatch := ver.BatchNo + verifiedBatchHighestBlock, err := hermezDb.GetHighestBlockInBatch(verificationBatch) + if err != nil { + return nil, err + } + + 
verifiedBatchGer, err := hermezDb.GetBlockGlobalExitRoot(verifiedBatchHighestBlock) + if err != nil { + return nil, err + } + + // exit roots (MainnetExitRoot, RollupExitRoot) + infoTreeUpdate, err := hermezDb.GetL1InfoTreeUpdateByGer(verifiedBatchGer) + if err != nil { + return nil, err + } + if infoTreeUpdate != nil { + batch.MainnetExitRoot = infoTreeUpdate.MainnetExitRoot + batch.RollupExitRoot = infoTreeUpdate.RollupExitRoot + } } // local exit root @@ -653,15 +671,10 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, batchNumber rpc.B return nil, err } - itu, err := hermezDb.GetL1InfoTreeUpdateByGer(prevBatchGer) - if err != nil { - return nil, err - } - - if itu == nil || batch.MainnetExitRoot == itu.MainnetExitRoot { + if batchGer == prevBatchGer { + batch.GlobalExitRoot = common.Hash{} batch.MainnetExitRoot = common.Hash{} batch.RollupExitRoot = common.Hash{} - batch.GlobalExitRoot = common.Hash{} } } @@ -848,6 +861,7 @@ func (api *ZkEvmAPIImpl) buildGenerator(ctx context.Context, tx kv.Tx, witnessMo api.ethApi._agg, api.ethApi._blockReader, chainConfig, + api.config.Zk, api.ethApi._engine, ) @@ -898,9 +912,11 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s type WitnessMode string const ( - WitnessModeNone WitnessMode = "none" - WitnessModeFull WitnessMode = "full" - WitnessModeTrimmed WitnessMode = "trimmed" + WitnessModeNone WitnessMode = "none" + WitnessModeFull WitnessMode = "full" // if the node mode is "full witness" - will return witness from cache + WitnessModeTrimmed WitnessMode = "trimmed" // if the node mode is "partial witness" - will return witness from cache + WitnessModeFullRegen WitnessMode = "full_regen" // forces regenerate no matter the node mode + WitnessModeTrimmedRegen WitnessMode = "trimmed_regen" // forces regenerate no matter the node mode ) func (api *ZkEvmAPIImpl) GetBatchWitness(ctx context.Context, batchNumber uint64, mode *WitnessMode) (interface{}, error) { @@ -929,9 +945,14 
@@ func (api *ZkEvmAPIImpl) GetBatchWitness(ctx context.Context, batchNumber uint64 checkedMode = *mode } - // we only want to check the cache if no special run mode has been supplied. If a run mode is supplied - // we need to always regenerate the witness from scratch - if checkedMode == WitnessModeNone { + isWitnessModeNone := checkedMode == WitnessModeNone + rpcModeMatchesNodeMode := + checkedMode == WitnessModeFull && api.config.WitnessFull || + checkedMode == WitnessModeTrimmed && !api.config.WitnessFull + // we only want to check the cache if no special run mode has been supplied. + // or if requested mode matches the node mode + // otherwise regenerate it + if isWitnessModeNone || rpcModeMatchesNodeMode { hermezDb := hermez_db.NewHermezDbReader(tx) witnessCached, err := hermezDb.GetWitness(batchNumber) if err != nil { @@ -1020,6 +1041,59 @@ func (api *ZkEvmAPIImpl) GetLatestGlobalExitRoot(ctx context.Context) (common.Ha return ger, nil } +type l1InfoTreeData struct { + Index uint64 `json:"index"` + Ger common.Hash `json:"ger"` + InfoRoot common.Hash `json:"info_root"` + MainnetExitRoot common.Hash `json:"mainnet_exit_root"` + RollupExitRoot common.Hash `json:"rollup_exit_root"` + ParentHash common.Hash `json:"parent_hash"` + MinTimestamp uint64 `json:"min_timestamp"` + BlockNumber uint64 `json:"block_number"` +} + +func (api *ZkEvmAPIImpl) GetExitRootTable(ctx context.Context) ([]l1InfoTreeData, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + hermezDb := hermez_db.NewHermezDbReader(tx) + + indexToRoots, err := hermezDb.GetL1InfoTreeIndexToRoots() + if err != nil { + return nil, err + } + + var result []l1InfoTreeData + + var idx uint64 = 1 + for { + info, err := hermezDb.GetL1InfoTreeUpdate(idx) + if err != nil { + return nil, err + } + if info == nil || info.Index == 0 { + break + } + data := l1InfoTreeData{ + Index: info.Index, + Ger: info.GER, + MainnetExitRoot: info.MainnetExitRoot, + 
RollupExitRoot: info.RollupExitRoot, + ParentHash: info.ParentHash, + MinTimestamp: info.Timestamp, + BlockNumber: info.BlockNumber, + InfoRoot: indexToRoots[info.Index], + } + result = append(result, data) + idx++ + } + + return result, nil +} + func (api *ZkEvmAPIImpl) sendGetBatchWitness(rpcUrl string, batchNumber uint64, mode *WitnessMode) (json.RawMessage, error) { res, err := client.JSONRPCCall(rpcUrl, "zkevm_getBatchWitness", batchNumber, mode) if err != nil { diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 6d02ce472a5..9e669a2036f 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -143,6 +143,7 @@ func NewSequencerZkStages(ctx context.Context, &cfg.Miner, txPool, txPoolDb, + uint16(cfg.YieldSize), ), stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg), zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk), diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index becca1a5721..9647c252e82 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -24,6 +24,11 @@ type EntityDefinition struct { Definition reflect.Type } +const ( + versionProto = 2 // converted to proto + versionAddedBlockEnd = 3 // Added block end +) + type StreamClient struct { ctx context.Context server string // Server address to connect IP:port @@ -41,7 +46,9 @@ type StreamClient struct { // Channels batchStartChan chan types.BatchStart + batchEndChan chan types.BatchEnd l2BlockChan chan types.FullL2Block + l2TxChan chan types.L2TransactionProto gerUpdatesChan chan types.GerUpdate // NB: unused from etrog onwards (forkid 7) // keeps track of the latest fork from the stream to assign to l2 blocks @@ -69,7 +76,8 @@ func NewClient(ctx context.Context, server string, version int, checkTimeout tim version: version, streamType: StSequencer, id: "", - batchStartChan: make(chan types.BatchStart, 
1000), + batchStartChan: make(chan types.BatchStart, 100), + batchEndChan: make(chan types.BatchEnd, 100), l2BlockChan: make(chan types.FullL2Block, 100000), gerUpdatesChan: make(chan types.GerUpdate, 1000), currentFork: uint64(latestDownloadedForkId), @@ -78,12 +86,22 @@ func NewClient(ctx context.Context, server string, version int, checkTimeout tim return c } +func (c *StreamClient) IsVersion3() bool { + return c.version >= versionAddedBlockEnd +} + func (c *StreamClient) GetBatchStartChan() chan types.BatchStart { return c.batchStartChan } +func (c *StreamClient) GetBatchEndChan() chan types.BatchEnd { + return c.batchEndChan +} func (c *StreamClient) GetL2BlockChan() chan types.FullL2Block { return c.l2BlockChan } +func (c *StreamClient) GetL2TxChan() chan types.L2TransactionProto { + return c.l2TxChan +} func (c *StreamClient) GetGerUpdatesChan() chan types.GerUpdate { return c.gerUpdatesChan } @@ -157,45 +175,63 @@ func (c *StreamClient) GetHeader() error { return nil } -// sends start command, reads entries until limit reached and sends end command -func (c *StreamClient) ReadEntries(bookmark *types.BookmarkProto, l2BlocksAmount int) (*[]types.FullL2Block, *[]types.GerUpdate, []types.BookmarkProto, []types.BookmarkProto, uint64, error) { +func (c *StreamClient) ExecutePerFile(bookmark *types.BookmarkProto, function func(file *types.FileEntry) error) error { // Get header from server if err := c.GetHeader(); err != nil { - return nil, nil, nil, nil, 0, fmt.Errorf("%s get header error: %v", c.id, err) + return fmt.Errorf("%s get header error: %v", c.id, err) } protoBookmark, err := bookmark.Marshal() if err != nil { - return nil, nil, nil, nil, 0, fmt.Errorf("failed to marshal bookmark: %v", err) + return fmt.Errorf("failed to marshal bookmark: %v", err) } if err := c.initiateDownloadBookmark(protoBookmark); err != nil { - return nil, nil, nil, nil, 0, err + return err } + count := uint64(0) + logTicker := time.NewTicker(10 * time.Second) - fullL2Blocks, 
gerUpates, batchBookmarks, blockBookmarks, entriesRead, err := c.readFullL2Blocks(l2BlocksAmount) - if err != nil { - return nil, nil, nil, nil, 0, err + for { + select { + case <-logTicker.C: + fmt.Println("Entries read count: ", count) + default: + } + if c.Header.TotalEntries == count { + break + } + file, err := c.readFileEntry() + if err != nil { + return fmt.Errorf("error reading file entry: %v", err) + } + if err := function(file); err != nil { + return fmt.Errorf("error executing function: %v", err) + + } + count++ } - return fullL2Blocks, gerUpates, batchBookmarks, blockBookmarks, entriesRead, nil + return nil } -// reads entries to the end of the stream -// at end will wait for new entries to arrive -func (c *StreamClient) ReadAllEntriesToChannel() error { - c.streaming.Store(true) - defer c.streaming.Store(false) - - // if connection is lost, try to reconnect - // this occurs when all 5 attempts failed on previous run +func (c *StreamClient) EnsureConnected() (bool, error) { if c.conn == nil { if err := c.tryReConnect(); err != nil { - return fmt.Errorf("failed to reconnect the datastream client: %W", err) + return false, fmt.Errorf("failed to reconnect the datastream client: %w", err) } log.Info("[datastream_client] Datastream client connected.") } + return true, nil +} + +// reads entries to the end of the stream +// at end will wait for new entries to arrive +func (c *StreamClient) ReadAllEntriesToChannel() error { + c.streaming.Store(true) + defer c.streaming.Store(false) + var bookmark *types.BookmarkProto progress := c.progress.Load() if progress == 0 { @@ -225,7 +261,7 @@ func (c *StreamClient) ReadAllEntriesToChannel() error { } // reset the channels as there could be data ahead of the bookmark we want to track here. 
- c.resetChannels() + // c.resetChannels() return err2 } @@ -285,14 +321,15 @@ LOOP: c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout)) } - fullBlock, batchStart, batchEnd, gerUpdates, batchBookmark, blockBookmark, _, _, localErr := c.readFullBlockProto() + fullBlock, batchStart, batchEnd, gerUpdate, batchBookmark, blockBookmark, localErr := c.readFullBlockProto() if localErr != nil { err = localErr break } + c.lastWrittenTime.Store(time.Now().UnixNano()) // skip over bookmarks (but only when fullblock is nil or will miss l2 blocks) - if (batchBookmark != nil || blockBookmark != nil) && fullBlock == nil { + if batchBookmark != nil || blockBookmark != nil { continue } @@ -300,52 +337,31 @@ LOOP: if batchStart != nil { c.currentFork = (*batchStart).ForkId c.batchStartChan <- *batchStart - continue - } - - if gerUpdates != nil { - for _, gerUpdate := range *gerUpdates { - c.gerUpdatesChan <- gerUpdate - } } - // we could have a scenario where a batch start is immediately followed by a batch end, - // so we need to report an error if the batch end is nil, and we have no block to process - if fullBlock == nil && batchEnd == nil { - return fmt.Errorf("block is nil, batch") + if gerUpdate != nil { + c.gerUpdatesChan <- *gerUpdate } if batchEnd != nil { // this check was inside c.readFullBlockProto() but it is better to move it here - if fullBlock == nil { - fullBlock = &types.FullL2Block{} - } - fullBlock.BatchEnd = true - fullBlock.LocalExitRoot = batchEnd.LocalExitRoot + c.batchEndChan <- *batchEnd } // ensure the block is assigned the currently known fork if fullBlock != nil { fullBlock.ForkId = c.currentFork + log.Trace("writing block to channel", "blockNumber", fullBlock.L2BlockNumber, "batchNumber", fullBlock.BatchNumber) + c.l2BlockChan <- *fullBlock } - - c.lastWrittenTime.Store(time.Now().UnixNano()) - log.Trace("writing block to channel", "blockNumber", fullBlock.L2BlockNumber, "batchNumber", fullBlock.BatchNumber) - c.l2BlockChan <- *fullBlock } return err 
} -func (c *StreamClient) resetChannels() { - c.batchStartChan = make(chan types.BatchStart, 1000) - c.l2BlockChan = make(chan types.FullL2Block, 100000) - c.gerUpdatesChan = make(chan types.GerUpdate, 1000) -} - func (c *StreamClient) tryReConnect() error { var err error - for i := 0; i < 5; i++ { + for i := 0; i < 50; i++ { if c.conn != nil { if err := c.conn.Close(); err != nil { return err @@ -362,193 +378,120 @@ func (c *StreamClient) tryReConnect() error { return err } -// reads a set amount of l2blocks from the server and returns them -// returns the parsed FullL2Blocks with transactions and the amount of entries read -func (c *StreamClient) readFullL2Blocks(l2BlocksAmount int) (*[]types.FullL2Block, *[]types.GerUpdate, []types.BookmarkProto, []types.BookmarkProto, uint64, error) { - fullL2Blocks := []types.FullL2Block{} - totalGerUpdates := []types.GerUpdate{} - entriesRead := uint64(0) - batchBookmarks := []types.BookmarkProto{} - blockBookmarks := []types.BookmarkProto{} - fromEntry := uint64(0) - - for { - if len(fullL2Blocks) >= l2BlocksAmount || entriesRead+fromEntry >= c.Header.TotalEntries { - break - } - - fullBlock, _, _, gerUpdates, batchBookmark, blockBookmark, fe, er, err := c.readFullBlockProto() - - if err != nil { - return nil, nil, nil, nil, 0, fmt.Errorf("failed to read full block: %v", err) - } - - // skip over bookmarks (but only when fullblock is nil or will miss l2 blocks) - if (batchBookmark != nil || blockBookmark != nil) && fullBlock == nil { - continue - } - - if fromEntry == 0 { - fromEntry = fe - } - - if gerUpdates != nil { - totalGerUpdates = append(totalGerUpdates, *gerUpdates...) 
- } - entriesRead += er - if fullBlock != nil { - fullL2Blocks = append(fullL2Blocks, *fullBlock) - } - if batchBookmark != nil { - batchBookmarks = append(batchBookmarks, *batchBookmark) - } - if blockBookmark != nil { - blockBookmarks = append(blockBookmarks, *blockBookmark) - } - } - - return &fullL2Blocks, &totalGerUpdates, batchBookmarks, blockBookmarks, entriesRead, nil -} - -func (c *StreamClient) readFullBlockProto() (*types.FullL2Block, *types.BatchStart, *types.BatchEnd, *[]types.GerUpdate, *types.BookmarkProto, *types.BookmarkProto, uint64, uint64, error) { - entriesRead := uint64(0) - +func (c *StreamClient) readFullBlockProto() ( + l2Block *types.FullL2Block, + batchStart *types.BatchStart, + batchEnd *types.BatchEnd, + gerUpdate *types.GerUpdate, + batchBookmark *types.BookmarkProto, + blockBookmark *types.BookmarkProto, + err error, +) { file, err := c.readFileEntry() if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("read file entry error: %v", err) - } - entriesRead++ - fromEntry := file.EntryNum - - gerUpdates := []types.GerUpdate{} - var batchBookmark *types.BookmarkProto - var blockBookmark *types.BookmarkProto - var batchStart *types.BatchStart - var batchEnd *types.BatchEnd - - for !file.IsL2Block() && !file.IsBatchStart() && !file.IsBatchEnd() { - if file.IsBookmark() { - bookmark, err := types.UnmarshalBookmark(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse bookmark error: %v", err) - } - if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH { - batchBookmark = bookmark - log.Trace("batch bookmark", "bookmark", bookmark) - return nil, nil, nil, &gerUpdates, batchBookmark, nil, 0, 0, nil - } else { - blockBookmark = bookmark - log.Trace("block bookmark", "bookmark", bookmark) - return nil, nil, nil, &gerUpdates, nil, blockBookmark, 0, 0, nil - } - } else if file.IsGerUpdate() { - gerUpdate, err := types.DecodeGerUpdateProto(file.Data) - if err != nil { - 
return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse gerUpdate error: %v", err) - } - log.Trace("ger update", "ger", gerUpdate) - gerUpdates = append(gerUpdates, *gerUpdate) - } else { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("unexpected entry type: %d", file.EntryType) - } - - file, err = c.readFileEntry() - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("read file entry error: %v", err) - } - entriesRead++ - } - - // If starting with a batch, return so it can be held whilst blocks are added to it - if file.IsBatchStart() { - batchStart, err = types.UnmarshalBatchStart(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse batch start error: %v", err) - } - log.Trace("batch start", "batchStart", batchStart) - return nil, batchStart, nil, &gerUpdates, nil, nil, fromEntry, entriesRead, nil + err = fmt.Errorf("read file entry error: %v", err) + return } - if file.IsBatchEnd() { - batchEnd, err = types.UnmarshalBatchEnd(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse batch end error: %v", err) + switch file.EntryType { + case types.BookmarkEntryType: + var bookmark *types.BookmarkProto + if bookmark, err = types.UnmarshalBookmark(file.Data); err != nil { + return } - log.Trace("batch end", "batchEnd", batchEnd) - // we might not have a block here if the batch end was immediately after the batch start - return nil, nil, batchEnd, &gerUpdates, nil, nil, fromEntry, entriesRead, nil - } - - // Now handle the L2 block - var l2Block *types.FullL2Block - if file.IsL2Block() { - l2Block, err = types.UnmarshalL2Block(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse L2 block error: %v", err) - } - log.Trace("l2 block", "l2Block", l2Block) + if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH { + batchBookmark = bookmark + return + } else { + blockBookmark = bookmark + return + } + case 
types.EntryTypeGerUpdate: + if gerUpdate, err = types.DecodeGerUpdateProto(file.Data); err != nil { + return + } + log.Trace("ger update", "ger", gerUpdate) + return + case types.EntryTypeBatchStart: + if batchStart, err = types.UnmarshalBatchStart(file.Data); err != nil { + return + } + return + case types.EntryTypeBatchEnd: + if batchEnd, err = types.UnmarshalBatchEnd(file.Data); err != nil { + return + } + return + case types.EntryTypeL2Block: + if l2Block, err = types.UnmarshalL2Block(file.Data); err != nil { + return + } + + txs := []types.L2TransactionProto{} + + var innerFile *types.FileEntry + var l2Tx *types.L2TransactionProto + LOOP: + for { + if innerFile, err = c.readFileEntry(); err != nil { + return + } - file, err = c.readFileEntry() - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("read file entry error: %v", err) - } - entriesRead++ - - // if not batch end or bookmark (l2 block - error on batch), then it must be a transaction - for !file.IsBatchEnd() && !file.IsBookmark() { - if file.IsL2Tx() { - l2Tx, err := types.UnmarshalTx(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse L2 transaction error: %v", err) + if innerFile.IsL2Tx() { + if l2Tx, err = types.UnmarshalTx(innerFile.Data); err != nil { + return + } + txs = append(txs, *l2Tx) + } else if innerFile.IsL2BlockEnd() { + var l2BlockEnd *types.L2BlockEndProto + if l2BlockEnd, err = types.UnmarshalL2BlockEnd(innerFile.Data); err != nil { + return + } + if l2BlockEnd.GetBlockNumber() != l2Block.L2BlockNumber { + err = fmt.Errorf("block end number (%d) not equal to block number (%d)", l2BlockEnd.GetBlockNumber(), l2Block.L2BlockNumber) + return + } + break LOOP + } else if innerFile.IsBookmark() { + var bookmark *types.BookmarkProto + if bookmark, err = types.UnmarshalBookmark(innerFile.Data); err != nil || bookmark == nil { + return + } + if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK { + break LOOP + 
} else { + err = fmt.Errorf("unexpected bookmark type inside block: %v", bookmark.Type()) + return + } + } else if innerFile.IsBatchEnd() { + if batchEnd, err = types.UnmarshalBatchEnd(file.Data); err != nil { + return } - l2Block.L2Txs = append(l2Block.L2Txs, *l2Tx) - log.Trace("l2tx", "tx", l2Tx) + break LOOP } else { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("unexpected entry type, expected transaction or batch end: %d", file.EntryType) + err = fmt.Errorf("unexpected entry type inside a block: %d", innerFile.EntryType) + return } - - file, err = c.readFileEntry() - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("read file entry error: %v", err) - } - entriesRead++ } - if file.IsBatchEnd() { - batchEnd, err = types.UnmarshalBatchEnd(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse batch end error: %v", err) - } - log.Trace("batch end", "batchEnd", batchEnd) - } - if file.IsBookmark() { - bookmark, err := types.UnmarshalBookmark(file.Data) - if err != nil { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("parse bookmark error: %v", err) - } - if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH { - batchBookmark = bookmark - log.Trace("batch bookmark", "bookmark", bookmark) - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("unexpected bookmark type: %d", bookmark.BookmarkType()) - } else { - blockBookmark = bookmark - log.Trace("block bookmark", "bookmark", bookmark) - } - } - } else { - return nil, nil, nil, nil, nil, nil, 0, 0, fmt.Errorf("unexpected entry type: %d", file.EntryType) + l2Block.L2Txs = txs + return + case types.EntryTypeL2Tx: + err = fmt.Errorf("unexpected l2Tx out of block") + return + default: + err = fmt.Errorf("unexpected entry type: %d", file.EntryType) + return } - - return l2Block, batchStart, batchEnd, &gerUpdates, batchBookmark, blockBookmark, fromEntry, entriesRead, nil } // reads file bytes from socket and tries to parse 
them // returns the parsed FileEntry -func (c *StreamClient) readFileEntry() (*types.FileEntry, error) { +func (c *StreamClient) readFileEntry() (file *types.FileEntry, err error) { // Read packet type packet, err := readBuffer(c.conn, 1) if err != nil { - return &types.FileEntry{}, fmt.Errorf("failed to read packet type: %v", err) + return file, fmt.Errorf("failed to read packet type: %v", err) } // Check packet type @@ -556,53 +499,52 @@ func (c *StreamClient) readFileEntry() (*types.FileEntry, error) { // Read server result entry for the command r, err := c.readResultEntry(packet) if err != nil { - return &types.FileEntry{}, err + return file, err } if err := r.GetError(); err != nil { - return &types.FileEntry{}, fmt.Errorf("got Result error code %d: %v", r.ErrorNum, err) + return file, fmt.Errorf("got Result error code %d: %v", r.ErrorNum, err) } - return &types.FileEntry{}, nil + return file, nil } else if packet[0] != PtData { - return &types.FileEntry{}, fmt.Errorf("error expecting data packet type %d and received %d", PtData, packet[0]) + return file, fmt.Errorf("error expecting data packet type %d and received %d", PtData, packet[0]) } // Read the rest of fixed size fields buffer, err := readBuffer(c.conn, types.FileEntryMinSize-1) if err != nil { - return &types.FileEntry{}, fmt.Errorf("error reading file bytes: %v", err) + return file, fmt.Errorf("error reading file bytes: %v", err) } buffer = append(packet, buffer...) 
// Read variable field (data) length := binary.BigEndian.Uint32(buffer[1:5]) if length < types.FileEntryMinSize { - return &types.FileEntry{}, errors.New("error reading data entry: wrong data length") + return file, errors.New("error reading data entry: wrong data length") } // Read rest of the file data bufferAux, err := readBuffer(c.conn, length-types.FileEntryMinSize) if err != nil { - return &types.FileEntry{}, fmt.Errorf("error reading file data bytes: %v", err) + return file, fmt.Errorf("error reading file data bytes: %v", err) } buffer = append(buffer, bufferAux...) // Decode binary data to data entry struct - file, err := types.DecodeFileEntry(buffer) - if err != nil { - return &types.FileEntry{}, fmt.Errorf("decode file entry error: %v", err) + if file, err = types.DecodeFileEntry(buffer); err != nil { + return file, fmt.Errorf("decode file entry error: %v", err) } - return file, nil + return } // reads header bytes from socket and tries to parse them // returns the parsed HeaderEntry -func (c *StreamClient) readHeaderEntry() (*types.HeaderEntry, error) { +func (c *StreamClient) readHeaderEntry() (h *types.HeaderEntry, err error) { // Read header stream bytes binaryHeader, err := readBuffer(c.conn, types.HeaderSizePreEtrog) if err != nil { - return &types.HeaderEntry{}, fmt.Errorf("failed to read header bytes %v", err) + return h, fmt.Errorf("failed to read header bytes %v", err) } headLength := binary.BigEndian.Uint32(binaryHeader[1:5]) @@ -610,51 +552,49 @@ func (c *StreamClient) readHeaderEntry() (*types.HeaderEntry, error) { // Read the rest of fixed size fields buffer, err := readBuffer(c.conn, types.HeaderSize-types.HeaderSizePreEtrog) if err != nil { - return &types.HeaderEntry{}, fmt.Errorf("failed to read header bytes %v", err) + return h, fmt.Errorf("failed to read header bytes %v", err) } binaryHeader = append(binaryHeader, buffer...) 
} // Decode bytes stream to header entry struct - h, err := types.DecodeHeaderEntry(binaryHeader) - if err != nil { - return &types.HeaderEntry{}, fmt.Errorf("error decoding binary header: %v", err) + if h, err = types.DecodeHeaderEntry(binaryHeader); err != nil { + return h, fmt.Errorf("error decoding binary header: %v", err) } - return h, nil + return } // reads result bytes and tries to parse them // returns the parsed ResultEntry -func (c *StreamClient) readResultEntry(packet []byte) (*types.ResultEntry, error) { +func (c *StreamClient) readResultEntry(packet []byte) (re *types.ResultEntry, err error) { if len(packet) != 1 { - return &types.ResultEntry{}, fmt.Errorf("expected packet size of 1, got: %d", len(packet)) + return re, fmt.Errorf("expected packet size of 1, got: %d", len(packet)) } // Read the rest of fixed size fields buffer, err := readBuffer(c.conn, types.ResultEntryMinSize-1) if err != nil { - return &types.ResultEntry{}, fmt.Errorf("failed to read main result bytes %v", err) + return re, fmt.Errorf("failed to read main result bytes %v", err) } buffer = append(packet, buffer...) // Read variable field (errStr) length := binary.BigEndian.Uint32(buffer[1:5]) if length < types.ResultEntryMinSize { - return &types.ResultEntry{}, fmt.Errorf("%s Error reading result entry", c.id) + return re, fmt.Errorf("%s Error reading result entry", c.id) } // read the rest of the result bufferAux, err := readBuffer(c.conn, length-types.ResultEntryMinSize) if err != nil { - return &types.ResultEntry{}, fmt.Errorf("failed to read result errStr bytes %v", err) + return re, fmt.Errorf("failed to read result errStr bytes %v", err) } buffer = append(buffer, bufferAux...) 
// Decode binary entry result - re, err := types.DecodeResultEntry(buffer) - if err != nil { - return &types.ResultEntry{}, fmt.Errorf("decode result entry error: %v", err) + if re, err = types.DecodeResultEntry(buffer); err != nil { + return re, fmt.Errorf("decode result entry error: %v", err) } return re, nil diff --git a/zk/datastream/client/stream_client_test.go b/zk/datastream/client/stream_client_test.go index 24dc4144f4c..f1ec21d59c2 100644 --- a/zk/datastream/client/stream_client_test.go +++ b/zk/datastream/client/stream_client_test.go @@ -15,14 +15,14 @@ func Test_readHeaderEntry(t *testing.T) { type testCase struct { name string input []byte - expectedResult types.HeaderEntry + expectedResult *types.HeaderEntry expectedError error } testCases := []testCase{ { name: "Happy path", input: []byte{101, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 64}, - expectedResult: types.HeaderEntry{ + expectedResult: &types.HeaderEntry{ PacketType: 101, HeadLength: 29, StreamType: types.StreamType(1), @@ -34,7 +34,7 @@ func Test_readHeaderEntry(t *testing.T) { { name: "Invalid byte array length", input: []byte{20, 21, 22, 23, 24, 20}, - expectedResult: types.HeaderEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("failed to read header bytes reading from server: unexpected EOF"), }, } @@ -54,7 +54,7 @@ func Test_readHeaderEntry(t *testing.T) { header, err := c.readHeaderEntry() require.Equal(t, testCase.expectedError, err) - assert.DeepEqual(t, testCase.expectedResult, *header) + assert.DeepEqual(t, testCase.expectedResult, header) }) } } @@ -63,14 +63,14 @@ func Test_readResultEntry(t *testing.T) { type testCase struct { name string input []byte - expectedResult types.ResultEntry + expectedResult *types.ResultEntry expectedError error } testCases := []testCase{ { name: "Happy path", input: []byte{0, 0, 0, 9, 0, 0, 0, 0}, - expectedResult: types.ResultEntry{ + expectedResult: &types.ResultEntry{ PacketType: 1, Length: 9, 
ErrorNum: 0, @@ -81,7 +81,7 @@ func Test_readResultEntry(t *testing.T) { { name: "Happy path - error str length", input: []byte{0, 0, 0, 19, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - expectedResult: types.ResultEntry{ + expectedResult: &types.ResultEntry{ PacketType: 1, Length: 19, ErrorNum: 0, @@ -92,13 +92,13 @@ func Test_readResultEntry(t *testing.T) { { name: "Invalid byte array length", input: []byte{20, 21, 22, 23, 24, 20}, - expectedResult: types.ResultEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("failed to read main result bytes reading from server: unexpected EOF"), }, { name: "Invalid error length", input: []byte{0, 0, 0, 12, 0, 0, 0, 0, 20, 21}, - expectedResult: types.ResultEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("failed to read result errStr bytes reading from server: unexpected EOF"), }, } @@ -118,7 +118,7 @@ func Test_readResultEntry(t *testing.T) { result, err := c.readResultEntry([]byte{1}) require.Equal(t, testCase.expectedError, err) - assert.DeepEqual(t, testCase.expectedResult, *result) + assert.DeepEqual(t, testCase.expectedResult, result) }) } } @@ -127,14 +127,14 @@ func Test_readFileEntry(t *testing.T) { type testCase struct { name string input []byte - expectedResult types.FileEntry + expectedResult *types.FileEntry expectedError error } testCases := []testCase{ { name: "Happy path", input: []byte{2, 0, 0, 0, 29, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 64}, - expectedResult: types.FileEntry{ + expectedResult: &types.FileEntry{ PacketType: 2, Length: 29, EntryType: types.EntryType(1), @@ -145,7 +145,7 @@ func Test_readFileEntry(t *testing.T) { }, { name: "Happy path - no data", input: []byte{2, 0, 0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45}, - expectedResult: types.FileEntry{ + expectedResult: &types.FileEntry{ PacketType: 2, Length: 17, EntryType: types.EntryType(1), @@ -157,18 +157,18 @@ func Test_readFileEntry(t *testing.T) { { name: "Invalid packet type", input: 
[]byte{5, 0, 0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45}, - expectedResult: types.FileEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("error expecting data packet type 2 and received 5"), }, { name: "Invalid byte array length", input: []byte{2, 21, 22, 23, 24, 20}, - expectedResult: types.FileEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("error reading file bytes: reading from server: unexpected EOF"), }, { name: "Invalid data length", input: []byte{2, 0, 0, 0, 31, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 64}, - expectedResult: types.FileEntry{}, + expectedResult: nil, expectedError: fmt.Errorf("error reading file data bytes: reading from server: unexpected EOF"), }, } @@ -187,7 +187,7 @@ func Test_readFileEntry(t *testing.T) { result, err := c.readFileEntry() require.Equal(t, testCase.expectedError, err) - assert.DeepEqual(t, testCase.expectedResult, *result) + assert.DeepEqual(t, testCase.expectedResult, result) }) } } diff --git a/zk/datastream/proto/datastream.proto b/zk/datastream/proto/datastream.proto index 6bf2847c191..4dfce78ffd9 100644 --- a/zk/datastream/proto/datastream.proto +++ b/zk/datastream/proto/datastream.proto @@ -36,6 +36,11 @@ message L2Block { Debug debug = 14; } + +message L2BlockEnd { + uint64 number = 1; +} + message Transaction { uint64 l2block_number = 1; uint64 index = 2; @@ -79,6 +84,7 @@ enum EntryType { ENTRY_TYPE_TRANSACTION = 3; ENTRY_TYPE_BATCH_END = 4; ENTRY_TYPE_UPDATE_GER = 5; + ENTRY_TYPE_L2_BLOCK_END = 6; } enum BatchType { diff --git a/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream/datastream.pb.go b/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream/datastream.pb.go index 62ffb6a40e8..be8c75c5e92 100644 --- a/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream/datastream.pb.go +++ b/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream/datastream.pb.go @@ -72,12 +72,13 @@ 
func (BookmarkType) EnumDescriptor() ([]byte, []int) { type EntryType int32 const ( - EntryType_ENTRY_TYPE_UNSPECIFIED EntryType = 0 - EntryType_ENTRY_TYPE_BATCH_START EntryType = 1 - EntryType_ENTRY_TYPE_L2_BLOCK EntryType = 2 - EntryType_ENTRY_TYPE_TRANSACTION EntryType = 3 - EntryType_ENTRY_TYPE_BATCH_END EntryType = 4 - EntryType_ENTRY_TYPE_UPDATE_GER EntryType = 5 + EntryType_ENTRY_TYPE_UNSPECIFIED EntryType = 0 + EntryType_ENTRY_TYPE_BATCH_START EntryType = 1 + EntryType_ENTRY_TYPE_L2_BLOCK EntryType = 2 + EntryType_ENTRY_TYPE_TRANSACTION EntryType = 3 + EntryType_ENTRY_TYPE_BATCH_END EntryType = 4 + EntryType_ENTRY_TYPE_UPDATE_GER EntryType = 5 + EntryType_ENTRY_TYPE_L2_BLOCK_END EntryType = 6 ) // Enum value maps for EntryType. @@ -89,14 +90,16 @@ var ( 3: "ENTRY_TYPE_TRANSACTION", 4: "ENTRY_TYPE_BATCH_END", 5: "ENTRY_TYPE_UPDATE_GER", + 6: "ENTRY_TYPE_L2_BLOCK_END", } EntryType_value = map[string]int32{ - "ENTRY_TYPE_UNSPECIFIED": 0, - "ENTRY_TYPE_BATCH_START": 1, - "ENTRY_TYPE_L2_BLOCK": 2, - "ENTRY_TYPE_TRANSACTION": 3, - "ENTRY_TYPE_BATCH_END": 4, - "ENTRY_TYPE_UPDATE_GER": 5, + "ENTRY_TYPE_UNSPECIFIED": 0, + "ENTRY_TYPE_BATCH_START": 1, + "ENTRY_TYPE_L2_BLOCK": 2, + "ENTRY_TYPE_TRANSACTION": 3, + "ENTRY_TYPE_BATCH_END": 4, + "ENTRY_TYPE_UPDATE_GER": 5, + "ENTRY_TYPE_L2_BLOCK_END": 6, } ) @@ -483,6 +486,53 @@ func (x *L2Block) GetDebug() *Debug { return nil } +type L2BlockEnd struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"` +} + +func (x *L2BlockEnd) Reset() { + *x = L2BlockEnd{} + if protoimpl.UnsafeEnabled { + mi := &file_datastream_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *L2BlockEnd) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*L2BlockEnd) ProtoMessage() {} + +func (x *L2BlockEnd) ProtoReflect() 
protoreflect.Message { + mi := &file_datastream_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use L2BlockEnd.ProtoReflect.Descriptor instead. +func (*L2BlockEnd) Descriptor() ([]byte, []int) { + return file_datastream_proto_rawDescGZIP(), []int{3} +} + +func (x *L2BlockEnd) GetNumber() uint64 { + if x != nil { + return x.Number + } + return 0 +} + type Transaction struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -500,7 +550,7 @@ type Transaction struct { func (x *Transaction) Reset() { *x = Transaction{} if protoimpl.UnsafeEnabled { - mi := &file_datastream_proto_msgTypes[3] + mi := &file_datastream_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -513,7 +563,7 @@ func (x *Transaction) String() string { func (*Transaction) ProtoMessage() {} func (x *Transaction) ProtoReflect() protoreflect.Message { - mi := &file_datastream_proto_msgTypes[3] + mi := &file_datastream_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -526,7 +576,7 @@ func (x *Transaction) ProtoReflect() protoreflect.Message { // Deprecated: Use Transaction.ProtoReflect.Descriptor instead. 
func (*Transaction) Descriptor() ([]byte, []int) { - return file_datastream_proto_rawDescGZIP(), []int{3} + return file_datastream_proto_rawDescGZIP(), []int{4} } func (x *Transaction) GetL2BlockNumber() uint64 { @@ -596,7 +646,7 @@ type UpdateGER struct { func (x *UpdateGER) Reset() { *x = UpdateGER{} if protoimpl.UnsafeEnabled { - mi := &file_datastream_proto_msgTypes[4] + mi := &file_datastream_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -609,7 +659,7 @@ func (x *UpdateGER) String() string { func (*UpdateGER) ProtoMessage() {} func (x *UpdateGER) ProtoReflect() protoreflect.Message { - mi := &file_datastream_proto_msgTypes[4] + mi := &file_datastream_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -622,7 +672,7 @@ func (x *UpdateGER) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateGER.ProtoReflect.Descriptor instead. func (*UpdateGER) Descriptor() ([]byte, []int) { - return file_datastream_proto_rawDescGZIP(), []int{4} + return file_datastream_proto_rawDescGZIP(), []int{5} } func (x *UpdateGER) GetBatchNumber() uint64 { @@ -693,7 +743,7 @@ type BookMark struct { func (x *BookMark) Reset() { *x = BookMark{} if protoimpl.UnsafeEnabled { - mi := &file_datastream_proto_msgTypes[5] + mi := &file_datastream_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -706,7 +756,7 @@ func (x *BookMark) String() string { func (*BookMark) ProtoMessage() {} func (x *BookMark) ProtoReflect() protoreflect.Message { - mi := &file_datastream_proto_msgTypes[5] + mi := &file_datastream_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -719,7 +769,7 @@ func (x *BookMark) ProtoReflect() protoreflect.Message { // Deprecated: Use BookMark.ProtoReflect.Descriptor instead. 
func (*BookMark) Descriptor() ([]byte, []int) { - return file_datastream_proto_rawDescGZIP(), []int{5} + return file_datastream_proto_rawDescGZIP(), []int{6} } func (x *BookMark) GetType() BookmarkType { @@ -747,7 +797,7 @@ type Debug struct { func (x *Debug) Reset() { *x = Debug{} if protoimpl.UnsafeEnabled { - mi := &file_datastream_proto_msgTypes[6] + mi := &file_datastream_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -760,7 +810,7 @@ func (x *Debug) String() string { func (*Debug) ProtoMessage() {} func (x *Debug) ProtoReflect() protoreflect.Message { - mi := &file_datastream_proto_msgTypes[6] + mi := &file_datastream_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -773,7 +823,7 @@ func (x *Debug) ProtoReflect() protoreflect.Message { // Deprecated: Use Debug.ProtoReflect.Descriptor instead. func (*Debug) Descriptor() ([]byte, []int) { - return file_datastream_proto_rawDescGZIP(), []int{6} + return file_datastream_proto_rawDescGZIP(), []int{7} } func (x *Debug) GetMessage() string { @@ -840,79 +890,83 @@ var file_datastream_proto_rawDesc = []byte{ 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x52, 0x05, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x94, 0x02, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x32, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6c, - 0x32, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 
0x64, - 0x65, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x1e, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x5f, 0x70, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x1b, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x50, 0x72, 0x69, - 0x63, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0d, - 0x69, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x2a, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x62, 0x75, 0x67, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x91, 0x02, 0x0a, - 0x09, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x45, 0x52, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x28, 0x0a, 0x10, 0x67, - 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x45, 0x78, 0x69, - 0x74, 0x52, 0x6f, 0x6f, 0x74, 
0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, - 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x22, 0x51, 0x0a, 0x08, 0x42, 0x6f, 0x6f, 0x6b, 0x4d, 0x61, 0x72, 0x6b, 0x12, 0x2f, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6f, 0x6f, 0x6b, 0x6d, - 0x61, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x05, 0x44, 0x65, 0x62, 0x75, 0x67, 0x12, 0x18, 0x0a, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x62, 0x0a, 0x0c, 0x42, 0x6f, 0x6f, 0x6b, 0x6d, 0x61, - 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x42, 0x4f, 0x4f, 0x4b, 0x4d, 0x41, - 0x52, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x4f, 
0x4f, 0x4b, 0x4d, 0x41, 0x52, - 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x1a, - 0x0a, 0x16, 0x42, 0x4f, 0x4f, 0x4b, 0x4d, 0x41, 0x52, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x4c, 0x32, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x2a, 0xad, 0x01, 0x0a, 0x09, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x4e, 0x54, 0x52, - 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, - 0x32, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x4e, 0x54, - 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x45, 0x4e, 0x44, 0x10, 0x04, 0x12, - 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x5f, 0x47, 0x45, 0x52, 0x10, 0x05, 0x2a, 0x87, 0x01, 0x0a, 0x09, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x42, 0x41, 0x54, 0x43, - 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, - 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4f, 0x52, 0x43, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, - 0x45, 
0x5f, 0x49, 0x4e, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, - 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x10, 0x04, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x48, 0x65, 0x72, 0x6d, - 0x65, 0x7a, 0x2f, 0x7a, 0x6b, 0x65, 0x76, 0x6d, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x32, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x45, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x94, 0x02, 0x0a, 0x0b, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6c, + 0x32, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6c, 0x32, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x12, 0x43, 0x0a, + 0x1e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x70, + 0x72, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x47, 
0x61, 0x73, 0x50, 0x72, 0x69, 0x63, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, + 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x6d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x52, 0x05, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x22, 0x91, 0x02, 0x0a, 0x09, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x45, 0x52, + 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x28, 0x0a, 0x10, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x65, 0x78, 0x69, 0x74, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x45, 0x78, 0x69, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, + 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x6f, 0x72, 0x6b, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6b, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 
0x6f, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x52, + 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x22, 0x51, 0x0a, 0x08, 0x42, 0x6f, 0x6f, 0x6b, 0x4d, 0x61, + 0x72, 0x6b, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x42, 0x6f, 0x6f, 0x6b, 0x6d, 0x61, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x05, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x62, 0x0a, 0x0c, + 0x42, 0x6f, 0x6f, 0x6b, 0x6d, 0x61, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, + 0x42, 0x4f, 0x4f, 0x4b, 0x4d, 0x41, 0x52, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x42, + 0x4f, 0x4f, 0x4b, 0x4d, 0x41, 0x52, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, + 0x43, 0x48, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x42, 0x4f, 0x4f, 0x4b, 0x4d, 0x41, 0x52, 0x4b, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x32, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, + 0x2a, 0xca, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, + 0x0a, 0x16, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x4e, + 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 
0x48, 0x5f, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x32, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x02, 0x12, + 0x1a, 0x0a, 0x16, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, + 0x41, 0x4e, 0x53, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x45, + 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, + 0x45, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x47, 0x45, 0x52, 0x10, 0x05, + 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, + 0x32, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x45, 0x4e, 0x44, 0x10, 0x06, 0x2a, 0x87, 0x01, + 0x0a, 0x09, 0x42, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x42, + 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x42, 0x41, 0x54, 0x43, 0x48, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4f, + 0x52, 0x43, 0x45, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x16, 0x0a, 0x12, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x48, + 0x65, 0x72, 0x6d, 0x65, 0x7a, 0x2f, 0x7a, 0x6b, 0x65, 0x76, 0x6d, 0x2d, 0x6e, 0x6f, 0x64, 0x65, + 0x2f, 0x73, 0x74, 
0x61, 0x74, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -928,7 +982,7 @@ func file_datastream_proto_rawDescGZIP() []byte { } var file_datastream_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_datastream_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_datastream_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_datastream_proto_goTypes = []interface{}{ (BookmarkType)(0), // 0: datastream.v1.BookmarkType (EntryType)(0), // 1: datastream.v1.EntryType @@ -936,24 +990,25 @@ var file_datastream_proto_goTypes = []interface{}{ (*BatchStart)(nil), // 3: datastream.v1.BatchStart (*BatchEnd)(nil), // 4: datastream.v1.BatchEnd (*L2Block)(nil), // 5: datastream.v1.L2Block - (*Transaction)(nil), // 6: datastream.v1.Transaction - (*UpdateGER)(nil), // 7: datastream.v1.UpdateGER - (*BookMark)(nil), // 8: datastream.v1.BookMark - (*Debug)(nil), // 9: datastream.v1.Debug + (*L2BlockEnd)(nil), // 6: datastream.v1.L2BlockEnd + (*Transaction)(nil), // 7: datastream.v1.Transaction + (*UpdateGER)(nil), // 8: datastream.v1.UpdateGER + (*BookMark)(nil), // 9: datastream.v1.BookMark + (*Debug)(nil), // 10: datastream.v1.Debug } var file_datastream_proto_depIdxs = []int32{ - 2, // 0: datastream.v1.BatchStart.type:type_name -> datastream.v1.BatchType - 9, // 1: datastream.v1.BatchStart.debug:type_name -> datastream.v1.Debug - 9, // 2: datastream.v1.BatchEnd.debug:type_name -> datastream.v1.Debug - 9, // 3: datastream.v1.L2Block.debug:type_name -> datastream.v1.Debug - 9, // 4: datastream.v1.Transaction.debug:type_name -> datastream.v1.Debug - 9, // 5: datastream.v1.UpdateGER.debug:type_name -> datastream.v1.Debug - 0, // 6: datastream.v1.BookMark.type:type_name -> datastream.v1.BookmarkType - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the 
sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 2, // 0: datastream.v1.BatchStart.type:type_name -> datastream.v1.BatchType + 10, // 1: datastream.v1.BatchStart.debug:type_name -> datastream.v1.Debug + 10, // 2: datastream.v1.BatchEnd.debug:type_name -> datastream.v1.Debug + 10, // 3: datastream.v1.L2Block.debug:type_name -> datastream.v1.Debug + 10, // 4: datastream.v1.Transaction.debug:type_name -> datastream.v1.Debug + 10, // 5: datastream.v1.UpdateGER.debug:type_name -> datastream.v1.Debug + 0, // 6: datastream.v1.BookMark.type:type_name -> datastream.v1.BookmarkType + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_datastream_proto_init() } @@ -999,7 +1054,7 @@ func file_datastream_proto_init() { } } file_datastream_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Transaction); i { + switch v := v.(*L2BlockEnd); i { case 0: return &v.state case 1: @@ -1011,7 +1066,7 @@ func file_datastream_proto_init() { } } file_datastream_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateGER); i { + switch v := v.(*Transaction); i { case 0: return &v.state case 1: @@ -1023,7 +1078,7 @@ func file_datastream_proto_init() { } } file_datastream_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BookMark); i { + switch v := v.(*UpdateGER); i { case 0: return &v.state case 1: @@ -1035,6 +1090,18 @@ func file_datastream_proto_init() { } } file_datastream_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BookMark); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_datastream_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Debug); i { case 0: return &v.state @@ -1053,7 +1120,7 @@ func file_datastream_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_datastream_proto_rawDesc, NumEnums: 3, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index d661fc77d58..f94650e3f52 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -1,19 +1,21 @@ package server import ( + "fmt" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" eritypes "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" "github.com/ledgerwatch/erigon/zk/datastream/types" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/erigon/zk/utils" - - "github.com/ledgerwatch/erigon/zk/hermez_db" ) type DbReader interface { + GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error) GetLocalExitRootForBatchNo(batchNo uint64) (libcommon.Hash, error) GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber uint64) ([]types.GerUpdateProto, error) GetForkId(batchNumber uint64) (uint64, error) @@ -82,10 +84,33 @@ func (d *DataStreamEntries) AddMany(entries []DataStreamEntryProto) { } } +func (d *DataStreamEntries) Size() int { + if d == nil || d.entries == nil { + return 0 + } + return len(d.entries) +} + func (d *DataStreamEntries) Entries() []DataStreamEntryProto { + if d == nil || d.entries == nil { + return []DataStreamEntryProto{} + } return d.entries } +func (d *DataStreamEntries) Marshal() (result []byte, err error) { + var b []byte + for _, entry := range d.entries { 
+ b, err = encodeEntryToBytesProto(entry) + if err != nil { + return nil, err + } + result = append(result, b...) + } + + return result, nil +} + func NewDataStreamEntries(size int) *DataStreamEntries { return &DataStreamEntries{ entries: make([]DataStreamEntryProto, size), @@ -113,85 +138,82 @@ func (srv *DataStreamServer) CommitEntriesToStreamProto(entries []DataStreamEntr } if latestBlockNum != nil { - srv.highestBlockWritten = latestBlockNum + a := *latestBlockNum + srv.highestBlockWritten = &a } if latestBatchNum != nil { - srv.highestBatchWritten = latestBatchNum + a := *latestBatchNum + srv.highestBatchWritten = &a } return nil } func createBlockWithBatchCheckStreamEntriesProto( - chainId uint64, reader DbReader, tx kv.Tx, block, lastBlock *eritypes.Block, batchNumber, - lastBatchNumber uint64, - l1InfoTreeMinTimestamps map[uint64]uint64, - isBatchEnd bool, - transactionsToIncludeByIndex []int, // passing nil here will include all transactions in the blocks -) ([]DataStreamEntryProto, error) { + lastBatchNumber, + chainId, + forkId uint64, + shouldSkipBatchEndEntry bool, +) (*DataStreamEntries, error) { var err error - var startEntriesProto, blockEntriesProto, endEntriesProto []DataStreamEntryProto - - gers, err := reader.GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber) - if err != nil { - return nil, err - } - + var endEntriesProto []DataStreamEntryProto + var startEntriesProto, blockEntries *DataStreamEntries // we might have a series of empty batches to account for, so we need to know the gap batchGap := batchNumber - lastBatchNumber isBatchStart := batchGap > 0 - // filter transactions by indexes that should be included - filteredTransactions := filterTransactionByIndexes(block.Transactions(), transactionsToIncludeByIndex) - - blockNum := block.NumberU64() // batch start // BATCH BOOKMARK if isBatchStart { + gers, err := reader.GetBatchGlobalExitRootsProto(lastBatchNumber, batchNumber) + if err != nil { + return nil, err + } + // the genesis we 
insert fully, so we would have to skip closing it + if !shouldSkipBatchEndEntry { + localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorage(batchNumber, reader, tx) + if err != nil { + return nil, err + } + lastBlockRoot := lastBlock.Root() + if endEntriesProto, err = addBatchEndEntriesProto(lastBatchNumber, &lastBlockRoot, gers, &localExitRoot); err != nil { + return nil, err + } + } + if startEntriesProto, err = createBatchStartEntriesProto(reader, tx, batchNumber, lastBatchNumber, batchGap, chainId, block.Root(), gers); err != nil { return nil, err } } - forkId, err := reader.GetForkId(batchNumber) - if err != nil { - return nil, err - } + blockNum := block.NumberU64() + l1InfoTreeMinTimestamps := make(map[uint64]uint64) deltaTimestamp := block.Time() - lastBlock.Time() if blockNum == 1 { deltaTimestamp = block.Time() l1InfoTreeMinTimestamps[0] = 0 } - blockEntries, err := createFullBlockStreamEntriesProto(reader, tx, block, filteredTransactions, forkId, deltaTimestamp, batchNumber, l1InfoTreeMinTimestamps) - if err != nil { + if blockEntries, err = createFullBlockStreamEntriesProto(reader, tx, block, block.Transactions(), forkId, deltaTimestamp, batchNumber, l1InfoTreeMinTimestamps); err != nil { return nil, err } - blockEntriesProto = blockEntries.Entries() - if isBatchEnd { - localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorage(batchNumber, reader, tx) - if err != nil { - return nil, err - } - blockRoot := block.Root() - if endEntriesProto, err = addBatchEndEntriesProto(tx, batchNumber, lastBatchNumber, &blockRoot, gers, &localExitRoot); err != nil { - return nil, err - } + if blockEntries.Size() == 0 { + return nil, fmt.Errorf("didn't create any entries for block %d", blockNum) } - entries := NewDataStreamEntries(len(startEntriesProto) + len(blockEntriesProto) + len(endEntriesProto)) - entries.AddMany(startEntriesProto) - entries.AddMany(blockEntriesProto) + entries := NewDataStreamEntries(len(endEntriesProto) + startEntriesProto.Size() + 
blockEntries.Size()) entries.AddMany(endEntriesProto) + entries.AddMany(startEntriesProto.Entries()) + entries.AddMany(blockEntries.Entries()) - return entries.Entries(), nil + return entries, nil } func createFullBlockStreamEntriesProto( @@ -204,7 +226,7 @@ func createFullBlockStreamEntriesProto( batchNumber uint64, l1InfoTreeMinTimestamps map[uint64]uint64, ) (*DataStreamEntries, error) { - entries := NewDataStreamEntries(len(filteredTransactions) + 2) + entries := NewDataStreamEntries(len(filteredTransactions) + 3) // block bookmark + block + block end blockNum := block.NumberU64() // L2 BLOCK BOOKMARK entries.Add(newL2BlockBookmarkEntryProto(blockNum)) @@ -251,6 +273,8 @@ func createFullBlockStreamEntriesProto( entries.Add(transaction) } + entries.Add(newL2BlockEndProto(blockNum)) + return entries, nil } @@ -281,32 +305,102 @@ func createTransactionEntryProto( return txProto, nil } -func CreateAndBuildStreamEntryBytesProto( - chainId uint64, - block *eritypes.Block, - reader *hermez_db.HermezDbReader, +func BuildWholeBatchStreamEntriesProto( tx kv.Tx, - lastBlock *eritypes.Block, + reader DbReader, + chainId uint64, + previousBatchNumber, batchNumber uint64, - lastBatchNumber uint64, + blocks []eritypes.Block, + txsPerBlock map[uint64][]eritypes.Transaction, l1InfoTreeMinTimestamps map[uint64]uint64, - isBatchEnd bool, - transactionsToIncludeByIndex []int, // passing nil here will include all transactions in the blocks -) (result []byte, err error) { - entries, err := createBlockWithBatchCheckStreamEntriesProto(chainId, reader, tx, block, lastBlock, batchNumber, lastBatchNumber, l1InfoTreeMinTimestamps, isBatchEnd, transactionsToIncludeByIndex) +) (allEntries *DataStreamEntries, err error) { + var batchEndEntries []DataStreamEntryProto + var batchStartEntries *DataStreamEntries + + forkId, err := reader.GetForkId(batchNumber) if err != nil { return nil, err } - for _, entry := range entries { - b, err := encodeEntryToBytesProto(entry) + gers, err := 
reader.GetBatchGlobalExitRootsProto(previousBatchNumber, batchNumber) + if err != nil { + return nil, err + } + + if batchStartEntries, err = createBatchStartEntriesProto(reader, tx, batchNumber, previousBatchNumber, batchNumber-previousBatchNumber, chainId, blocks[0].Root(), gers); err != nil { + return nil, err + } + + prevBatchLastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0].NumberU64()-1) + if err != nil { + return nil, err + } + + lastBlock := *prevBatchLastBlock + + blocksEntries := make([]DataStreamEntryProto, 0) + + for _, block := range blocks { + blockNum := block.NumberU64() + + deltaTimestamp := block.Time() - lastBlock.Time() + if blockNum == 1 { + deltaTimestamp = block.Time() + l1InfoTreeMinTimestamps[0] = 0 + } + + txForBlock, found := txsPerBlock[blockNum] + if !found { + return nil, fmt.Errorf("no transactions array found for block %d", blockNum) + } + + blockEntries, err := createFullBlockStreamEntriesProto(reader, tx, &block, txForBlock, forkId, deltaTimestamp, batchNumber, l1InfoTreeMinTimestamps) if err != nil { return nil, err } - result = append(result, b...) + blocksEntries = append(blocksEntries, blockEntries.Entries()...) 
+ + lastBlock = block } - return result, nil + // the genesis we insert fully, so we would have to skip closing it + localExitRoot, err := utils.GetBatchLocalExitRootFromSCStorage(batchNumber, reader, tx) + if err != nil { + return nil, err + } + + blockRoot := lastBlock.Root() + + batchEndEntries, err = addBatchEndEntriesProto(batchNumber, &blockRoot, gers, &localExitRoot) + if err != nil { + return nil, err + } + + allEntries = NewDataStreamEntries(batchStartEntries.Size() + len(blocksEntries) + len(batchEndEntries)) + allEntries.AddMany(batchStartEntries.Entries()) + allEntries.AddMany(blocksEntries) + allEntries.AddMany(batchEndEntries) + + return allEntries, nil +} + +func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) { + header := srv.stream.GetHeader() + + if header.TotalEntries == 0 { + return false, nil + } + + //find end block entry to delete from it onward + entryNum := header.TotalEntries - 1 + var entry datastreamer.FileEntry + entry, err = srv.stream.GetEntry(entryNum) + if err != nil { + return false, err + } + + return uint32(entry.Type) == uint32(types.EntryTypeBatchEnd), nil } func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { diff --git a/zk/datastream/server/data_stream_server_utils.go b/zk/datastream/server/data_stream_server_utils.go index 296276b9ddd..d017b3bc9a7 100644 --- a/zk/datastream/server/data_stream_server_utils.go +++ b/zk/datastream/server/data_stream_server_utils.go @@ -59,6 +59,14 @@ func newL2BlockProto( } } +func newL2BlockEndProto( + blockNumber uint64, +) *types.L2BlockEndProto { + return &types.L2BlockEndProto{ + Number: blockNumber, + } +} + func newTransactionProto( effectiveGasPricePercentage uint8, stateRoot libcommon.Hash, @@ -134,11 +142,15 @@ func createBatchStartEntriesProto( batchNumber, lastBatchNumber, batchGap, chainId uint64, root libcommon.Hash, gers []types.GerUpdateProto, -) ([]DataStreamEntryProto, error) { +) (*DataStreamEntries, error) { var err error var 
batchStartEntries []DataStreamEntryProto - entries := make([]DataStreamEntryProto, 0, 2+int(3*(batchGap-1))+len(gers)) + batchGapEntriesCount := int(batchGap) - 1 + if batchGapEntriesCount < 0 { + batchGapEntriesCount = 0 + } + entries := NewDataStreamEntries(2 + 3*batchGapEntriesCount + len(gers)) // if we have a gap of more than 1 batch then we need to write in the batch start and ends for these empty batches if batchGap > 1 { @@ -150,14 +162,14 @@ func createBatchStartEntriesProto( if batchStartEntries, err = addBatchStartEntries(reader, workingBatch, chainId); err != nil { return nil, err } - entries = append(entries, batchStartEntries...) + // entries = append(entries, batchStartEntries...) + entries.AddMany(batchStartEntries) // see if we have any gers to handle for _, ger := range gers { upd := ger.UpdateGER if upd.BatchNumber == workingBatch { - entries = append( - entries, + entries.Add( newGerUpdateProto(upd.BatchNumber, upd.Timestamp, libcommon.BytesToHash(upd.GlobalExitRoot), libcommon.BytesToAddress(upd.Coinbase), upd.ForkId, upd.ChainId, libcommon.BytesToHash(upd.StateRoot)), ) } @@ -167,7 +179,7 @@ func createBatchStartEntriesProto( if localExitRoot, err = utils.GetBatchLocalExitRootFromSCStorage(workingBatch, reader, tx); err != nil { return nil, err } - entries = append(entries, newBatchEndProto(localExitRoot, root, workingBatch)) + entries.Add(newBatchEndProto(localExitRoot, root, workingBatch)) } } @@ -175,13 +187,12 @@ func createBatchStartEntriesProto( if batchStartEntries, err = addBatchStartEntries(reader, batchNumber, chainId); err != nil { return nil, err } - entries = append(entries, batchStartEntries...) 
+ entries.AddMany(batchStartEntries) return entries, nil } func addBatchEndEntriesProto( - tx kv.Tx, - batchNumber, lastBatchNumber uint64, + batchNumber uint64, root *libcommon.Hash, gers []types.GerUpdateProto, localExitRoot *libcommon.Hash, @@ -231,22 +242,6 @@ func addBatchStartEntries(reader DbReader, batchNum, chainId uint64) ([]DataStre return entries, nil } -func filterTransactionByIndexes( - filteredTransactions eritypes.Transactions, - transactionsToIncludeByIndex []int, -) eritypes.Transactions { - if transactionsToIncludeByIndex != nil { - filteredTransactionsBuilder := make(eritypes.Transactions, len(transactionsToIncludeByIndex)) - for i, txIndexInBlock := range transactionsToIncludeByIndex { - filteredTransactionsBuilder[i] = filteredTransactions[txIndexInBlock] - } - - filteredTransactions = filteredTransactionsBuilder - } - - return filteredTransactions -} - const ( PACKET_TYPE_DATA = 2 // NOOP_ENTRY_NUMBER is used because we don't care about the entry number when feeding an atrificial diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index 2d5c5e7d315..030638b18c0 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -1,6 +1,7 @@ package server import ( + "context" "fmt" "time" @@ -14,71 +15,187 @@ import ( "github.com/ledgerwatch/log/v3" ) -const GenesisForkId = 0 // genesis fork is always 0 in the datastream +const ( + GenesisForkId = 0 // genesis fork is always 0 in the datastream + insertEntryCount = 100_000 + commitEntryCountLimit = 80_000 +) -func (srv *DataStreamServer) WriteBlocksToStream( +// gets the blocks for the said batch from the reader +// writes a bookmarks, batch start, blocks and batch end +// basically writes a whole standalone batch +// plus the GER updates if the batch gap is > 1 +// starts atomicOp and commits it internally +func (srv *DataStreamServer) WriteWholeBatchToStream( + logPrefix string, tx kv.Tx, reader 
DbReader, - from, to uint64, + prevBatchNum, + batchNum uint64, +) error { + var err error + blocksForBatch, err := reader.GetL2BlockNosByBatch(batchNum) + if err != nil { + return err + } + + var fromBlockNum, toBlockNum uint64 + for _, blockNum := range blocksForBatch { + if fromBlockNum == 0 || blockNum < fromBlockNum { + fromBlockNum = blockNum + } + if blockNum > toBlockNum { + toBlockNum = blockNum + } + } + + if err = srv.UnwindIfNecessary(logPrefix, reader, fromBlockNum, prevBatchNum, batchNum); err != nil { + return err + } + + if err = srv.stream.StartAtomicOp(); err != nil { + return err + } + defer srv.stream.RollbackAtomicOp() + + blocks := make([]eritypes.Block, 0) + txsPerBlock := make(map[uint64][]eritypes.Transaction) + for blockNumber := fromBlockNum; blockNumber <= toBlockNum; blockNumber++ { + block, err := rawdb.ReadBlockByNumber(tx, blockNumber) + if err != nil { + return err + } + + blocks = append(blocks, *block) + txsPerBlock[blockNumber] = block.Transactions() + } + + entries, err := BuildWholeBatchStreamEntriesProto(tx, reader, srv.GetChainId(), batchNum, batchNum, blocks, txsPerBlock, make(map[uint64]uint64)) + if err != nil { + return err + } + + if err = srv.CommitEntriesToStreamProto(entries.Entries(), &toBlockNum, &batchNum); err != nil { + return err + } + + if err = srv.stream.CommitAtomicOp(); err != nil { + return err + } + + return nil +} + +// writes consecutively blocks from-to +// checks for all batch related stuff in the meantime - batch start, batche end, etc +// starts atomicOp and commits it internally +func (srv *DataStreamServer) WriteBlocksToStreamConsecutively( + ctx context.Context, logPrefix string, + tx kv.Tx, + reader DbReader, + from, to uint64, ) error { + var err error + + // logger stuff t := utils.StartTimer("write-stream", "writeblockstostream") defer t.LogTimer() + logTicker := time.NewTicker(10 * time.Second) + totalToWrite := to - (from - 1) + copyFrom := from + ////////// - var err error + 
latestbatchNum, err := reader.GetBatchNoByL2Block(from - 1) + if err != nil { + return err + } + + batchNum, err := reader.GetBatchNoByL2Block(from) + if err != nil { + return err + } + + if err = srv.UnwindIfNecessary(logPrefix, reader, from, latestbatchNum, batchNum); err != nil { + return err + } - logTicker := time.NewTicker(10 * time.Second) - var lastBlock *eritypes.Block if err = srv.stream.StartAtomicOp(); err != nil { return err } - totalToWrite := to - (from - 1) - insertEntryCount := 100_000 - entries := make([]DataStreamEntryProto, insertEntryCount) - index := 0 - copyFrom := from - var latestbatchNum uint64 + defer srv.stream.RollbackAtomicOp() + + // check if a new batch starts and the old needs closing before that + // if it is already closed with a batch end, do not add a new batch end + // this is needed because we have to write a batch end when writing a new block from the next batch + // because at the current block we might not know if it is the last one in the batch + // but we know for certain if it is a 1st block from a new batch + islastEntrybatchEnd, err := srv.IsLastEntryBatchEnd() + if err != nil { + return err + } + + lastBlock, err := rawdb.ReadBlockByNumber(tx, from-1) + if err != nil { + return err + } + + entries := make([]DataStreamEntryProto, 0, insertEntryCount) + var forkId uint64 +LOOP: for currentBlockNumber := from; currentBlockNumber <= to; currentBlockNumber++ { select { case <-logTicker.C: log.Info(fmt.Sprintf("[%s]: progress", logPrefix), "block", currentBlockNumber, "target", to, "%", float64(currentBlockNumber-copyFrom)/float64(totalToWrite)*100) + case <-ctx.Done(): + break LOOP default: } - if lastBlock == nil { - lastBlock, err = rawdb.ReadBlockByNumber(tx, currentBlockNumber-1) + block, err := rawdb.ReadBlockByNumber(tx, currentBlockNumber) + if err != nil { + return err + } + + batchNum, err := reader.GetBatchNoByL2Block(currentBlockNumber) + if err != nil { + return err + } + + // fork id changes only per batch so 
query it only once per batch + if batchNum != latestbatchNum { + forkId, err = reader.GetForkId(batchNum) if err != nil { return err } } - block, blockEntries, batchNum, err := srv.createBlockStreamEntriesWithBatchCheck(logPrefix, tx, reader, lastBlock, currentBlockNumber) + blockEntries, err := createBlockWithBatchCheckStreamEntriesProto(reader, tx, block, lastBlock, batchNum, latestbatchNum, srv.chainId, forkId, islastEntrybatchEnd) if err != nil { return err } + entries = append(entries, blockEntries.Entries()...) + latestbatchNum = batchNum + lastBlock = block - for _, entry := range blockEntries { - entries[index] = entry - index++ - } + // the check is needed only before the first block + // after that - write batch end before each batch start + islastEntrybatchEnd = false // basically commit once 80% of the entries array is filled - if index+1 >= insertEntryCount*4/5 { + if len(entries) >= commitEntryCountLimit { log.Info(fmt.Sprintf("[%s] Commit count reached, committing entries", logPrefix), "block", currentBlockNumber) - if err = srv.CommitEntriesToStreamProto(entries[:index], ¤tBlockNumber, &batchNum); err != nil { + if err = srv.CommitEntriesToStreamProto(entries, ¤tBlockNumber, &batchNum); err != nil { return err } - entries = make([]DataStreamEntryProto, insertEntryCount) - index = 0 + entries = make([]DataStreamEntryProto, 0, insertEntryCount) } - - lastBlock = block } - if err = srv.CommitEntriesToStreamProto(entries[:index], &to, &latestbatchNum); err != nil { + if err = srv.CommitEntriesToStreamProto(entries, &to, &latestbatchNum); err != nil { return err } @@ -89,41 +206,63 @@ func (srv *DataStreamServer) WriteBlocksToStream( return nil } -func (srv *DataStreamServer) WriteBlockToStream( +// gets other needed data from the reader +// writes a batchBookmark and batch start (if needed), block bookmark, block and txs in it +// basically a full standalone block +func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( logPrefix string, tx 
kv.Tx, reader DbReader, - batchNum, prevBatchNum, - blockNum uint64, -) error { + forkId, + batchNum, prevBlockBatchNum uint64, + prevBlock, block eritypes.Block, +) (err error) { t := utils.StartTimer("write-stream", "writeblockstostream") defer t.LogTimer() - var err error + blockNum := block.NumberU64() - if err = srv.UnwindIfNecessary(logPrefix, reader, blockNum, prevBatchNum, batchNum); err != nil { + if err = srv.UnwindIfNecessary(logPrefix, reader, blockNum, prevBlockBatchNum, batchNum); err != nil { return err } if err = srv.stream.StartAtomicOp(); err != nil { return err } + defer srv.stream.RollbackAtomicOp() - lastBlock, err := rawdb.ReadBlockByNumber(tx, blockNum-1) - if err != nil { - return err + // if start of new batch add batch start entries + var batchStartEntries *DataStreamEntries + if prevBlockBatchNum != batchNum { + gers, err := reader.GetBatchGlobalExitRootsProto(prevBlockBatchNum, batchNum) + if err != nil { + return err + } + + if batchStartEntries, err = createBatchStartEntriesProto(reader, tx, batchNum, prevBlockBatchNum, batchNum-prevBlockBatchNum, srv.GetChainId(), block.Root(), gers); err != nil { + return err + } } - block, err := rawdb.ReadBlockByNumber(tx, blockNum) - if err != nil { - return err + + l1InfoTreeMinTimestamps := make(map[uint64]uint64) + deltaTimestamp := block.Time() - prevBlock.Time() + if blockNum == 1 { + deltaTimestamp = block.Time() + l1InfoTreeMinTimestamps[0] = 0 } - entries, err := createBlockWithBatchCheckStreamEntriesProto(srv.chainId, reader, tx, block, lastBlock, batchNum, prevBatchNum, make(map[uint64]uint64), false, nil) + blockEntries, err := createFullBlockStreamEntriesProto(reader, tx, &block, block.Transactions(), forkId, deltaTimestamp, batchNum, make(map[uint64]uint64)) if err != nil { return err } - if err = srv.CommitEntriesToStreamProto(entries, &blockNum, &batchNum); err != nil { + if batchStartEntries != nil { + if err = srv.CommitEntriesToStreamProto(batchStartEntries.Entries(), &blockNum, 
&batchNum); err != nil { + return err + } + } + + if err = srv.CommitEntriesToStreamProto(blockEntries.Entries(), &blockNum, &batchNum); err != nil { return err } @@ -134,21 +273,25 @@ func (srv *DataStreamServer) WriteBlockToStream( return nil } -func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBatchNum, batchNum uint64) error { +// checks if the stream has blocks above the current one +// if there is something, try to unwind it +// in the unwind chek if the block is at batch start +// if it is - unwind to previous batch's end, so it deletes batch stat of current batch as well +func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error { // if from is higher than the last datastream block number - unwind the stream highestDatastreamBlock, err := srv.GetHighestBlockNumber() if err != nil { return err } - //if this is a new batch case, we must unwind to previous batch's batch end + // if this is a new batch case, we must unwind to previous batch's batch end // otherwise it would corrupt the datastream with batch bookmark after a batch start or something similar if highestDatastreamBlock >= blockNum { - if prevBatchNum != batchNum { - log.Warn(fmt.Sprintf("[%s] Datastream must unwind to batch", logPrefix), "prevBatchNum", prevBatchNum, "batchNum", batchNum) + if prevBlockBatchNum != batchNum { + log.Warn(fmt.Sprintf("[%s] Datastream must unwind to batch", logPrefix), "prevBlockBatchNum", prevBlockBatchNum, "batchNum", batchNum) //get latest block in prev batch - lastBlockInPrevbatch, err := reader.GetHighestBlockInBatch(prevBatchNum) + lastBlockInPrevbatch, err := reader.GetHighestBlockInBatch(prevBlockBatchNum) if err != nil { return err } @@ -156,7 +299,7 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader // this represents a case where the block we must unwind to is part of a previous batch // this should never 
happen since previous batch in this use must be already completed if lastBlockInPrevbatch != blockNum-1 { - return fmt.Errorf("datastream must unwind to prev batch, but it would corrupt the datastream: prevBatchNum: %d, abtchNum: %d, blockNum: %d", prevBatchNum, batchNum, blockNum) + return fmt.Errorf("datastream must unwind to prev batch, but it would corrupt the datastream: prevBlockBatchNum: %d, batchNum: %d, blockNum: %d", prevBlockBatchNum, batchNum, blockNum) } if err := srv.UnwindToBatchStart(batchNum); err != nil { @@ -173,8 +316,6 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader } func (srv *DataStreamServer) WriteBatchEnd( - logPrefix string, - tx kv.Tx, reader DbReader, batchNumber, lastBatchNumber uint64, @@ -189,8 +330,9 @@ func (srv *DataStreamServer) WriteBatchEnd( if err = srv.stream.StartAtomicOp(); err != nil { return err } + defer srv.stream.RollbackAtomicOp() - batchEndEntries, err := addBatchEndEntriesProto(tx, batchNumber, lastBatchNumber, stateRoot, gers, localExitRoot) + batchEndEntries, err := addBatchEndEntriesProto(batchNumber, stateRoot, gers, localExitRoot) if err != nil { return err } @@ -206,48 +348,6 @@ func (srv *DataStreamServer) WriteBatchEnd( return nil } -func (srv *DataStreamServer) createBlockStreamEntriesWithBatchCheck( - logPrefix string, - tx kv.Tx, - reader DbReader, - lastBlock *eritypes.Block, - blockNumber uint64, -) (*eritypes.Block, []DataStreamEntryProto, uint64, error) { - block, err := rawdb.ReadBlockByNumber(tx, blockNumber) - if err != nil { - return nil, nil, 0, err - } - - batchNum, err := reader.GetBatchNoByL2Block(blockNumber) - if err != nil { - return nil, nil, 0, err - } - - prevBatchNum, err := reader.GetBatchNoByL2Block(blockNumber - 1) - if err != nil { - return nil, nil, 0, err - } - - if err = srv.UnwindIfNecessary(logPrefix, reader, blockNumber, prevBatchNum, batchNum); err != nil { - return nil, nil, 0, err - } - - nextBatchNum, nextBatchExists, err := 
reader.CheckBatchNoByL2Block(blockNumber + 1) - if err != nil { - return nil, nil, 0, err - } - - // a 0 next batch num here would mean we don't know about the next batch so must be at the end of the batch - isBatchEnd := !nextBatchExists || nextBatchNum > batchNum - - entries, err := createBlockWithBatchCheckStreamEntriesProto(srv.chainId, reader, tx, block, lastBlock, batchNum, prevBatchNum, make(map[uint64]uint64), isBatchEnd, nil) - if err != nil { - return nil, nil, 0, err - } - - return block, entries, batchNum, nil -} - func (srv *DataStreamServer) WriteGenesisToStream( genesis *eritypes.Block, reader *hermez_db.HermezDbReader, @@ -267,6 +367,7 @@ func (srv *DataStreamServer) WriteGenesisToStream( if err != nil { return err } + defer srv.stream.RollbackAtomicOp() batchBookmark := newBatchBookmarkEntryProto(genesis.NumberU64()) l2BlockBookmark := newL2BlockBookmarkEntryProto(genesis.NumberU64()) diff --git a/zk/datastream/test/test.go b/zk/datastream/test/test.go deleted file mode 100644 index c3a1f600796..00000000000 --- a/zk/datastream/test/test.go +++ /dev/null @@ -1,101 +0,0 @@ -package main - -import ( - "context" - "fmt" - "strconv" - - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/zk/datastream/client" - "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" - "github.com/ledgerwatch/erigon/zk/datastream/test/utils" - "github.com/ledgerwatch/erigon/zk/datastream/types" - "github.com/ledgerwatch/erigon/zkevm/log" -) - -const dataStreamCardona = "datastream.cardona.zkevm-rpc.com:6900" -const dataStreamBali = "datastream.internal.zkevm-rpc.com:6900" -const datastreamMainnet = "stream.zkevm-rpc.com:6900" -const estest = "34.175.214.161:6900" -const localhost = "localhost:6910" - -// This code downloads headers and blocks from a datastream server. 
-func main() { - // Create client - c := client.NewClient(context.Background(), localhost, 0, 0, 0) - - // Start client (connect to the server) - defer c.Stop() - if err := c.Start(); err != nil { - panic(err) - } - - // create bookmark - bookmark := types.NewBookmarkProto(188312, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) - - // Read all entries from server - blocksRead, _, _, entriesReadAmount, _, err := c.ReadEntries(bookmark, 1) - if err != nil { - panic(err) - } - fmt.Println("Entries read amount: ", entriesReadAmount) - fmt.Println("Blocks read amount: ", len(*blocksRead)) - - // forkId := uint16(0) - for _, dsBlock := range *blocksRead { - fmt.Println(len(dsBlock.L2Txs)) - } -} - -func matchBlocks(dsBlock types.FullL2Block, rpcBlock utils.Result, lastGer common.Hash) bool { - decimal_num, err := strconv.ParseUint(rpcBlock.Number[2:], 16, 64) - if err != nil { - log.Errorf("Error parsing block number. Error: %v, BlockNumber: %d, rpcBlockNumber: %d", err, dsBlock.L2BlockNumber, rpcBlock.Number) - return false - } - - if decimal_num != dsBlock.L2BlockNumber { - log.Errorf("Block numbers don't match. BlockNumber: %d, rpcBlockNumber: %d", dsBlock.L2BlockNumber, decimal_num) - return false - } - - if rpcBlock.StateRoot != dsBlock.StateRoot.String() { - log.Errorf("Block state roots don't match. BlockNumber: %d, dsBlockStateRoot: %s, rpcBlockStateRoot: %s", dsBlock.L2BlockNumber, dsBlock.StateRoot.String(), rpcBlock.StateRoot) - return false - } - - decimal_timestamp, err := strconv.ParseUint(rpcBlock.Timestamp[2:], 16, 64) - if err != nil { - log.Errorf("Error parsing block timestamp. Error: %v, BlockNumber: %d, rpcBlockTimestamp: %d", err, dsBlock.L2BlockNumber, rpcBlock.Timestamp) - return false - } - - if decimal_timestamp != uint64(dsBlock.Timestamp) { - log.Errorf("Block timestamps don't match. 
BlockNumber: %d, dsBlockTimestamp: %d, rpcBlockTimestamp: %d", dsBlock.L2BlockNumber, dsBlock.Timestamp, decimal_timestamp) - return false - } - - if len(dsBlock.L2Txs) != len(rpcBlock.Transactions) { - log.Errorf("Block txs don't match. BlockNumber: %d, dsBlockTxs: %d, rpcBlockTxs: %d", dsBlock.L2BlockNumber, len(dsBlock.L2Txs), len(rpcBlock.Transactions)) - return false - } - - bloxkNumHex := fmt.Sprintf("%x", dsBlock.L2BlockNumber) - txHex := fmt.Sprintf("%x", dsBlock.Timestamp) - - if lastGer.Hex() != dsBlock.GlobalExitRoot.Hex() { - if err := utils.CompareValuesString(bloxkNumHex, txHex, dsBlock.GlobalExitRoot); err != nil { - log.Error("Error comparing values: ", err) - return false - } - } - - // for i, tx := range dsBlock.L2Txs { - // if tx..String() != rpcBlock.Transactions[i] { - // log.Error("Block txs don't match", "blockNumber", dsBlock.L2BlockNumber, "dsBlockTx", tx.String(), "rpcBlockTx", rpcBlock.Transactions[i]) - // return false - // } - // } - - return true -} diff --git a/zk/datastream/types/batch_proto.go b/zk/datastream/types/batch_proto.go index 32d3287a911..946e49351f6 100644 --- a/zk/datastream/types/batch_proto.go +++ b/zk/datastream/types/batch_proto.go @@ -41,6 +41,7 @@ func (b *BatchStartProto) Type() EntryType { } type BatchEnd struct { + Number uint64 LocalExitRoot libcommon.Hash StateRoot libcommon.Hash Debug Debug @@ -76,6 +77,7 @@ func UnmarshalBatchEnd(data []byte) (*BatchEnd, error) { } return &BatchEnd{ + Number: batchEnd.Number, LocalExitRoot: libcommon.BytesToHash(batchEnd.LocalExitRoot), StateRoot: libcommon.BytesToHash(batchEnd.StateRoot), Debug: ProcessDebug(batchEnd.Debug), diff --git a/zk/datastream/types/entry_type.go b/zk/datastream/types/entry_type.go index d8e52754bc1..827aabfb15c 100644 --- a/zk/datastream/types/entry_type.go +++ b/zk/datastream/types/entry_type.go @@ -9,5 +9,6 @@ var ( EntryTypeL2Tx EntryType = 3 EntryTypeBatchEnd EntryType = 4 EntryTypeGerUpdate EntryType = 5 + EntryTypeL2BlockEnd EntryType = 6 
BookmarkEntryType EntryType = 176 ) diff --git a/zk/datastream/types/file.go b/zk/datastream/types/file.go index 8576c8be220..41c043caa22 100644 --- a/zk/datastream/types/file.go +++ b/zk/datastream/types/file.go @@ -3,6 +3,7 @@ package types import ( "encoding/binary" "fmt" + "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" ) @@ -32,6 +33,9 @@ func (f *FileEntry) IsBookmarkBlock() bool { return uint32(f.EntryType) == uint32(datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) } +func (f *FileEntry) IsL2BlockEnd() bool { + return uint32(f.EntryType) == uint32(6) //TODO: fix once it is added in the lib +} func (f *FileEntry) IsL2Block() bool { return uint32(f.EntryType) == uint32(datastream.EntryType_ENTRY_TYPE_L2_BLOCK) } diff --git a/zk/datastream/types/l2block_proto.go b/zk/datastream/types/l2block_proto.go index 7cb0d6258c4..36be0c9e446 100644 --- a/zk/datastream/types/l2block_proto.go +++ b/zk/datastream/types/l2block_proto.go @@ -6,6 +6,35 @@ import ( "google.golang.org/protobuf/proto" ) +type L2BlockEndProto struct { + Number uint64 +} + +func (b *L2BlockEndProto) Marshal() ([]byte, error) { + return proto.Marshal(&datastream.L2BlockEnd{ + Number: b.Number, + }) +} + +func (b *L2BlockEndProto) Type() EntryType { + return EntryTypeL2BlockEnd +} + +func (b *L2BlockEndProto) GetBlockNumber() uint64 { + return b.Number +} + +func UnmarshalL2BlockEnd(data []byte) (*L2BlockEndProto, error) { + blockEnd := datastream.L2BlockEnd{} + if err := proto.Unmarshal(data, &blockEnd); err != nil { + return nil, err + } + + return &L2BlockEndProto{ + Number: blockEnd.Number, + }, nil +} + type L2BlockProto struct { *datastream.L2Block } @@ -19,16 +48,13 @@ type FullL2Block struct { GlobalExitRoot libcommon.Hash Coinbase libcommon.Address ForkId uint64 - ChainId uint64 L1BlockHash libcommon.Hash L2Blockhash libcommon.Hash - StateRoot libcommon.Hash - L2Txs []L2TransactionProto ParentHash libcommon.Hash - BatchEnd bool - 
LocalExitRoot libcommon.Hash + StateRoot libcommon.Hash BlockGasLimit uint64 BlockInfoRoot libcommon.Hash + L2Txs []L2TransactionProto Debug Debug } diff --git a/zk/debug_tools/datastream-bytes/main.go b/zk/debug_tools/datastream-bytes/main.go index f17e0584c65..5aa4ed7204a 100644 --- a/zk/debug_tools/datastream-bytes/main.go +++ b/zk/debug_tools/datastream-bytes/main.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" ) @@ -30,41 +31,35 @@ func main() { err := db.View(context.Background(), func(tx kv.Tx) error { hermezDb := hermez_db.NewHermezDbReader(tx) - blocks, err := hermezDb.GetL2BlockNosByBatch(uint64(batchNum)) + blockNumbers, err := hermezDb.GetL2BlockNosByBatch(uint64(batchNum)) if err != nil { return err } - if len(blocks) == 0 { + if len(blockNumbers) == 0 { return fmt.Errorf("no blocks found for batch %d", batchNum) } - lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) - if err != nil { - return err - } - previousBatch := batchNum - 1 + blocks := make([]types.Block, 0, len(blockNumbers)) + txsPerBlock := make(map[uint64][]types.Transaction) - for idx, blockNumber := range blocks { + for _, blockNumber := range blockNumbers { block, err := rawdb.ReadBlockByNumber(tx, blockNumber) if err != nil { return err } - - //gerUpdates := []dstypes.GerUpdate{} - var l1InfoTreeMinTimestamps map[uint64]uint64 - - isBatchEnd := idx == len(blocks)-1 - - sBytes, err := server.CreateAndBuildStreamEntryBytesProto(uint64(chainId), block, hermezDb, tx, lastBlock, uint64(batchNum), uint64(previousBatch), l1InfoTreeMinTimestamps, isBatchEnd, nil) - if err != nil { - return err - } - streamBytes = append(streamBytes, sBytes...) 
- lastBlock = block - // we only put in the batch bookmark at the start of the stream data once - previousBatch = batchNum + blocks = append(blocks, *block) + txsPerBlock[blockNumber] = block.Transactions() + } + var l1InfoTreeMinTimestamps map[uint64]uint64 + entries, err := server.BuildWholeBatchStreamEntriesProto(tx, hermezDb, uint64(chainId), uint64(previousBatch), uint64(batchNum), blocks, txsPerBlock, l1InfoTreeMinTimestamps) + if err != nil { + return err + } + streamBytes, err = entries.Marshal() + if err != nil { + return err } return nil diff --git a/zk/debug_tools/datastream-compare/main.go b/zk/debug_tools/datastream-compare/main.go index 8f73a3acae5..6db9105703c 100644 --- a/zk/debug_tools/datastream-compare/main.go +++ b/zk/debug_tools/datastream-compare/main.go @@ -1,227 +1,215 @@ package main -import ( - "context" - "fmt" - - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/zk/datastream/client" - "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" - "github.com/ledgerwatch/erigon/zk/datastream/types" - "github.com/ledgerwatch/erigon/zk/debug_tools" - "github.com/ledgerwatch/log/v3" -) - const localDatastream = "localhost:6900" const fromBlock = 18809 const amountToRead = 10 // This code downloads headers and blocks from a datastream server. 
func main() { - ctx := context.Background() - cfg, err := debug_tools.GetConf() - if err != nil { - panic(fmt.Sprintf("RPGCOnfig: %s", err)) - } - - // Create client - localClient := client.NewClient(ctx, localDatastream, 3, 500, 0) - remoteClient := client.NewClient(ctx, cfg.Datastream, 3, 500, 0) - - // Start client (connect to the server) - defer localClient.Stop() - if err := localClient.Start(); err != nil { - panic(err) - } - - defer remoteClient.Stop() - if err := remoteClient.Start(); err != nil { - panic(err) - } - - // create bookmark - bookmark := types.NewBookmarkProto(fromBlock, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) - - // Read all entries from server - blocksReadLocal, gerUpdatesLocal, _, _, _, err := localClient.ReadEntries(bookmark, amountToRead) - if err != nil { - panic(err) - } - // Read all entries from server - blocksReadRemote, gerUpdatesRemote, _, _, _, err := remoteClient.ReadEntries(bookmark, amountToRead) - if err != nil { - panic(err) - } - - for i, block := range *blocksReadLocal { - fmt.Println(i) - fmt.Println(block.L2BlockNumber) - } - - blockCountMatches := len(*blocksReadLocal) == len(*blocksReadRemote) - - if !blockCountMatches { - log.Error("Block amounts don't match", "localBlocks", len(*blocksReadLocal), "remoteBlocks", len(*blocksReadRemote)) - } else { - blockMismatch := false - for i, localBlock := range *blocksReadLocal { - remoteBlock := (*blocksReadRemote)[i] - - if localBlock.BatchNumber != remoteBlock.BatchNumber { - log.Error("Block batch numbers don't match", "blockNum", localBlock.L2BlockNumber, "localBatchNumber", localBlock.BatchNumber, "remoteBatchNumber", remoteBlock.BatchNumber) - blockMismatch = true - } - - if localBlock.L2BlockNumber != remoteBlock.L2BlockNumber { - log.Error("Block numbers don't match", "localBlockNumber", localBlock.L2BlockNumber, "remoteBlockNumber", remoteBlock.L2BlockNumber) - blockMismatch = true - } - - if localBlock.Timestamp != remoteBlock.Timestamp { - log.Error("Block 
timestamps don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTimestamp", localBlock.Timestamp, "remoteBlockTimestamp", remoteBlock.Timestamp) - blockMismatch = true - } - - if localBlock.DeltaTimestamp != remoteBlock.DeltaTimestamp && localBlock.L2BlockNumber != 1 { - log.Error("Block delta timestamps don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockDeltaTimestamp", localBlock.DeltaTimestamp, "remoteBlockDeltaTimestamp", remoteBlock.DeltaTimestamp) - blockMismatch = true - } - - if localBlock.L1InfoTreeIndex != remoteBlock.L1InfoTreeIndex { - log.Error("Block L1InfoTreeIndex don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockL1InfoTreeIndex", localBlock.L1InfoTreeIndex, "remoteBlockL1InfoTreeIndex", remoteBlock.L1InfoTreeIndex) - blockMismatch = true - } - - if localBlock.GlobalExitRoot != remoteBlock.GlobalExitRoot && localBlock.GlobalExitRoot != *new(common.Hash) { - log.Error("Block global exit roots don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockGlobalExitRoot", localBlock.GlobalExitRoot, "remoteBlockGlobalExitRoot", remoteBlock.GlobalExitRoot) - blockMismatch = true - } - - if localBlock.Coinbase != remoteBlock.Coinbase { - log.Error("Block coinbases don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockCoinbase", localBlock.Coinbase, "remoteBlockCoinbase", remoteBlock.Coinbase) - blockMismatch = true - } - - if localBlock.ForkId != remoteBlock.ForkId { - log.Error("Block forkids don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockForkId", localBlock.ForkId, "remoteBlockForkId", remoteBlock.ForkId) - blockMismatch = true - } - - if localBlock.ChainId != remoteBlock.ChainId { - log.Error("Block chainids don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockChainId", localBlock.ChainId, "remoteBlockChainId", remoteBlock.ChainId) - blockMismatch = true - } - - if localBlock.L1BlockHash != remoteBlock.L1BlockHash { - 
log.Error("Block L1BlockHash don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockL1BlockHash", localBlock.L1BlockHash, "remoteBlockL1BlockHash", remoteBlock.L1BlockHash) - blockMismatch = true - } - - //don't check blockhash, because of pre forkid8 bugs it will mismatch for sure - // if localBlock.L2Blockhash != remoteBlock.L2Blockhash { - // log.Error("Block hashes don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockHash", localBlock.L2Blockhash, "remoteBlockHash", remoteBlock.L2Blockhash) - // } - - if localBlock.StateRoot != remoteBlock.StateRoot { - log.Error("Block state roots don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockStateRoot", localBlock.StateRoot, "remoteBlockStateRoot", remoteBlock.StateRoot) - blockMismatch = true - } - - if len(localBlock.L2Txs) != len(remoteBlock.L2Txs) { - log.Error("Block transactions don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTxs", localBlock.L2Txs, "remoteBlock") - blockMismatch = true - } - - for i, localTx := range localBlock.L2Txs { - remoteTx := remoteBlock.L2Txs[i] - - if localTx.EffectiveGasPricePercentage != remoteTx.EffectiveGasPricePercentage { - log.Error("Block txs EffectiveGasPricePercentage don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.EffectiveGasPricePercentage, "remoteBlockTx", remoteTx.EffectiveGasPricePercentage) - blockMismatch = true - } - - if localTx.IsValid != remoteTx.IsValid { - log.Error("Block txs IsValid don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.IsValid, "remoteBlockTx", remoteTx.IsValid) - blockMismatch = true - } - - if localTx.IntermediateStateRoot != remoteTx.IntermediateStateRoot { - log.Error("Block txs StateRoot don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.IntermediateStateRoot, "remoteBlockTx", remoteTx.IntermediateStateRoot) - blockMismatch = true - } - - for i, b := range 
localTx.Encoded { - if b != remoteTx.Encoded[i] { - log.Error("Block txs Encoded don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.Encoded, "remoteBlockTx", remoteTx.Encoded) - blockMismatch = true - } - - if blockMismatch { - break - } - } - - if blockMismatch { - break - } - - } - - if blockMismatch { - break - } - } - } - - gerCountMatches := len(*gerUpdatesLocal) == len(*gerUpdatesRemote) - if !gerCountMatches { - log.Error("GerUpdate amounts don't match", "localGerUpdates", len(*gerUpdatesLocal), "remoteGerUpdates", len(*gerUpdatesRemote)) - } else { - gerMismatch := false - for i, localGerUpdate := range *gerUpdatesLocal { - remoteGerUpate := (*gerUpdatesRemote)[i] - - if localGerUpdate.BatchNumber != remoteGerUpate.BatchNumber { - log.Error("GerUpdate batch numbers don't match", "localGerUpdate", localGerUpdate, "remoteGerUpdate", remoteGerUpate) - gerMismatch = true - } - - // their gerupdate chainId is wrong for some reason - // if localGerUpdate.ChainId != remoteGerUpate.ChainId { - // log.Error("GerUpdate ChainId don't match", "localGerUpdate", localGerUpdate.ChainId, "remoteGerUpdate", remoteGerUpate.ChainId) - // gerMismatch = true - // } - - if localGerUpdate.Coinbase != remoteGerUpate.Coinbase { - log.Error("GerUpdate.Coinbase don't match", "localGerUpdate", localGerUpdate.Coinbase, "remoteGerUpdate", remoteGerUpate.Coinbase) - gerMismatch = true - } - - if localGerUpdate.ForkId != remoteGerUpate.ForkId { - log.Error("GerUpdate.ForkId don't match", "localGerUpdate", localGerUpdate.ForkId, "remoteGerUpdate", remoteGerUpate.ForkId) - gerMismatch = true - } - - if localGerUpdate.GlobalExitRoot != remoteGerUpate.GlobalExitRoot { - log.Error("GerUpdate.GlobalExitRoot don't match", "localGerUpdate", localGerUpdate.GlobalExitRoot, "remoteGerUpdate", remoteGerUpate.GlobalExitRoot) - gerMismatch = true - } - - if localGerUpdate.StateRoot != remoteGerUpate.StateRoot { - log.Error("GerUpdate.StateRoot don't match", 
"localGerUpdate", localGerUpdate.StateRoot, "remoteGerUpdate", remoteGerUpate.StateRoot) - gerMismatch = true - } - - if localGerUpdate.Timestamp != remoteGerUpate.Timestamp { - log.Error("GerUpdate.Timestamp don't match", "localGerUpdate", localGerUpdate.Timestamp, "remoteGerUpdate", remoteGerUpate.Timestamp) - gerMismatch = true - } - - if gerMismatch { - break - } - } - } - - fmt.Println("Check finished") + // ctx := context.Background() + // cfg, err := debug_tools.GetConf() + // if err != nil { + // panic(fmt.Sprintf("RPGCOnfig: %s", err)) + // } + + // // Create client + // localClient := client.NewClient(ctx, localDatastream, 3, 500, 0) + // remoteClient := client.NewClient(ctx, cfg.Datastream, 3, 500, 0) + + // // Start client (connect to the server) + // defer localClient.Stop() + // if err := localClient.Start(); err != nil { + // panic(err) + // } + + // defer remoteClient.Stop() + // if err := remoteClient.Start(); err != nil { + // panic(err) + // } + + // // create bookmark + // bookmark := types.NewBookmarkProto(fromBlock, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) + + // // Read all entries from server + // blocksReadLocal, gerUpdatesLocal, _, _, _, err := localClient.ReadEntries(bookmark, amountToRead) + // if err != nil { + // panic(err) + // } + // // Read all entries from server + // blocksReadRemote, gerUpdatesRemote, _, _, _, err := remoteClient.ReadEntries(bookmark, amountToRead) + // if err != nil { + // panic(err) + // } + + // for i, block := range *blocksReadLocal { + // fmt.Println(i) + // fmt.Println(block.L2BlockNumber) + // } + + // blockCountMatches := len(*blocksReadLocal) == len(*blocksReadRemote) + + // if !blockCountMatches { + // log.Error("Block amounts don't match", "localBlocks", len(*blocksReadLocal), "remoteBlocks", len(*blocksReadRemote)) + // } else { + // blockMismatch := false + // for i, localBlock := range *blocksReadLocal { + // remoteBlock := (*blocksReadRemote)[i] + + // if localBlock.BatchNumber != 
remoteBlock.BatchNumber { + // log.Error("Block batch numbers don't match", "blockNum", localBlock.L2BlockNumber, "localBatchNumber", localBlock.BatchNumber, "remoteBatchNumber", remoteBlock.BatchNumber) + // blockMismatch = true + // } + + // if localBlock.L2BlockNumber != remoteBlock.L2BlockNumber { + // log.Error("Block numbers don't match", "localBlockNumber", localBlock.L2BlockNumber, "remoteBlockNumber", remoteBlock.L2BlockNumber) + // blockMismatch = true + // } + + // if localBlock.Timestamp != remoteBlock.Timestamp { + // log.Error("Block timestamps don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTimestamp", localBlock.Timestamp, "remoteBlockTimestamp", remoteBlock.Timestamp) + // blockMismatch = true + // } + + // if localBlock.DeltaTimestamp != remoteBlock.DeltaTimestamp && localBlock.L2BlockNumber != 1 { + // log.Error("Block delta timestamps don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockDeltaTimestamp", localBlock.DeltaTimestamp, "remoteBlockDeltaTimestamp", remoteBlock.DeltaTimestamp) + // blockMismatch = true + // } + + // if localBlock.L1InfoTreeIndex != remoteBlock.L1InfoTreeIndex { + // log.Error("Block L1InfoTreeIndex don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockL1InfoTreeIndex", localBlock.L1InfoTreeIndex, "remoteBlockL1InfoTreeIndex", remoteBlock.L1InfoTreeIndex) + // blockMismatch = true + // } + + // if localBlock.GlobalExitRoot != remoteBlock.GlobalExitRoot && localBlock.GlobalExitRoot != *new(common.Hash) { + // log.Error("Block global exit roots don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockGlobalExitRoot", localBlock.GlobalExitRoot, "remoteBlockGlobalExitRoot", remoteBlock.GlobalExitRoot) + // blockMismatch = true + // } + + // if localBlock.Coinbase != remoteBlock.Coinbase { + // log.Error("Block coinbases don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockCoinbase", localBlock.Coinbase, "remoteBlockCoinbase", 
remoteBlock.Coinbase) + // blockMismatch = true + // } + + // if localBlock.ForkId != remoteBlock.ForkId { + // log.Error("Block forkids don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockForkId", localBlock.ForkId, "remoteBlockForkId", remoteBlock.ForkId) + // blockMismatch = true + // } + + // // if localBlock.ChainId != remoteBlock.ChainId { + // // log.Error("Block chainids don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockChainId", localBlock.ChainId, "remoteBlockChainId", remoteBlock.ChainId) + // // blockMismatch = true + // // } + + // if localBlock.L1BlockHash != remoteBlock.L1BlockHash { + // log.Error("Block L1BlockHash don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockL1BlockHash", localBlock.L1BlockHash, "remoteBlockL1BlockHash", remoteBlock.L1BlockHash) + // blockMismatch = true + // } + + // //don't check blockhash, because of pre forkid8 bugs it will mismatch for sure + // // if localBlock.L2Blockhash != remoteBlock.L2Blockhash { + // // log.Error("Block hashes don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockHash", localBlock.L2Blockhash, "remoteBlockHash", remoteBlock.L2Blockhash) + // // } + + // if localBlock.StateRoot != remoteBlock.StateRoot { + // log.Error("Block state roots don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockStateRoot", localBlock.StateRoot, "remoteBlockStateRoot", remoteBlock.StateRoot) + // blockMismatch = true + // } + + // if len(localBlock.L2Txs) != len(remoteBlock.L2Txs) { + // log.Error("Block transactions don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTxs", localBlock.L2Txs, "remoteBlock") + // blockMismatch = true + // } + + // for i, localTx := range localBlock.L2Txs { + // remoteTx := remoteBlock.L2Txs[i] + + // if localTx.EffectiveGasPricePercentage != remoteTx.EffectiveGasPricePercentage { + // log.Error("Block txs EffectiveGasPricePercentage don't match", "localBlockNumber", 
localBlock.L2BlockNumber, "localBlockTx", localTx.EffectiveGasPricePercentage, "remoteBlockTx", remoteTx.EffectiveGasPricePercentage) + // blockMismatch = true + // } + + // if localTx.IsValid != remoteTx.IsValid { + // log.Error("Block txs IsValid don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.IsValid, "remoteBlockTx", remoteTx.IsValid) + // blockMismatch = true + // } + + // if localTx.IntermediateStateRoot != remoteTx.IntermediateStateRoot { + // log.Error("Block txs StateRoot don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.IntermediateStateRoot, "remoteBlockTx", remoteTx.IntermediateStateRoot) + // blockMismatch = true + // } + + // for i, b := range localTx.Encoded { + // if b != remoteTx.Encoded[i] { + // log.Error("Block txs Encoded don't match", "localBlockNumber", localBlock.L2BlockNumber, "localBlockTx", localTx.Encoded, "remoteBlockTx", remoteTx.Encoded) + // blockMismatch = true + // } + + // if blockMismatch { + // break + // } + // } + + // if blockMismatch { + // break + // } + + // } + + // if blockMismatch { + // break + // } + // } + // } + + // gerCountMatches := len(*gerUpdatesLocal) == len(*gerUpdatesRemote) + // if !gerCountMatches { + // log.Error("GerUpdate amounts don't match", "localGerUpdates", len(*gerUpdatesLocal), "remoteGerUpdates", len(*gerUpdatesRemote)) + // } else { + // gerMismatch := false + // for i, localGerUpdate := range *gerUpdatesLocal { + // remoteGerUpate := (*gerUpdatesRemote)[i] + + // if localGerUpdate.BatchNumber != remoteGerUpate.BatchNumber { + // log.Error("GerUpdate batch numbers don't match", "localGerUpdate", localGerUpdate, "remoteGerUpdate", remoteGerUpate) + // gerMismatch = true + // } + + // // their gerupdate chainId is wrong for some reason + // // if localGerUpdate.ChainId != remoteGerUpate.ChainId { + // // log.Error("GerUpdate ChainId don't match", "localGerUpdate", localGerUpdate.ChainId, "remoteGerUpdate", 
remoteGerUpate.ChainId) + // // gerMismatch = true + // // } + + // if localGerUpdate.Coinbase != remoteGerUpate.Coinbase { + // log.Error("GerUpdate.Coinbase don't match", "localGerUpdate", localGerUpdate.Coinbase, "remoteGerUpdate", remoteGerUpate.Coinbase) + // gerMismatch = true + // } + + // if localGerUpdate.ForkId != remoteGerUpate.ForkId { + // log.Error("GerUpdate.ForkId don't match", "localGerUpdate", localGerUpdate.ForkId, "remoteGerUpdate", remoteGerUpate.ForkId) + // gerMismatch = true + // } + + // if localGerUpdate.GlobalExitRoot != remoteGerUpate.GlobalExitRoot { + // log.Error("GerUpdate.GlobalExitRoot don't match", "localGerUpdate", localGerUpdate.GlobalExitRoot, "remoteGerUpdate", remoteGerUpate.GlobalExitRoot) + // gerMismatch = true + // } + + // if localGerUpdate.StateRoot != remoteGerUpate.StateRoot { + // log.Error("GerUpdate.StateRoot don't match", "localGerUpdate", localGerUpdate.StateRoot, "remoteGerUpdate", remoteGerUpate.StateRoot) + // gerMismatch = true + // } + + // if localGerUpdate.Timestamp != remoteGerUpate.Timestamp { + // log.Error("GerUpdate.Timestamp don't match", "localGerUpdate", localGerUpdate.Timestamp, "remoteGerUpdate", remoteGerUpate.Timestamp) + // gerMismatch = true + // } + + // if gerMismatch { + // break + // } + // } + // } + + // fmt.Println("Check finished") } diff --git a/zk/debug_tools/datastream-correctness-check/main.go b/zk/debug_tools/datastream-correctness-check/main.go new file mode 100644 index 00000000000..5d637753700 --- /dev/null +++ b/zk/debug_tools/datastream-correctness-check/main.go @@ -0,0 +1,149 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/zk/datastream/client" + "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" + "github.com/ledgerwatch/erigon/zk/datastream/types" + "github.com/ledgerwatch/erigon/zk/debug_tools" +) + +func main() { + ctx := context.Background() + cfg, err := debug_tools.GetConf() + 
if err != nil { + panic(fmt.Sprintf("RPGCOnfig: %s", err)) + } + + // Create client + client := client.NewClient(ctx, cfg.Datastream, 3, 500, 0) + + // Start client (connect to the server) + defer client.Stop() + if err := client.Start(); err != nil { + panic(err) + } + + // create bookmark + bookmark := types.NewBookmarkProto(5191325, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) + + // var previousFile *types.FileEntry + progressBatch := uint64(0) + progressBlock := uint64(0) + + printFunction := func(file *types.FileEntry) error { + switch file.EntryType { + case types.EntryTypeL2Block: + l2Block, err := types.UnmarshalL2Block(file.Data) + if err != nil { + return err + } + fmt.Println("L2Block: ", l2Block.L2BlockNumber, "batch", l2Block.BatchNumber, "stateRoot", l2Block.StateRoot.Hex()) + if l2Block.L2BlockNumber > 5191335 { + return fmt.Errorf("stop") + } + case types.EntryTypeBatchEnd: + batchEnd, err := types.UnmarshalBatchEnd(file.Data) + if err != nil { + return err + } + fmt.Println("BatchEnd: ", batchEnd.Number, "stateRoot", batchEnd.StateRoot.Hex()) + + } + + return nil + } + + // function := func(file *types.FileEntry) error { + // switch file.EntryType { + // case types.EntryTypeL2BlockEnd: + // if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Block && previousFile.EntryType != types.EntryTypeL2Tx { + // return fmt.Errorf("unexpected entry type before l2 block end: %v", previousFile.EntryType) + // } + // case types.BookmarkEntryType: + // bookmark, err := types.UnmarshalBookmark(file.Data) + // if err != nil { + // return err + // } + // if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH { + // progressBatch = bookmark.Value + // if previousFile != nil && previousFile.EntryType != types.EntryTypeBatchEnd { + // return fmt.Errorf("unexpected entry type before batch bookmark type: %v, bookmark batch number: %d", previousFile.EntryType, bookmark.Value) + // } + // } + // if bookmark.BookmarkType() == 
datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK { + // progressBlock = bookmark.Value + // if previousFile != nil && + // previousFile.EntryType != types.EntryTypeBatchStart && + // previousFile.EntryType != types.EntryTypeL2BlockEnd { + // return fmt.Errorf("unexpected entry type before block bookmark type: %v, bookmark block number: %d", previousFile.EntryType, bookmark.Value) + // } + // } + // case types.EntryTypeBatchStart: + // batchStart, err := types.UnmarshalBatchStart(file.Data) + // if err != nil { + // return err + // } + // progressBatch = batchStart.Number + // if previousFile != nil { + // if previousFile.EntryType != types.BookmarkEntryType { + // return fmt.Errorf("unexpected entry type before batch start: %v, batchStart Batch number: %d", previousFile.EntryType, batchStart.Number) + // } else { + // bookmark, err := types.UnmarshalBookmark(previousFile.Data) + // if err != nil { + // return err + // } + // if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_BATCH { + // return fmt.Errorf("unexpected bookmark type before batch start: %v, batchStart Batch number: %d", bookmark.BookmarkType(), batchStart.Number) + // } + // } + // } + // case types.EntryTypeBatchEnd: + // if previousFile != nil && + // previousFile.EntryType != types.EntryTypeL2BlockEnd && + // previousFile.EntryType != types.EntryTypeBatchStart { + // return fmt.Errorf("unexpected entry type before batch end: %v", previousFile.EntryType) + // } + // case types.EntryTypeL2Tx: + // if previousFile != nil && previousFile.EntryType != types.EntryTypeL2Tx && previousFile.EntryType != types.EntryTypeL2Block { + // return fmt.Errorf("unexpected entry type before l2 tx: %v", previousFile.EntryType) + // } + // case types.EntryTypeL2Block: + // l2Block, err := types.UnmarshalL2Block(file.Data) + // if err != nil { + // return err + // } + // progressBlock = l2Block.L2BlockNumber + // if previousFile != nil { + // if previousFile.EntryType != types.BookmarkEntryType && 
!previousFile.IsL2BlockEnd() { + // return fmt.Errorf("unexpected entry type before l2 block: %v, block number: %d", previousFile.EntryType, l2Block.L2BlockNumber) + // } else { + // bookmark, err := types.UnmarshalBookmark(previousFile.Data) + // if err != nil { + // return err + // } + // if bookmark.BookmarkType() != datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK { + // return fmt.Errorf("unexpected bookmark type before l2 block: %v, block number: %d", bookmark.BookmarkType(), l2Block.L2BlockNumber) + // } + + // } + // } + // case types.EntryTypeGerUpdate: + // return nil + // default: + // return fmt.Errorf("unexpected entry type: %v", file.EntryType) + // } + + // previousFile = file + // return nil + // } + // send start command + err = client.ExecutePerFile(bookmark, printFunction) + fmt.Println("progress block: ", progressBlock) + fmt.Println("progress batch: ", progressBatch) + if err != nil { + panic(fmt.Sprintf("found an error: %s", err)) + } +} diff --git a/zk/debug_tools/env-checker/envs.json b/zk/debug_tools/env-checker/envs.json index 31ffb4d4182..2b112065678 100644 --- a/zk/debug_tools/env-checker/envs.json +++ b/zk/debug_tools/env-checker/envs.json @@ -1,19 +1,19 @@ { "groups": [ { - "groupName": "Integration 5", + "groupName": "Integration 8", "nodes": [ { - "nodeName": "Integration 5 - Legacy", - "rpcURL": "http://34.175.214.161:8505" + "nodeName": "Integration 8 - Legacy", + "rpcURL": "http://34.175.214.161:18505" }, { - "nodeName": "Integration 5 - Erigon RPC", - "rpcURL": "http://34.175.214.161:8500" + "nodeName": "Integration 8 - Erigon RPC", + "rpcURL": "http://34.175.214.161:18124" }, { - "nodeName": "Integration 5 - Erigon Sequencer", - "rpcURL": "http://34.175.214.161:8005" + "nodeName": "Integration 8 - Erigon Sequencer", + "rpcURL": "http://34.175.214.161:18123" } ] } diff --git a/zk/debug_tools/rpc-batch-compare/main.go b/zk/debug_tools/rpc-batch-compare/main.go index 1db9c1b6bb5..e0d5ffa66e8 100644 --- 
a/zk/debug_tools/rpc-batch-compare/main.go +++ b/zk/debug_tools/rpc-batch-compare/main.go @@ -59,7 +59,7 @@ func getBatchByNumber(url string, number *big.Int) (map[string]interface{}, erro requestBody, _ := json.Marshal(map[string]interface{}{ "jsonrpc": "2.0", "method": "zkevm_getBatchByNumber", - "params": []interface{}{number.String(), true}, + "params": []interface{}{number.String(), false}, "id": 1, }) @@ -105,12 +105,10 @@ func compareBatches(erigonURL, legacyURL string, batchNumber *big.Int) (string, // ignore list il := []string{ "timestamp", - "verifyBatchTxHash", - "sendSequencesTxHash", "accInputHash", - "globalExitRoot", - "mainnetExitRoot", + "transactions", "rollupExitRoot", + "mainnetExitRoot", } for _, i := range il { delete(batch1, i) diff --git a/zk/debug_tools/test-contracts/contracts/DelegateCalled.sol b/zk/debug_tools/test-contracts/contracts/DelegateCalled.sol new file mode 100644 index 00000000000..fb96f88369a --- /dev/null +++ b/zk/debug_tools/test-contracts/contracts/DelegateCalled.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity >=0.7.0 <0.9.0; + +contract DelegateCalled { + uint256 num; + address sender; + uint256 value; + + function setVars(uint256 _num) public payable { + num = _num; + sender = msg.sender; + value = msg.value; + } + + function setVarsViaCall(uint256 _num) public payable { + bool ok; + (ok, ) = address(this).call( + abi.encodeWithSignature("setVars(uint256)", _num) + ); + require(ok, "failed to perform call"); + } + + function getVars() public view returns (uint256, address, uint256) { + return (num, sender, value); + } + + function getVarsAndVariable(uint256 _num) public view returns (uint256, address, uint256, uint256) { + return (num, sender, value, _num); + } +} \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/contracts/DelegateCaller.sol b/zk/debug_tools/test-contracts/contracts/DelegateCaller.sol new file mode 100644 index 00000000000..cebda7bd72d --- /dev/null +++ 
b/zk/debug_tools/test-contracts/contracts/DelegateCaller.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity >=0.7.0 <0.9.0; + +contract DelegateCaller { + function call(address _contract, uint _num) public payable { + bool ok; + (ok, ) = _contract.call( + abi.encodeWithSignature("setVars(uint256)", _num) + ); + require(ok, "failed to perform call"); + } + + function delegateCall(address _contract, uint _num) public payable { + bool ok; + (ok, ) = _contract.delegatecall( + abi.encodeWithSignature("setVars(uint256)", _num) + ); + require(ok, "failed to perform delegate call"); + } + + function staticCall(address _contract) public payable { + bool ok; + bytes memory result; + (ok, result) = _contract.staticcall( + abi.encodeWithSignature("getVars()") + ); + require(ok, "failed to perform static call"); + + uint256 num; + address sender; + uint256 value; + + (num, sender, value) = abi.decode(result, (uint256, address, uint256)); + } + + function invalidStaticCallMoreParameters(address _contract) view public { + bool ok; + (ok,) = _contract.staticcall( + abi.encodeWithSignature("getVarsAndVariable(uint256)", 1, 2) + ); + require(!ok, "static call was supposed to fail with more parameters"); + } + + function invalidStaticCallLessParameters(address _contract) view public { + bool ok; + (ok,) = _contract.staticcall( + abi.encodeWithSignature("getVarsAndVariable(uint256)") + ); + require(!ok, "static call was supposed to fail with less parameters"); + } + + function invalidStaticCallWithInnerCall(address _contract) view public { + bool ok; + (ok,) = _contract.staticcall( + abi.encodeWithSignature("getVarsAndVariable(uint256)") + ); + require(!ok, "static call was supposed to fail with less parameters"); + } + + function multiCall(address _contract, uint _num) public payable { + call(_contract, _num); + delegateCall(_contract, _num); + staticCall(_contract); + } + + function preEcrecover_0() pure public { + bytes32 messHash = 
0x456e9aea5e197a1f1af7a3e85a3212fa4049a3ba34c2289b4c860fc0b0c64ef3; + uint8 v = 28; + bytes32 r = 0x9242685bf161793cc25603c231bc2f568eb630ea16aa137d2664ac8038825608; + bytes32 s = 0x4f8ae3bd7535248d0bd448298cc2e2071e56992d0774dc340c368ae950852ada; + + ecrecover(messHash, v, r, s); + } +} \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/package.json b/zk/debug_tools/test-contracts/package.json index 7d223095400..7dc842b2cf6 100644 --- a/zk/debug_tools/test-contracts/package.json +++ b/zk/debug_tools/test-contracts/package.json @@ -12,7 +12,9 @@ "counter:mainnet": "npx hardhat compile && npx hardhat run scripts/counter.js --network mainnet", "emitlog:bali": "npx hardhat compile && npx hardhat run scripts/emitlog.js --network bali", "emitlog:cardona": "npx hardhat compile && npx hardhat run scripts/emitlog.js --network cardona", - "emitlog:mainnet": "npx hardhat compile && npx hardhat run scripts/emitlog.js --network mainnet" + "emitlog:mainnet": "npx hardhat compile && npx hardhat run scripts/emitlog.js --network mainnet", + "spam:local": "npx hardhat compile && npx hardhat run scripts/spam-transactions.js --network local", + "delegateCall:local": "npx hardhat compile && npx hardhat run scripts/delegate-call.js --network local" }, "keywords": [], "author": "", diff --git a/zk/debug_tools/test-contracts/scripts/delegate-call.js b/zk/debug_tools/test-contracts/scripts/delegate-call.js new file mode 100644 index 00000000000..a359f9890ab --- /dev/null +++ b/zk/debug_tools/test-contracts/scripts/delegate-call.js @@ -0,0 +1,31 @@ +// deploys contracts and calls a method to produce delegate call +async function main() { +try { + const DelegateCalled = await hre.ethers.getContractFactory("DelegateCalled"); + const DelegateCaller = await hre.ethers.getContractFactory("DelegateCaller"); + + // Deploy the contracts + const calledContract = await DelegateCalled.deploy(); + const callerContract = await DelegateCaller.deploy(); + + // Wait for the deployment 
transactions to be mined + await calledContract.waitForDeployment(); + await callerContract.waitForDeployment(); + + console.log(`DelegateCalled deployed to: ${await calledContract.getAddress()}`); + console.log(`DelegateCaller deployed to: ${await callerContract.getAddress()}`); + + const delegateCallResult = await callerContract.delegateCall(calledContract.getAddress(), 1); + console.log('delegateCallResult method call transaction: ', delegateCallResult.hash); + } catch (error) { + console.error(error.toString()); + process.exit(1); + } +} + +main() + .then(() => process.exit(0)) + .catch(error => { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/zk/debug_tools/test-contracts/scripts/spam-transactions.js b/zk/debug_tools/test-contracts/scripts/spam-transactions.js new file mode 100644 index 00000000000..08a7ebcd035 --- /dev/null +++ b/zk/debug_tools/test-contracts/scripts/spam-transactions.js @@ -0,0 +1,34 @@ +async function main() { +try { + + const provider = hre.ethers.provider; + const signer = await provider.getSigner(); + const nonce = await signer.getNonce(); + const balance = await provider.getBalance(signer.address); + console.log("Balance before: " + balance); + + for (let i = nonce+10000; i >= nonce; i--) { + try { + await signer.sendTransaction({ + to: "0xB6f9665E564c0ADdA517c698Ebe32BA6Feb5Da35", + value: hre.ethers.parseEther("0.000000000000000001"), + gasPrice: 10, //if allowGreeTransactions flag is not set, the minimum gasPrice is 1gWei + gasLimit: 40000, + nonce: i + }); + } catch(e) { + console.log(e.toString()); + } + } +} catch (error) { + console.error(error); + process.exit(1); + } +} + +main() + .then(() => process.exit(0)) + .catch(error => { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/zk/hermez_db/db.go b/zk/hermez_db/db.go index c74b04224e2..131221a977f 100644 --- a/zk/hermez_db/db.go +++ b/zk/hermez_db/db.go @@ -264,6 +264,54 @@ func (db 
*HermezDbReader) GetSequenceByBatchNo(batchNo uint64) (*types.L1BatchIn return db.getByBatchNo(L1SEQUENCES, batchNo) } +func (db *HermezDbReader) GetSequenceByBatchNoOrHighest(batchNo uint64) (*types.L1BatchInfo, error) { + seq, err := db.GetSequenceByBatchNo(batchNo) + if err != nil { + return nil, err + } + + if seq != nil { + return seq, nil + } + + // start a cursor at the current batch no and then call .next to find the next highest sequence + c, err := db.tx.Cursor(L1SEQUENCES) + if err != nil { + return nil, err + } + defer c.Close() + + var k, v []byte + for k, v, err = c.Seek(Uint64ToBytes(batchNo)); k != nil; k, v, err = c.Next() { + if err != nil { + return nil, err + } + + l1Block, batch, err := SplitKey(k) + if err != nil { + return nil, err + } + + if batch > batchNo { + if len(v) != 64 { + return nil, fmt.Errorf("invalid hash length") + } + + l1TxHash := common.BytesToHash(v[:32]) + stateRoot := common.BytesToHash(v[32:64]) + + return &types.L1BatchInfo{ + BatchNo: batch, + L1BlockNo: l1Block, + StateRoot: stateRoot, + L1TxHash: l1TxHash, + }, nil + } + } + + return nil, nil +} + func (db *HermezDbReader) GetVerificationByL1Block(l1BlockNo uint64) (*types.L1BatchInfo, error) { return db.getByL1Block(L1VERIFICATIONS, l1BlockNo) } @@ -272,6 +320,59 @@ func (db *HermezDbReader) GetVerificationByBatchNo(batchNo uint64) (*types.L1Bat return db.getByBatchNo(L1VERIFICATIONS, batchNo) } +func (db *HermezDbReader) GetVerificationByBatchNoOrHighest(batchNo uint64) (*types.L1BatchInfo, error) { + batchInfo, err := db.GetVerificationByBatchNo(batchNo) + if err != nil { + return nil, err + } + + if batchInfo != nil { + return batchInfo, nil + } + + // start a cursor at the current batch no and then call .next to find the next highest verification + c, err := db.tx.Cursor(L1VERIFICATIONS) + if err != nil { + return nil, err + } + defer c.Close() + + var k, v []byte + for k, v, err = c.Seek(Uint64ToBytes(batchNo)); k != nil; k, v, err = c.Next() { + if err != nil { 
+ return nil, err + } + + l1Block, batch, err := SplitKey(k) + if err != nil { + return nil, err + } + + if batch > batchNo { + if len(v) != 96 && len(v) != 64 { + return nil, fmt.Errorf("invalid hash length") + } + + l1TxHash := common.BytesToHash(v[:32]) + stateRoot := common.BytesToHash(v[32:64]) + var l1InfoRoot common.Hash + if len(v) > 64 { + l1InfoRoot = common.BytesToHash(v[64:]) + } + + return &types.L1BatchInfo{ + BatchNo: batch, + L1BlockNo: l1Block, + StateRoot: stateRoot, + L1TxHash: l1TxHash, + L1InfoRoot: l1InfoRoot, + }, nil + } + } + + return nil, nil +} + func (db *HermezDbReader) getByL1Block(table string, l1BlockNo uint64) (*types.L1BatchInfo, error) { c, err := db.tx.Cursor(table) if err != nil { @@ -1463,6 +1564,26 @@ func (db *HermezDb) GetL1InfoTreeIndexByRoot(hash common.Hash) (uint64, bool, er return BytesToUint64(data), data != nil, nil } +func (db *HermezDbReader) GetL1InfoTreeIndexToRoots() (map[uint64]common.Hash, error) { + c, err := db.tx.Cursor(L1_INFO_ROOTS) + if err != nil { + return nil, err + } + defer c.Close() + + indexToRoot := make(map[uint64]common.Hash) + for k, v, err := c.First(); k != nil; k, v, err = c.Next() { + if err != nil { + return nil, err + } + index := BytesToUint64(v) + root := common.BytesToHash(k) + indexToRoot[index] = root + } + + return indexToRoot, nil +} + func (db *HermezDbReader) GetForkIdByBlockNum(blockNum uint64) (uint64, error) { blockbatch, err := db.GetBatchNoByL2Block(blockNum) if err != nil { diff --git a/zk/l1_cache/l1_cache.go b/zk/l1_cache/l1_cache.go new file mode 100644 index 00000000000..7d0bf44d379 --- /dev/null +++ b/zk/l1_cache/l1_cache.go @@ -0,0 +1,269 @@ +package l1_cache + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "errors" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/log/v3" +) + +const ( + bucketName = "Cache" + expiryBucket = "Expiry" +) + +// methods we don't 
cache +var methodsToIgnore = map[string]struct{}{} + +// methods we configure expiry for +var methodsToExpire = map[string]time.Duration{ + "eth_getBlockByNumber": 1 * time.Minute, +} + +// params that trigger expiration +var paramsToExpire = map[string]struct{}{ + "latest": {}, + "finalized": {}, +} + +type L1Cache struct { + server *http.Server + db kv.RwDB +} + +func NewL1Cache(ctx context.Context, dbPath string, port uint) (*L1Cache, error) { + db := mdbx.NewMDBX(log.New()).Path(dbPath).MustOpen() + + tx, err := db.BeginRw(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + if err := tx.CreateBucket(bucketName); err != nil { + return nil, err + } + if err := tx.CreateBucket(expiryBucket); err != nil { + return nil, err + } + if err := tx.Commit(); err != nil { + return nil, err + } + + http.HandleFunc("/", handleRequest(db)) + addr := fmt.Sprintf(":%d", port) + server := &http.Server{ + Addr: addr, + Handler: nil, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + + go func() { + log.Info("Starting L1 Cache Server on port:", "port", port) + if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + log.Error("L1 Cache Server stopped", "error", err) + } + }() + + go func() { + <-ctx.Done() + log.Info("Shutting down L1 Cache Server...") + if err := server.Shutdown(context.Background()); err != nil { + log.Error("Failed to shutdown L1 Cache Server", "error", err) + } + db.Close() + }() + + return &L1Cache{ + server: server, + db: db, + }, nil +} + +func fetchFromCache(tx kv.RwTx, key string) ([]byte, bool) { + data, err := tx.GetOne(bucketName, []byte(key)) + if err != nil || data == nil { + return nil, false + } + + expiry, err := tx.GetOne(expiryBucket, []byte(key)) + if err == nil && expiry != nil { + expiryTime, err := time.Parse(time.RFC3339, string(expiry)) + if err == nil && time.Now().After(expiryTime) { + // Cache entry has expired + evictFromCache(tx, key) + return nil, 
false + } + } + + // Check if the cached response contains an error + var jsonResponse map[string]interface{} + if err := json.Unmarshal(data, &jsonResponse); err == nil { + if _, hasError := jsonResponse["error"]; hasError { + // Cache entry is an error, evict it + evictFromCache(tx, key) + return nil, false + } + } + + return data, true +} + +func evictFromCache(tx kv.RwTx, key string) { + if err := tx.Delete(bucketName, []byte(key)); err != nil { + log.Warn("Failed to evict from cache", "error", err) + } + if err := tx.Delete(expiryBucket, []byte(key)); err != nil { + log.Warn("Failed to evict from cache", "error", err) + } +} + +func saveToCache(tx kv.RwTx, key string, response []byte, duration time.Duration) error { + if err := tx.Put(bucketName, []byte(key), response); err != nil { + return err + } + // Only set expiry if duration is not zero (indicating that it should expire) + if duration > 0 { + expiryTime := time.Now().Add(duration).Format(time.RFC3339) + if err := tx.Put(expiryBucket, []byte(key), []byte(expiryTime)); err != nil { + return err + } + } + return nil +} + +func generateCacheKey(chainID string, body []byte) (string, error) { + var request map[string]interface{} + err := json.Unmarshal(body, &request) + if err != nil { + return "", err + } + delete(request, "id") + modifiedBody, err := json.Marshal(request) + if err != nil { + return "", err + } + return fmt.Sprintf("%s_%s", chainID, modifiedBody), nil +} + +func handleRequest(db kv.RwDB) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + endpoint := r.URL.Query().Get("endpoint") + chainID := r.URL.Query().Get("chainid") + if endpoint == "" || chainID == "" { + http.Error(w, "Missing endpoint or chainid parameter", http.StatusBadRequest) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Failed to read request body", http.StatusInternalServerError) + return + } + defer r.Body.Close() + + var request map[string]interface{} + if err := 
json.Unmarshal(body, &request); err != nil { + http.Error(w, "Invalid JSON-RPC request", http.StatusBadRequest) + return + } + + method, ok := request["method"].(string) + if !ok { + http.Error(w, "Invalid JSON-RPC method", http.StatusBadRequest) + return + } + + cacheKey, err := generateCacheKey(chainID, body) + if err != nil { + http.Error(w, "Failed to generate cache key", http.StatusInternalServerError) + return + } + + if _, ignore := methodsToIgnore[method]; !ignore { + tx, err := db.BeginRw(r.Context()) + if err != nil { + http.Error(w, "Failed to begin transaction", http.StatusInternalServerError) + return + } + if cachedResponse, found := fetchFromCache(tx, cacheKey); found { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Cache-Status", "HIT") + w.Write(cachedResponse) + tx.Commit() + return + } + tx.Rollback() + } + + resp, err := http.Post(endpoint, "application/json", bytes.NewBuffer(body)) + if err != nil { + http.Error(w, "Failed to fetch from upstream", http.StatusInternalServerError) + return + } + defer resp.Body.Close() + + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + http.Error(w, "Failed to read upstream response", http.StatusInternalServerError) + return + } + + if resp.StatusCode == http.StatusOK { + // Check if the response contains a JSON-RPC error + var jsonResponse map[string]interface{} + if err := json.Unmarshal(responseBody, &jsonResponse); err == nil { + if _, hasError := jsonResponse["error"]; hasError { + fmt.Println("Received error response from upstream, not caching") + } else { + if _, ignore := methodsToIgnore[method]; !ignore { + cacheDuration := time.Duration(0) + if duration, found := methodsToExpire[method]; found { + if method == "eth_getBlockByNumber" { + params, ok := request["params"].([]interface{}) + if ok && len(params) > 0 { + param, ok := params[0].(string) + if ok { + if _, shouldExpire := paramsToExpire[param]; shouldExpire { + cacheDuration = duration + } + } + } + } 
else { + cacheDuration = duration + } + } + tx, err := db.BeginRw(r.Context()) + if err != nil { + http.Error(w, "Failed to begin transaction", http.StatusInternalServerError) + return + } + defer tx.Rollback() + if err := saveToCache(tx, cacheKey, responseBody, cacheDuration); err != nil { + http.Error(w, "Failed to save to cache", http.StatusInternalServerError) + return + } + tx.Commit() + } + } + } else { + fmt.Println("Failed to parse upstream response, not caching") + } + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Cache-Status", "MISS") + w.Write(responseBody) + } +} diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 412ae4b4639..986a93d93f4 100644 --- a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -10,19 +10,21 @@ import ( "errors" "fmt" + "sync" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier/proto/github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ledgerwatch/erigon/zk/syncer" - "github.com/ledgerwatch/log/v3" - "sync" "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" ) var ErrNoExecutorAvailable = fmt.Errorf("no executor available") @@ -226,7 +228,7 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ hermezDb := hermez_db.NewHermezDbReader(tx) l1InfoTreeMinTimestamps := make(map[uint64]uint64) - streamBytes, err := v.GetStreamBytes(request.BatchNumber, tx, blocks, 
hermezDb, l1InfoTreeMinTimestamps, nil) + streamBytes, err := v.GetWholeBatchStreamBytes(request.BatchNumber, tx, blocks, hermezDb, l1InfoTreeMinTimestamps, nil) if err != nil { return verifierBundle, err } @@ -281,8 +283,10 @@ func (v *LegacyExecutorVerifier) AddRequestUnsafe(request *VerifierRequest, sequ // log timing w/o stream write t.LogTimer() - if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { - log.Error("error writing data to stream", "err", err) + if ok { + if err = v.checkAndWriteToStream(tx, hermezDb, request.BatchNumber); err != nil { + log.Error("error writing data to stream", "err", err) + } } verifierBundle.response = &VerifierResponse{ @@ -326,10 +330,10 @@ func (v *LegacyExecutorVerifier) checkAndWriteToStream(tx kv.Tx, hdb *hermez_db. // check if we have the next batch we're waiting for if latestBatch == newBatch-1 { - v.lowestWrittenBatch = newBatch if err := v.WriteBatchToStream(newBatch, hdb, tx); err != nil { return err } + v.lowestWrittenBatch = newBatch delete(v.responsesToWrite, newBatch) } } @@ -437,12 +441,8 @@ func (v *LegacyExecutorVerifier) IsRequestAddedUnsafe(batch uint64) bool { func (v *LegacyExecutorVerifier) WriteBatchToStream(batchNumber uint64, hdb *hermez_db.HermezDbReader, roTx kv.Tx) error { log.Info("[Verifier] Writing batch to stream", "batch", batchNumber) - blks, err := hdb.GetL2BlockNosByBatch(batchNumber) - if err != nil { - return err - } - if err := v.streamServer.WriteBlocksToStream(roTx, hdb, blks[0], blks[len(blks)-1], "verifier"); err != nil { + if err := v.streamServer.WriteWholeBatchToStream("verifier", roTx, hdb, v.lowestWrittenBatch, batchNumber); err != nil { return err } return nil @@ -495,50 +495,59 @@ func (v *LegacyExecutorVerifier) availableBlocksToProcess(innerCtx context.Conte return blocks, nil } -func (v *LegacyExecutorVerifier) GetStreamBytes( +func (v *LegacyExecutorVerifier) GetWholeBatchStreamBytes( batchNumber uint64, tx kv.Tx, - blocks []uint64, + blockNumbers 
[]uint64, hermezDb *hermez_db.HermezDbReader, l1InfoTreeMinTimestamps map[uint64]uint64, transactionsToIncludeByIndex [][]int, // passing nil here will include all transactions in the blocks -) ([]byte, error) { - lastBlock, err := rawdb.ReadBlockByNumber(tx, blocks[0]-1) - if err != nil { - return nil, err - } - var streamBytes []byte +) (streamBytes []byte, err error) { + blocks := make([]types.Block, 0, len(blockNumbers)) + txsPerBlock := make(map[uint64][]types.Transaction) // as we only ever use the executor verifier for whole batches we can safely assume that the previous batch // will always be the request batch - 1 and that the first block in the batch will be at the batch // boundary so we will always add in the batch bookmark to the stream previousBatch := batchNumber - 1 - for idx, blockNumber := range blocks { + for idx, blockNumber := range blockNumbers { block, err := rawdb.ReadBlockByNumber(tx, blockNumber) if err != nil { return nil, err } + blocks = append(blocks, *block) - var sBytes []byte - - isBatchEnd := idx == len(blocks)-1 - - var transactionsToIncludeByIndexInBlock []int = nil + filteredTransactions := block.Transactions() + // filter transactions by indexes that should be included if transactionsToIncludeByIndex != nil { - transactionsToIncludeByIndexInBlock = transactionsToIncludeByIndex[idx] + filteredTransactions = filterTransactionByIndexes(block.Transactions(), transactionsToIncludeByIndex[idx]) } - sBytes, err = server.CreateAndBuildStreamEntryBytesProto(v.streamServer.GetChainId(), block, hermezDb, tx, lastBlock, batchNumber, previousBatch, l1InfoTreeMinTimestamps, isBatchEnd, transactionsToIncludeByIndexInBlock) - if err != nil { - return nil, err + + txsPerBlock[blockNumber] = filteredTransactions + } + + entries, err := server.BuildWholeBatchStreamEntriesProto(tx, hermezDb, v.streamServer.GetChainId(), batchNumber, previousBatch, blocks, txsPerBlock, l1InfoTreeMinTimestamps) + if err != nil { + return nil, err + } + + return 
entries.Marshal() +} + +func filterTransactionByIndexes( + filteredTransactions types.Transactions, + transactionsToIncludeByIndex []int, +) types.Transactions { + if transactionsToIncludeByIndex != nil { + filteredTransactionsBuilder := make(types.Transactions, len(transactionsToIncludeByIndex)) + for i, txIndexInBlock := range transactionsToIncludeByIndex { + filteredTransactionsBuilder[i] = filteredTransactions[txIndexInBlock] } - streamBytes = append(streamBytes, sBytes...) - lastBlock = block - // we only put in the batch bookmark at the start of the stream data once - previousBatch = batchNumber + filteredTransactions = filteredTransactionsBuilder } - return streamBytes, nil + return filteredTransactions } diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index ec7b0dbf3fc..5926cd71104 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -76,11 +76,14 @@ type HermezDb interface { type DatastreamClient interface { ReadAllEntriesToChannel() error GetL2BlockChan() chan types.FullL2Block + GetL2TxChan() chan types.L2TransactionProto GetBatchStartChan() chan types.BatchStart + GetBatchEndChan() chan types.BatchEnd GetGerUpdatesChan() chan types.GerUpdate GetLastWrittenTimeAtomic() *atomic.Int64 GetStreamingAtomic() *atomic.Bool GetProgressAtomic() *atomic.Uint64 + EnsureConnected() (bool, error) } type BatchesCfg struct { @@ -167,17 +170,26 @@ func SpawnStageBatches( // if no error, break, else continue trying to get them // Create bookmark + connected := false + for i := 0; i < 5; i++ { + connected, err = cfg.dsClient.EnsureConnected() + if err != nil { + log.Error("[datastream_client] Error connecting to datastream", "error", err) + continue + } + if connected { + break + } + } + go func() { log.Info(fmt.Sprintf("[%s] Started downloading L2Blocks routine", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Finished downloading L2Blocks routine", logPrefix)) - for i := 0; i < 5; i++ { + if connected { if err := 
cfg.dsClient.ReadAllEntriesToChannel(); err != nil { log.Error("[datastream_client] Error downloading blocks from datastream", "error", err) - continue } - // if it was overnot because of an error, don't try to reconnect - break } }() } @@ -221,6 +233,7 @@ func SpawnStageBatches( l2BlockChan := cfg.dsClient.GetL2BlockChan() batchStartChan := cfg.dsClient.GetBatchStartChan() + batchEndChan := cfg.dsClient.GetBatchEndChan() gerUpdateChan := cfg.dsClient.GetGerUpdatesChan() lastWrittenTimeAtomic := cfg.dsClient.GetLastWrittenTimeAtomic() streamingAtomic := cfg.dsClient.GetStreamingAtomic() @@ -248,6 +261,12 @@ LOOP: } } _ = batchStart + case batchEnd := <-batchEndChan: + if batchEnd.LocalExitRoot != emptyHash { + if err := hermezDb.WriteLocalExitRootForBatchNo(batchEnd.Number, batchEnd.LocalExitRoot); err != nil { + return fmt.Errorf("write local exit root for l1 block hash error: %v", err) + } + } case l2Block := <-l2BlockChan: if cfg.zkCfg.SyncLimit > 0 && l2Block.L2BlockNumber >= cfg.zkCfg.SyncLimit { // stop the node going into a crazy loop @@ -275,10 +294,6 @@ LOOP: // NOTE (RPC): avoided use of 'writeForkIdBlockOnce' by reading instead batch by forkId, and then lowest block number in batch } - l2Block.ChainId = cfg.zkCfg.L2ChainId - - atLeastOneBlockWritten = true - // ignore genesis or a repeat of the last block if l2Block.L2BlockNumber == 0 { continue @@ -358,6 +373,7 @@ LOOP: lastHash = l2Block.L2Blockhash + atLeastOneBlockWritten = true lastBlockHeight = l2Block.L2BlockNumber blocksWritten++ progressChan <- blocksWritten @@ -410,6 +426,7 @@ LOOP: startTime = time.Now() } } + time.Sleep(10 * time.Millisecond) } if blocksWritten != prevAmountBlocksWritten && blocksWritten%STAGE_PROGRESS_SAVE == 0 { @@ -841,18 +858,11 @@ func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, return fmt.Errorf("write ger for l1 block hash error: %v", err) } } - - // LER per batch - write the ler of the last block in the batch - if l2Block.BatchEnd && 
l2Block.LocalExitRoot != emptyHash { - if err := hermezDb.WriteLocalExitRootForBatchNo(l2Block.BatchNumber, l2Block.LocalExitRoot); err != nil { - return fmt.Errorf("write local exit root for l1 block hash error: %v", err) - } - } } } if l2Block.L1InfoTreeIndex != 0 { - if err = hermezDb.WriteBlockL1InfoTreeIndex(l2Block.L2BlockNumber, uint64(l2Block.L1InfoTreeIndex)); err != nil { + if err := hermezDb.WriteBlockL1InfoTreeIndex(l2Block.L2BlockNumber, uint64(l2Block.L1InfoTreeIndex)); err != nil { return err } @@ -862,13 +872,13 @@ func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, // for the stream and also for the block info root to be correct if uint64(l2Block.L1InfoTreeIndex) <= highestL1InfoTreeIndex { l1InfoTreeIndexReused = true - if err = hermezDb.WriteBlockGlobalExitRoot(l2Block.L2BlockNumber, l2Block.GlobalExitRoot); err != nil { + if err := hermezDb.WriteBlockGlobalExitRoot(l2Block.L2BlockNumber, l2Block.GlobalExitRoot); err != nil { return fmt.Errorf("write block global exit root error: %w", err) } - if err = hermezDb.WriteBlockL1BlockHash(l2Block.L2BlockNumber, l2Block.L1BlockHash); err != nil { + if err := hermezDb.WriteBlockL1BlockHash(l2Block.L2BlockNumber, l2Block.L1BlockHash); err != nil { return fmt.Errorf("write block global exit root error: %w", err) } - if err = hermezDb.WriteReusedL1InfoTreeIndex(l2Block.L2BlockNumber); err != nil { + if err := hermezDb.WriteReusedL1InfoTreeIndex(l2Block.L2BlockNumber); err != nil { return fmt.Errorf("write reused l1 info tree index error: %w", err) } } @@ -879,7 +889,7 @@ func writeL2Block(eriDb ErigonDb, hermezDb HermezDb, l2Block *types.FullL2Block, // we always want the last written GER in this table as it's at the batch level, so it can and should // be overwritten if !l1InfoTreeIndexReused && didStoreGer { - if err = hermezDb.WriteLatestUsedGer(l2Block.BatchNumber, l2Block.GlobalExitRoot); err != nil { + if err := hermezDb.WriteLatestUsedGer(l2Block.BatchNumber, 
l2Block.GlobalExitRoot); err != nil { return fmt.Errorf("write latest used ger error: %w", err) } } diff --git a/zk/stages/stage_batches_test.go b/zk/stages/stage_batches_test.go index c721e7bc6c0..5ffd407e1d4 100644 --- a/zk/stages/stage_batches_test.go +++ b/zk/stages/stage_batches_test.go @@ -36,7 +36,6 @@ func TestUnwindBatches(t *testing.T) { GlobalExitRoot: common.Hash{byte(i)}, Coinbase: common.Address{byte(i)}, ForkId: uint64(i) / 3, - ChainId: uint64(1), L1BlockHash: common.Hash{byte(i)}, L2Blockhash: common.Hash{byte(i)}, StateRoot: common.Hash{byte(i)}, diff --git a/zk/stages/stage_dataStreamCatchup.go b/zk/stages/stage_dataStreamCatchup.go index 0e25e858780..d369f160e12 100644 --- a/zk/stages/stage_dataStreamCatchup.go +++ b/zk/stages/stage_dataStreamCatchup.go @@ -61,7 +61,7 @@ func SpawnStageDataStreamCatchup( createdTx = true } - finalBlockNumber, err := CatchupDatastream(logPrefix, tx, stream, cfg.chainId, cfg.streamVersion, cfg.hasExecutors) + finalBlockNumber, err := CatchupDatastream(ctx, logPrefix, tx, stream, cfg.chainId, cfg.streamVersion, cfg.hasExecutors) if err != nil { return err } @@ -77,7 +77,7 @@ func SpawnStageDataStreamCatchup( return err } -func CatchupDatastream(logPrefix string, tx kv.RwTx, stream *datastreamer.StreamServer, chainId uint64, streamVersion int, hasExecutors bool) (uint64, error) { +func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, stream *datastreamer.StreamServer, chainId uint64, streamVersion int, hasExecutors bool) (uint64, error) { srv := server.NewDataStreamServer(stream, chainId) reader := hermez_db.NewHermezDbReader(tx) @@ -143,7 +143,7 @@ func CatchupDatastream(logPrefix string, tx kv.RwTx, stream *datastreamer.Stream } } - if err = srv.WriteBlocksToStream(tx, reader, previousProgress+1, finalBlockNumber, logPrefix); err != nil { + if err = srv.WriteBlocksToStreamConsecutively(ctx, logPrefix, tx, reader, previousProgress+1, finalBlockNumber); err != nil { return 0, err } diff --git 
a/zk/stages/stage_interhashes.go b/zk/stages/stage_interhashes.go index 25d306b9606..5963a3ca2db 100644 --- a/zk/stages/stage_interhashes.go +++ b/zk/stages/stage_interhashes.go @@ -149,7 +149,7 @@ func SpawnZkIntermediateHashesStage(s *stagedsync.StageState, u stagedsync.Unwin return trie.EmptyRoot, err } } else { - if root, err = regenerateIntermediateHashes(logPrefix, tx, eridb, smt, to); err != nil { + if root, err = regenerateIntermediateHashes(ctx, logPrefix, tx, eridb, smt, to); err != nil { return trie.EmptyRoot, err } } @@ -241,7 +241,7 @@ func UnwindZkIntermediateHashesStage(u *stagedsync.UnwindState, s *stagedsync.St return nil } -func regenerateIntermediateHashes(logPrefix string, db kv.RwTx, eridb *db2.EriDb, smtIn *smt.SMT, toBlock uint64) (common.Hash, error) { +func regenerateIntermediateHashes(ctx context.Context, logPrefix string, db kv.RwTx, eridb *db2.EriDb, smtIn *smt.SMT, toBlock uint64) (common.Hash, error) { log.Info(fmt.Sprintf("[%s] Regeneration trie hashes started", logPrefix)) defer log.Info(fmt.Sprintf("[%s] Regeneration ended", logPrefix)) @@ -325,7 +325,7 @@ func regenerateIntermediateHashes(logPrefix string, db kv.RwTx, eridb *db2.EriDb log.Info(fmt.Sprintf("[%s] Collecting account data finished in %v", logPrefix, dataCollectTime)) // generate tree - if _, err := smtIn.GenerateFromKVBulk(logPrefix, keys); err != nil { + if _, err := smtIn.GenerateFromKVBulk(ctx, logPrefix, keys); err != nil { return trie.EmptyRoot, err } diff --git a/zk/stages/stage_l1_info_tree.go b/zk/stages/stage_l1_info_tree.go index 6a1098f9aa0..b4f806671b0 100644 --- a/zk/stages/stage_l1_info_tree.go +++ b/zk/stages/stage_l1_info_tree.go @@ -88,6 +88,7 @@ LOOP: if !cfg.syncer.IsDownloading() { break LOOP } + time.Sleep(10 * time.Millisecond) } } diff --git a/zk/stages/stage_l1_sequencer_sync.go b/zk/stages/stage_l1_sequencer_sync.go index bc1617cab32..021fbba3755 100644 --- a/zk/stages/stage_l1_sequencer_sync.go +++ b/zk/stages/stage_l1_sequencer_sync.go @@ 
-4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "time" "github.com/iden3/go-iden3-crypto/keccak256" "github.com/ledgerwatch/erigon-lib/common" @@ -143,6 +144,7 @@ Loop: if !cfg.syncer.IsDownloading() { break Loop } + time.Sleep(10 * time.Millisecond) } } @@ -207,7 +209,6 @@ func HandleL1InfoTreeUpdate( } const ( - injectedBatchLogTrailingBytes = 24 injectedBatchLogTransactionStartByte = 128 injectedBatchLastGerStartByte = 31 injectedBatchLastGerEndByte = 64 @@ -230,9 +231,11 @@ func HandleInitialSequenceBatches( } } - // the log appears to have some trailing 24 bytes of all 0s in it. Not sure why but we can't handle the + // the log appears to have some trailing some bytes of all 0s in it. Not sure why but we can't handle the // TX without trimming these off + injectedBatchLogTrailingBytes := getTrailingCutoffLen(l.Data) trailingCutoff := len(l.Data) - injectedBatchLogTrailingBytes + log.Debug(fmt.Sprintf("Handle initial sequence batches, trail len:%v, log data: %v", injectedBatchLogTrailingBytes, l.Data)) txData := l.Data[injectedBatchLogTransactionStartByte:trailingCutoff] @@ -260,3 +263,12 @@ func UnwindL1SequencerSyncStage(u *stagedsync.UnwindState, tx kv.RwTx, cfg L1Seq func PruneL1SequencerSyncStage(s *stagedsync.PruneState, tx kv.RwTx, cfg L1SequencerSyncCfg, ctx context.Context) error { return nil } + +func getTrailingCutoffLen(logData []byte) int { + for i := len(logData) - 1; i >= 0; i-- { + if logData[i] != 0 { + return len(logData) - i - 1 + } + } + return 0 +} diff --git a/zk/stages/stage_l1syncer.go b/zk/stages/stage_l1syncer.go index 814b5b4147b..737ab38bca1 100644 --- a/zk/stages/stage_l1syncer.go +++ b/zk/stages/stage_l1syncer.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "time" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/log/v3" @@ -162,6 +163,7 @@ Loop: if !cfg.syncer.IsDownloading() { break Loop } + time.Sleep(10 * time.Millisecond) } } diff --git a/zk/stages/stage_sequence_execute.go 
b/zk/stages/stage_sequence_execute.go index 7dace195c9c..c23a057febf 100644 --- a/zk/stages/stage_sequence_execute.go +++ b/zk/stages/stage_sequence_execute.go @@ -73,6 +73,15 @@ func SpawnSequencingStage( getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(sdb.tx, hash, number) } hasExecutorForThisBatch := !isLastBatchPariallyProcessed && cfg.zk.HasExecutors() + // handle case where batch wasn't closed properly + // close it before starting a new one + // this occurs when sequencer was switched from syncer or sequencer datastream files were deleted + // and datastream was regenerated + isLastEntryBatchEnd, err := cfg.datastreamServer.IsLastEntryBatchEnd() + if err != nil { + return err + } + // injected batch if executionAt == 0 { // set the block height for the fork we're running at to ensure contract interactions are correct @@ -92,8 +101,7 @@ func SpawnSequencingStage( return err } - // write the batch directly to the stream - if err = cfg.datastreamServer.WriteBlocksToStream(tx, sdb.hermezDb.HermezDbReader, injectedBatchBlockNumber, injectedBatchBlockNumber, logPrefix); err != nil { + if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchNumber); err != nil { return err } @@ -106,6 +114,23 @@ func SpawnSequencingStage( return nil } + if !isLastBatchPariallyProcessed && !isLastEntryBatchEnd { + log.Warn(fmt.Sprintf("[%s] Last batch %d was not closed properly, closing it now...", logPrefix, lastBatch)) + ler, err := utils.GetBatchLocalExitRootFromSCStorage(lastBatch, sdb.hermezDb.HermezDbReader, tx) + if err != nil { + return err + } + + lastBlock, err := rawdb.ReadBlockByNumber(sdb.tx, executionAt) + if err != nil { + return err + } + root := lastBlock.Root() + if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, lastBatch, lastBatch-1, &root, &ler); err != nil { + return err + } + } + if err := utils.UpdateZkEVMBlockCfg(cfg.chainConfig, sdb.hermezDb, 
logPrefix); err != nil { return err } @@ -246,6 +271,7 @@ func SpawnSequencingStage( prevHeader := rawdb.ReadHeaderByNumber(tx, executionAt) batchDataOverflow := false + tryHaltSequencer(logPrefix, cfg, thisBatch) var block *types.Block for blockNumber := executionAt + 1; runLoopBlocks; blockNumber++ { @@ -361,7 +387,7 @@ func SpawnSequencingStage( default: if limboRecovery { cfg.txPool.LockFlusher() - blockTransactions, err = getLimboTransaction(cfg, limboTxHash) + blockTransactions, err = getLimboTransaction(ctx, cfg, limboTxHash) if err != nil { cfg.txPool.UnlockFlusher() return err @@ -369,7 +395,7 @@ func SpawnSequencingStage( cfg.txPool.UnlockFlusher() } else if !l1Recovery { cfg.txPool.LockFlusher() - blockTransactions, err = getNextPoolTransactions(cfg, executionAt, forkId, yielded) + blockTransactions, err = getNextPoolTransactions(ctx, cfg, executionAt, forkId, yielded) if err != nil { cfg.txPool.UnlockFlusher() return err @@ -377,6 +403,12 @@ func SpawnSequencingStage( cfg.txPool.UnlockFlusher() } + if len(blockTransactions) == 0 { + time.Sleep(250 * time.Millisecond) + } else { + log.Trace(fmt.Sprintf("[%s] Yielded transactions from the pool", logPrefix), "txCount", len(blockTransactions)) + } + var receipt *types.Receipt var execResult *core.ExecutionResult for i, transaction := range blockTransactions { @@ -500,25 +532,22 @@ func SpawnSequencingStage( // because it would be later added twice counters := batchCounters.CombineCollectorsNoChanges(l1InfoIndex != 0) - err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()) - if err != nil { + if err = sdb.hermezDb.WriteBatchCounters(thisBatch, counters.UsedAsMap()); err != nil { return err } - err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch) - if err != nil { + if err = sdb.hermezDb.WriteIsBatchPartiallyProcessed(thisBatch); err != nil { return err } - if err = cfg.datastreamServer.WriteBlockToStream(logPrefix, tx, sdb.hermezDb, thisBatch, lastBatch, blockNumber); err != nil { + 
if err = cfg.datastreamServer.WriteBlockWithBatchStartToStream(logPrefix, tx, sdb.hermezDb, forkId, thisBatch, lastBatch, *parentBlock, *block); err != nil { return err } if err = tx.Commit(); err != nil { return err } - tx, err = cfg.db.BeginRw(ctx) - if err != nil { + if tx, err = cfg.db.BeginRw(ctx); err != nil { return err } // TODO: This creates stacked up deferrals @@ -562,7 +591,7 @@ func SpawnSequencingStage( if !hasExecutorForThisBatch { blockRoot := block.Root() - if err = cfg.datastreamServer.WriteBatchEnd(logPrefix, tx, sdb.hermezDb, thisBatch, lastBatch, &blockRoot, &ler); err != nil { + if err = cfg.datastreamServer.WriteBatchEnd(sdb.hermezDb, thisBatch, lastBatch, &blockRoot, &ler); err != nil { return err } } @@ -575,3 +604,13 @@ func SpawnSequencingStage( return nil } + +func tryHaltSequencer(logPrefix string, cfg SequenceBlockCfg, thisBatch uint64) { + if cfg.zk.SequencerHaltOnBatchNumber != 0 && + cfg.zk.SequencerHaltOnBatchNumber == thisBatch { + for { + log.Info(fmt.Sprintf("[%s] Halt sequencer on batch %d...", logPrefix, thisBatch)) + time.Sleep(5 * time.Second) //nolint:gomnd + } + } +} diff --git a/zk/stages/stage_sequence_execute_transactions.go b/zk/stages/stage_sequence_execute_transactions.go index 47cccca59d8..78c64438b11 100644 --- a/zk/stages/stage_sequence_execute_transactions.go +++ b/zk/stages/stage_sequence_execute_transactions.go @@ -3,7 +3,6 @@ package stages import ( "context" "encoding/binary" - "time" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" @@ -24,75 +23,49 @@ import ( "github.com/ledgerwatch/log/v3" ) -func getNextPoolTransactions(cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, error) { +func getNextPoolTransactions(ctx context.Context, cfg SequenceBlockCfg, executionAt, forkId uint64, alreadyYielded mapset.Set[[32]byte]) ([]types.Transaction, error) { var transactions []types.Transaction var err error - var 
count int - killer := time.NewTicker(50 * time.Millisecond) -LOOP: - for { - // ensure we don't spin forever looking for transactions, attempt for a while then exit up to the caller - select { - case <-killer.C: - break LOOP - default: - } - if err := cfg.txPoolDb.View(context.Background(), func(poolTx kv.Tx) error { - slots := types2.TxsRlp{} - _, count, err = cfg.txPool.YieldBest(yieldSize, &slots, poolTx, executionAt, utils.GetBlockGasLimitForFork(forkId), 0, alreadyYielded) - if err != nil { - return err - } - if count == 0 { - time.Sleep(500 * time.Microsecond) - return nil - } - transactions, err = extractTransactionsFromSlot(&slots) - if err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - if len(transactions) > 0 { - break + gasLimit := utils.GetBlockGasLimitForFork(forkId) + + if err := cfg.txPoolDb.View(ctx, func(poolTx kv.Tx) error { + slots := types2.TxsRlp{} + if _, _, err = cfg.txPool.YieldBest(cfg.yieldSize, &slots, poolTx, executionAt, gasLimit, 0, alreadyYielded); err != nil { + return err } + yieldedTxs, err := extractTransactionsFromSlot(&slots) + if err != nil { + return err + } + transactions = append(transactions, yieldedTxs...) 
+ return nil + }); err != nil { + return nil, err } return transactions, err } -func getLimboTransaction(cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { +func getLimboTransaction(ctx context.Context, cfg SequenceBlockCfg, txHash *common.Hash) ([]types.Transaction, error) { var transactions []types.Transaction + // ensure we don't spin forever looking for transactions, attempt for a while then exit up to the caller + if err := cfg.txPoolDb.View(ctx, func(poolTx kv.Tx) error { + slots, err := cfg.txPool.GetLimboTxRplsByHash(poolTx, txHash) + if err != nil { + return err + } - for { - // ensure we don't spin forever looking for transactions, attempt for a while then exit up to the caller - if err := cfg.txPoolDb.View(context.Background(), func(poolTx kv.Tx) error { - slots, err := cfg.txPool.GetLimboTxRplsByHash(poolTx, txHash) + if slots != nil { + transactions, err = extractTransactionsFromSlot(slots) if err != nil { return err } - - if slots != nil { - transactions, err = extractTransactionsFromSlot(slots) - if err != nil { - return err - } - } - - return nil - }); err != nil { - return nil, err - } - - if len(transactions) == 0 { - time.Sleep(250 * time.Millisecond) - } else { - break } + return nil + }); err != nil { + return nil, err } return transactions, nil @@ -240,6 +213,7 @@ func attemptAddTransaction( return nil, nil, false, err } + batchCounters.UpdateExecutionAndProcessingCountersCache(txCounters) // now that we have executed we can check again for an overflow if overflow, err = batchCounters.CheckForOverflow(l1InfoIndex != 0); err != nil { return nil, nil, false, err diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index 54f7b59d59f..a590d7759fb 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -51,7 +51,10 @@ const ( transactionGasLimit = 30000000 - yieldSize = 100 // arbitrary number defining how many transactions to 
yield from the pool at once + // this is the max number of send transactions that can be included in a block without overflowing counters + // this is for simple send transactions, any other type would consume more counters + // + preForkId11TxLimit = 444 ) var ( @@ -88,6 +91,8 @@ type SequenceBlockCfg struct { txPool *txpool.TxPool txPoolDb kv.RwDB + + yieldSize uint16 } func StageSequenceBlocksCfg( @@ -114,6 +119,7 @@ func StageSequenceBlocksCfg( txPool *txpool.TxPool, txPoolDb kv.RwDB, + yieldSize uint16, ) SequenceBlockCfg { return SequenceBlockCfg{ @@ -139,6 +145,7 @@ func StageSequenceBlocksCfg( miningConfig: miningConfig, txPool: txPool, txPoolDb: txPoolDb, + yieldSize: yieldSize, } } @@ -281,37 +288,35 @@ func prepareHeader(tx kv.RwTx, previousBlockNumber, deltaTimestamp, forcedTimest return header, parentBlock, nil } -func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBatchL2Data, l1Recovery bool, proposedTimestamp uint64) (uint64, *zktypes.L1InfoTreeUpdate, uint64, common.Hash, common.Hash, bool, error) { - var l1TreeUpdateIndex uint64 - var l1TreeUpdate *zktypes.L1InfoTreeUpdate - var err error - +func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBatchL2Data, l1Recovery bool, proposedTimestamp uint64) ( + infoTreeIndexProgress uint64, + l1TreeUpdate *zktypes.L1InfoTreeUpdate, + l1TreeUpdateIndex uint64, + l1BlockHash common.Hash, + ger common.Hash, + shouldWriteGerToContract bool, + err error, +) { // if we are in a recovery state and recognise that a l1 info tree index has been reused // then we need to not include the GER and L1 block hash into the block info root calculation, so // we keep track of this here - shouldWriteGerToContract := true + shouldWriteGerToContract = true - l1BlockHash := common.Hash{} - ger := common.Hash{} - - infoTreeIndexProgress, err := stages.GetStageProgress(sdb.tx, stages.HighestUsedL1InfoIndex) - if err != nil { - return infoTreeIndexProgress, l1TreeUpdate, 
l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err + if infoTreeIndexProgress, err = stages.GetStageProgress(sdb.tx, stages.HighestUsedL1InfoIndex); err != nil { + return } if l1Recovery { l1TreeUpdateIndex = uint64(decodedBlock.L1InfoTreeIndex) - l1TreeUpdate, err = sdb.hermezDb.GetL1InfoTreeUpdate(l1TreeUpdateIndex) - if err != nil { - return infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err + if l1TreeUpdate, err = sdb.hermezDb.GetL1InfoTreeUpdate(l1TreeUpdateIndex); err != nil { + return } if infoTreeIndexProgress >= l1TreeUpdateIndex { shouldWriteGerToContract = false } } else { - l1TreeUpdateIndex, l1TreeUpdate, err = calculateNextL1TreeUpdateToUse(infoTreeIndexProgress, sdb.hermezDb, proposedTimestamp) - if err != nil { - return infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, err + if l1TreeUpdateIndex, l1TreeUpdate, err = calculateNextL1TreeUpdateToUse(infoTreeIndexProgress, sdb.hermezDb, proposedTimestamp); err != nil { + return } if l1TreeUpdateIndex > 0 { infoTreeIndexProgress = l1TreeUpdateIndex @@ -324,7 +329,7 @@ func prepareL1AndInfoTreeRelatedStuff(sdb *stageDb, decodedBlock *zktx.DecodedBa ger = l1TreeUpdate.GER } - return infoTreeIndexProgress, l1TreeUpdate, l1TreeUpdateIndex, l1BlockHash, ger, shouldWriteGerToContract, nil + return } // will be called at the start of every new block created within a batch to figure out if there is a new GER diff --git a/zk/stages/stage_sequencer_executor_verify.go b/zk/stages/stage_sequencer_executor_verify.go index 4bed70e9e1c..3f9c050abcb 100644 --- a/zk/stages/stage_sequencer_executor_verify.go +++ b/zk/stages/stage_sequencer_executor_verify.go @@ -173,8 +173,7 @@ func SpawnSequencerExecutorVerifyStage( } l1InfoTreeMinTimestamps := make(map[uint64]uint64) - _, err = cfg.verifier.GetStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil) - if err != 
nil { + if _, err = cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blockNumbers, hermezDbReader, l1InfoTreeMinTimestamps, nil); err != nil { return err } @@ -215,7 +214,7 @@ func SpawnSequencerExecutorVerifyStage( senderMapKey := sender.Hex() blocksForStreamBytes, transactionsToIncludeByIndex := limboStreamBytesBuilderHelper.append(senderMapKey, blockNumber, i) - streamBytes, err := cfg.verifier.GetStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) + streamBytes, err := cfg.verifier.GetWholeBatchStreamBytes(response.BatchNumber, tx, blocksForStreamBytes, hermezDbReader, l1InfoTreeMinTimestamps, transactionsToIncludeByIndex) if err != nil { return err } diff --git a/zk/stages/stage_sequencer_l1_block_sync.go b/zk/stages/stage_sequencer_l1_block_sync.go index e2d856452a0..b00810c27df 100644 --- a/zk/stages/stage_sequencer_l1_block_sync.go +++ b/zk/stages/stage_sequencer_l1_block_sync.go @@ -209,6 +209,7 @@ LOOP: if !cfg.syncer.IsDownloading() { break LOOP } + time.Sleep(10 * time.Millisecond) } } diff --git a/zk/stages/test_utils.go b/zk/stages/test_utils.go index 8e4d843eb76..276a89ea47b 100644 --- a/zk/stages/test_utils.go +++ b/zk/stages/test_utils.go @@ -13,9 +13,11 @@ type TestDatastreamClient struct { streamingAtomic atomic.Bool progress atomic.Uint64 l2BlockChan chan types.FullL2Block + l2TxChan chan types.L2TransactionProto gerUpdatesChan chan types.GerUpdate errChan chan error batchStartChan chan types.BatchStart + batchEndChan chan types.BatchEnd } func NewTestDatastreamClient(fullL2Blocks []types.FullL2Block, gerUpdates []types.GerUpdate) *TestDatastreamClient { @@ -31,6 +33,10 @@ func NewTestDatastreamClient(fullL2Blocks []types.FullL2Block, gerUpdates []type return client } +func (c *TestDatastreamClient) EnsureConnected() (bool, error) { + return true, nil +} + func (c *TestDatastreamClient) ReadAllEntriesToChannel() error { c.streamingAtomic.Store(true) @@ 
-48,6 +54,10 @@ func (c *TestDatastreamClient) GetL2BlockChan() chan types.FullL2Block { return c.l2BlockChan } +func (c *TestDatastreamClient) GetL2TxChan() chan types.L2TransactionProto { + return c.l2TxChan +} + func (c *TestDatastreamClient) GetGerUpdatesChan() chan types.GerUpdate { return c.gerUpdatesChan } @@ -60,6 +70,10 @@ func (c *TestDatastreamClient) GetBatchStartChan() chan types.BatchStart { return c.batchStartChan } +func (c *TestDatastreamClient) GetBatchEndChan() chan types.BatchEnd { + return c.batchEndChan +} + func (c *TestDatastreamClient) GetLastWrittenTimeAtomic() *atomic.Int64 { return &c.lastWrittenTimeAtomic } diff --git a/zk/syncer/l1_syncer.go b/zk/syncer/l1_syncer.go index 7910c05f53a..1c5b5563fcd 100644 --- a/zk/syncer/l1_syncer.go +++ b/zk/syncer/l1_syncer.go @@ -34,6 +34,7 @@ type IEtherman interface { FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]ethTypes.Log, error) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) TransactionByHash(ctx context.Context, hash common.Hash) (ethTypes.Transaction, bool, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*ethTypes.Receipt, error) } type fetchJob struct { @@ -48,6 +49,7 @@ type jobResult struct { } type L1Syncer struct { + ctx context.Context etherMans []IEtherman ethermanIndex uint8 ethermanMtx *sync.Mutex @@ -71,8 +73,9 @@ type L1Syncer struct { highestBlockType string // finalized, latest, safe } -func NewL1Syncer(etherMans []IEtherman, l1ContractAddresses []common.Address, topics [][]common.Hash, blockRange, queryDelay uint64, highestBlockType string) *L1Syncer { +func NewL1Syncer(ctx context.Context, etherMans []IEtherman, l1ContractAddresses []common.Address, topics [][]common.Hash, blockRange, queryDelay uint64, highestBlockType string) *L1Syncer { return &L1Syncer{ + ctx: ctx, etherMans: etherMans, ethermanIndex: 0, ethermanMtx: &sync.Mutex{}, @@ -223,6 +226,21 @@ func (s *L1Syncer) 
GetOldAccInputHash(ctx context.Context, addr *common.Address, } } +func (s *L1Syncer) GetL1BlockTimeStampByTxHash(ctx context.Context, txHash common.Hash) (uint64, error) { + em := s.getNextEtherman() + r, err := em.TransactionReceipt(ctx, txHash) + if err != nil { + return 0, err + } + + header, err := em.HeaderByNumber(context.Background(), r.BlockNumber) + if err != nil { + return 0, err + } + + return header.Time, nil +} + func (s *L1Syncer) L1QueryHeaders(logs []ethTypes.Log) (map[uint64]*ethTypes.Header, error) { logsSize := len(logs) @@ -302,9 +320,12 @@ func (s *L1Syncer) getLatestL1Block() (uint64, error) { } func (s *L1Syncer) queryBlocks() error { - startBlock := s.lastCheckedL1Block.Load() + // Fixed receiving duplicate log events. + // lastCheckedL1Block means that it has already been checked in the previous cycle. + // It should not be checked again in the new cycle, so +1 is added here. + startBlock := s.lastCheckedL1Block.Load() + 1 - log.Debug("GetHighestSequence", "startBlock", s.lastCheckedL1Block.Load()) + log.Debug("GetHighestSequence", "startBlock", startBlock) // define the blocks we're going to fetch up front fetches := make([]fetchJob, 0) @@ -347,6 +368,9 @@ func (s *L1Syncer) queryBlocks() error { loop: for { select { + case <-s.ctx.Done(): + close(stop) + break loop case res := <-results: complete++ if res.Error != nil { diff --git a/zk/tests/nightly-l1-recovery/docker-compose-8.yml b/zk/tests/nightly-l1-recovery/docker-compose-8.yml new file mode 100644 index 00000000000..4756f7df13a --- /dev/null +++ b/zk/tests/nightly-l1-recovery/docker-compose-8.yml @@ -0,0 +1,61 @@ +services: + cache: + image: golang:1.19 + command: ["go", "run", "cmd/hack/rpc_cache/main.go", "-file", "/cache/network8-cache.db"] + volumes: + - ../../../:/repo + - l1-cache:/cache + working_dir: /repo + networks: + - erigon-net + + erigon: + build: + context: ../../../ + dockerfile: Dockerfile + command: ["--config", "/config/network8-config.yaml", 
"--zkevm.l1-sync-stop-batch", "100"] + environment: + - CDK_ERIGON_SEQUENCER=1 + volumes: + - ./:/config + - datadir:/datadir + networks: + - erigon-net + depends_on: + - cache + + erigon-sync: + build: + context: ../../../ + dockerfile: Dockerfile + command: [ "--config", "/config/network8-sync-config.yaml" ] + volumes: + - ./:/config + - datadir-syncer:/datadir + networks: + - erigon-net + depends_on: + - cache + - erigon + + block-checker: + image: golang:1.19 + command: ["go", "run", "/repo/zk/debug_tools/nightly-block-compare-wait/main.go", "--compare=http://34.175.214.161:18505", "--compare2=http://erigon-sync:8123", "--duration=3h", "--interval=10s"] + volumes: + - ../../../:/repo + working_dir: /repo + networks: + - erigon-net + depends_on: + - erigon + - cache + - erigon-sync + +networks: + erigon-net: + driver: bridge + +volumes: + datadir: + datadir-syncer: + l1-cache: diff --git a/zk/tests/nightly-l1-recovery/dynamic-integration8-allocs.json b/zk/tests/nightly-l1-recovery/dynamic-integration8-allocs.json new file mode 100644 index 00000000000..3069e697569 --- /dev/null +++ b/zk/tests/nightly-l1-recovery/dynamic-integration8-allocs.json @@ -0,0 +1,86 @@ +{ + "0xD9E2C34379f0785280754cD05544B7c0321386Ee": { + "contractName": "PolygonZkEVMDeployer", + "balance": "0", + "nonce": "4", + "code": 
"0x60806040526004361061006e575f3560e01c8063715018a61161004c578063715018a6146100e25780638da5cb5b146100f6578063e11ae6cb1461011f578063f2fde38b14610132575f80fd5b80632b79805a146100725780634a94d487146100875780636d07dbf81461009a575b5f80fd5b610085610080366004610908565b610151565b005b6100856100953660046109a2565b6101c2565b3480156100a5575f80fd5b506100b96100b43660046109f5565b610203565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100ed575f80fd5b50610085610215565b348015610101575f80fd5b505f5473ffffffffffffffffffffffffffffffffffffffff166100b9565b61008561012d366004610a15565b610228565b34801561013d575f80fd5b5061008561014c366004610a61565b61028e565b61015961034a565b5f6101658585856103ca565b90506101718183610527565b5060405173ffffffffffffffffffffffffffffffffffffffff821681527fba82f25fed02cd2a23d9f5d11c2ef588d22af5437cbf23bfe61d87257c480e4c9060200160405180910390a15050505050565b6101ca61034a565b6101d583838361056a565b506040517f25adb19089b6a549831a273acdf7908cff8b7ee5f551f8d1d37996cf01c5df5b905f90a1505050565b5f61020e8383610598565b9392505050565b61021d61034a565b6102265f6105a4565b565b61023061034a565b5f61023c8484846103ca565b60405173ffffffffffffffffffffffffffffffffffffffff821681529091507fba82f25fed02cd2a23d9f5d11c2ef588d22af5437cbf23bfe61d87257c480e4c9060200160405180910390a150505050565b61029661034a565b73ffffffffffffffffffffffffffffffffffffffff811661033e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b610347816105a4565b50565b5f5473ffffffffffffffffffffffffffffffffffffffff163314610226576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152606401610335565b5f83471015610435576040517f08c379a0000000000000000000000
00000000000000000000000000000000000815260206004820152601d60248201527f437265617465323a20696e73756666696369656e742062616c616e63650000006044820152606401610335565b81515f0361049f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f437265617465323a2062797465636f6465206c656e677468206973207a65726f6044820152606401610335565b8282516020840186f5905073ffffffffffffffffffffffffffffffffffffffff811661020e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f437265617465323a204661696c6564206f6e206465706c6f79000000000000006044820152606401610335565b606061020e83835f6040518060400160405280601e81526020017f416464726573733a206c6f772d6c6576656c2063616c6c206661696c65640000815250610618565b6060610590848484604051806060016040528060298152602001610b0860299139610618565b949350505050565b5f61020e83833061072d565b5f805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6060824710156106aa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c00000000000000000000000000000000000000000000000000006064820152608401610335565b5f808673ffffffffffffffffffffffffffffffffffffffff1685876040516106d29190610a9c565b5f6040518083038185875af1925050503d805f811461070c576040519150601f19603f3d011682016040523d82523d5f602084013e610711565b606091505b509150915061072287838387610756565b979650505050505050565b5f604051836040820152846020820152828152600b8101905060ff815360559020949350505050565b606083156107eb5782515f036107e45773ffffffffffffffffffffffffffffffffffffffff85163b6107e4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636
f6e74726163740000006044820152606401610335565b5081610590565b61059083838151156108005781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103359190610ab7565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f82601f830112610870575f80fd5b813567ffffffffffffffff8082111561088b5761088b610834565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019082821181831017156108d1576108d1610834565b816040528381528660208588010111156108e9575f80fd5b836020870160208301375f602085830101528094505050505092915050565b5f805f806080858703121561091b575f80fd5b8435935060208501359250604085013567ffffffffffffffff80821115610940575f80fd5b61094c88838901610861565b93506060870135915080821115610961575f80fd5b5061096e87828801610861565b91505092959194509250565b803573ffffffffffffffffffffffffffffffffffffffff8116811461099d575f80fd5b919050565b5f805f606084860312156109b4575f80fd5b6109bd8461097a565b9250602084013567ffffffffffffffff8111156109d8575f80fd5b6109e486828701610861565b925050604084013590509250925092565b5f8060408385031215610a06575f80fd5b50508035926020909101359150565b5f805f60608486031215610a27575f80fd5b8335925060208401359150604084013567ffffffffffffffff811115610a4b575f80fd5b610a5786828701610861565b9150509250925092565b5f60208284031215610a71575f80fd5b61020e8261097a565b5f5b83811015610a94578181015183820152602001610a7c565b50505f910152565b5f8251610aad818460208701610a7a565b9190910192915050565b602081525f8251806020840152610ad5816040850160208701610a7a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2063616c6c20776974682076616c7565206661696c6564a2646970667358221220330b94dc698c4d290bf55c23f13b473cde6a6bae0030cb902de18af54e35839f64736f6c63430008140033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000e859276098f208d003ca6904c6cc26629ee364ce" + 
} + }, + "0xA98eD9c842d93e9E06038a107530Cb7bF975595f": { + "contractName": "ProxyAdmin", + "balance": "0", + "nonce": "1", + "code": "0x608060405260043610610079575f3560e01c80639623609d1161004c5780639623609d1461012357806399a88ec414610136578063f2fde38b14610155578063f3b7dead14610174575f80fd5b8063204e1c7a1461007d578063715018a6146100c55780637eff275e146100db5780638da5cb5b146100fa575b5f80fd5b348015610088575f80fd5b5061009c6100973660046105e8565b610193565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100d0575f80fd5b506100d9610244565b005b3480156100e6575f80fd5b506100d96100f536600461060a565b610257565b348015610105575f80fd5b505f5473ffffffffffffffffffffffffffffffffffffffff1661009c565b6100d961013136600461066e565b6102e0565b348015610141575f80fd5b506100d961015036600461060a565b610371565b348015610160575f80fd5b506100d961016f3660046105e8565b6103cd565b34801561017f575f80fd5b5061009c61018e3660046105e8565b610489565b5f805f8373ffffffffffffffffffffffffffffffffffffffff166040516101dd907f5c60da1b00000000000000000000000000000000000000000000000000000000815260040190565b5f60405180830381855afa9150503d805f8114610215576040519150601f19603f3d011682016040523d82523d5f602084013e61021a565b606091505b509150915081610228575f80fd5b8080602001905181019061023c919061075b565b949350505050565b61024c6104d3565b6102555f610553565b565b61025f6104d3565b6040517f8f28397000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8281166004830152831690638f283970906024015b5f604051808303815f87803b1580156102c6575f80fd5b505af11580156102d8573d5f803e3d5ffd5b505050505050565b6102e86104d3565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef28690349061033e9086908690600401610776565b5f604051808303818588803b158015610355575f80fd5b505af1158015610367573d5f803e3d5ffd5b5050505050505050565b6103796104d3565b6040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffff
ffffffffffffffffffffffffffffffffff8281166004830152831690633659cfe6906024016102af565b6103d56104d3565b73ffffffffffffffffffffffffffffffffffffffff811661047d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b61048681610553565b50565b5f805f8373ffffffffffffffffffffffffffffffffffffffff166040516101dd907ff851a44000000000000000000000000000000000000000000000000000000000815260040190565b5f5473ffffffffffffffffffffffffffffffffffffffff163314610255576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152606401610474565b5f805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff81168114610486575f80fd5b5f602082840312156105f8575f80fd5b8135610603816105c7565b9392505050565b5f806040838503121561061b575f80fd5b8235610626816105c7565b91506020830135610636816105c7565b809150509250929050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f805f60608486031215610680575f80fd5b833561068b816105c7565b9250602084013561069b816105c7565b9150604084013567ffffffffffffffff808211156106b7575f80fd5b818601915086601f8301126106ca575f80fd5b8135818111156106dc576106dc610641565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561072257610722610641565b8160405282815289602084870101111561073a575f80fd5b826020860160208301375f6020848301015280955050505050509250925092565b5f6020828403121561076b575f80fd5b8151610603816105c7565b73ffffffffffffffffffffffffffffffffffffffff
831681525f602060408184015283518060408501525f5b818110156107be578581018301518582016060015282016107a2565b505f6060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010192505050939250505056fea26469706673582212203083a4ccc2e42eed60bd19037f2efa77ed086dc7a5403f75bebb995dcba2221c64736f6c63430008140033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000003e779571bfca47b12f3b6d11da0bd62321072fa6" + } + }, + "0x6d1ed73612F56782e5C0a090Ad04C41b94D10721": { + "contractName": "PolygonZkEVMBridge implementation", + "balance": "0", + "nonce": "1", + "code": "0x6080604052600436106101db575f3560e01c806383f24403116100fd578063ccaa2d1111610092578063ee25560b11610062578063ee25560b146105a9578063f5efcd79146105d4578063f811bff7146105f3578063fb57083414610612575f80fd5b8063ccaa2d111461053b578063cd5865791461055a578063d02103ca1461056d578063dbc1697614610595575f80fd5b8063bab161bf116100cd578063bab161bf146104b9578063be5831c7146104da578063c00f14ab146104fd578063cc4616321461051c575f80fd5b806383f244031461043d5780638ed7e3f21461045c578063aaa13cc21461047b578063b8b284d01461049a575f80fd5b80633cbc795b116101735780637843298b116101435780637843298b146103c257806379e2cf97146103e157806381b1c174146103f557806383c43a5514610429575f80fd5b80633cbc795b146103385780633e197043146103705780634b2f336d1461038f5780635ca1e165146103ae575f80fd5b806327aef4e8116101ae57806327aef4e81461026d5780632dfdf0b51461028e578063318aee3d146102b15780633c351e1014610319575f80fd5b806315064c96146101df5780632072f6c51461020d57806322e95f2c14610223578063240ff3781461025a575b5f80fd5b3480156101ea575f80fd5b506068546101f89060ff1681565b60405190151581526020015b60405180910390f35b348015610218575f80fd5b50610221610631565b005b34801561022e575f80fd5b5061024261023d366004612fb9565b610666565b6040516001600160a01b039091168152602001610204565b610221610268366004613040565b6106d0565b348015610278575f80fd5b50610281610759565b6040516102049190613102565b348015610299575f80fd5b506102a3605354
81565b604051908152602001610204565b3480156102bc575f80fd5b506102f56102cb36600461311b565b606b6020525f908152604090205463ffffffff81169064010000000090046001600160a01b031682565b6040805163ffffffff90931683526001600160a01b03909116602083015201610204565b348015610324575f80fd5b50606d54610242906001600160a01b031681565b348015610343575f80fd5b50606d5461035b90600160a01b900463ffffffff1681565b60405163ffffffff9091168152602001610204565b34801561037b575f80fd5b506102a361038a366004613144565b6107e5565b34801561039a575f80fd5b50606f54610242906001600160a01b031681565b3480156103b9575f80fd5b506102a361088e565b3480156103cd575f80fd5b506102426103dc3660046131be565b61096a565b3480156103ec575f80fd5b50610221610993565b348015610400575f80fd5b5061024261040f366004613204565b606a6020525f90815260409020546001600160a01b031681565b348015610434575f80fd5b506102816109b4565b348015610448575f80fd5b506102a361045736600461322c565b6109d3565b348015610467575f80fd5b50606c54610242906001600160a01b031681565b348015610486575f80fd5b5061024261049536600461332d565b610aa8565b3480156104a5575f80fd5b506102216104b43660046133c3565b610be7565b3480156104c4575f80fd5b5060685461035b90610100900463ffffffff1681565b3480156104e5575f80fd5b5060685461035b90600160c81b900463ffffffff1681565b348015610508575f80fd5b5061028161051736600461311b565b610cc2565b348015610527575f80fd5b506101f8610536366004613441565b610d07565b348015610546575f80fd5b50610221610555366004613472565b610d8f565b610221610568366004613556565b6112c0565b348015610578575f80fd5b50606854610242906501000000000090046001600160a01b031681565b3480156105a0575f80fd5b5061022161172c565b3480156105b4575f80fd5b506102a36105c3366004613204565b60696020525f908152604090205481565b3480156105df575f80fd5b506102216105ee366004613472565b61175f565b3480156105fe575f80fd5b5061022161060d3660046135e6565b611a25565b34801561061d575f80fd5b506101f861062c366004613689565b611d40565b606c546001600160a01b0316331461065c57604051631736745960e31b815260040160405180910390fd5b610664611d57565b565b6040805160e084901b6001600160e01b031916602080830191909152606084901b6b
ffffffffffffffffffffffff1916602483015282516018818403018152603890920183528151918101919091205f908152606a90915220546001600160a01b03165b92915050565b60685460ff16156106f457604051630bc011ff60e21b815260040160405180910390fd5b341580159061070d5750606f546001600160a01b031615155b15610744576040517f6f625c4000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610752858534868686611db2565b5050505050565b606e8054610766906136ce565b80601f0160208091040260200160405190810160405280929190818152602001828054610792906136ce565b80156107dd5780601f106107b4576101008083540402835291602001916107dd565b820191905f5260205f20905b8154815290600101906020018083116107c057829003601f168201915b505050505081565b6040517fff0000000000000000000000000000000000000000000000000000000000000060f889901b1660208201526001600160e01b031960e088811b821660218401526bffffffffffffffffffffffff19606089811b821660258601529188901b909216603984015285901b16603d82015260518101839052607181018290525f90609101604051602081830303815290604052805190602001209050979650505050505050565b6053545f90819081805b6020811015610961578083901c6001166001036108f557603381602081106108c2576108c2613706565b01546040805160208101929092528101859052606001604051602081830303815290604052805190602001209350610922565b60408051602081018690529081018390526060016040516020818303038152906040528051906020012093505b604080516020810184905290810183905260600160405160208183030381529060405280519060200120915080806109599061372e565b915050610898565b50919392505050565b5f61098b848461097985611e7c565b61098286611f66565b61049587612047565b949350505050565b605354606854600160c81b900463ffffffff16101561066457610664612114565b60405180611ba00160405280611b668152602001613d80611b66913981565b5f83815b6020811015610a9f57600163ffffffff8516821c81169003610a4257848160208110610a0557610a05613706565b602002013582604051602001610a25929190918252602082015260400190565b604051602081830303815290604052805190602001209150610a8d565b81858260208110610a5557610a55613706565b6020020135604051602001610a7492919091825260208201526040
0190565b6040516020818303038152906040528051906020012091505b80610a978161372e565b9150506109d7565b50949350505050565b6040516001600160e01b031960e087901b1660208201526bffffffffffffffffffffffff19606086901b1660248201525f9081906038016040516020818303038152906040528051906020012090505f60ff60f81b308360405180611ba00160405280611b668152602001613d80611b669139898989604051602001610b3093929190613746565b60408051601f1981840301815290829052610b4e929160200161377e565b60405160208183030381529060405280519060200120604051602001610bc394939291907fff0000000000000000000000000000000000000000000000000000000000000094909416845260609290921b6bffffffffffffffffffffffff191660018401526015830152603582015260550190565b60408051808303601f19018152919052805160209091012098975050505050505050565b60685460ff1615610c0b57604051630bc011ff60e21b815260040160405180910390fd5b606f546001600160a01b0316610c4d576040517fdde3cda700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606f54604051632770a7eb60e21b8152336004820152602481018690526001600160a01b0390911690639dc29fac906044015f604051808303815f87803b158015610c96575f80fd5b505af1158015610ca8573d5f803e3d5ffd5b50505050610cba868686868686611db2565b505050505050565b6060610ccd82611e7c565b610cd683611f66565b610cdf84612047565b604051602001610cf193929190613746565b6040516020818303038152906040529050919050565b6068545f908190610100900463ffffffff16158015610d2c575063ffffffff83166001145b15610d3e575063ffffffff8316610d66565b610d5364010000000063ffffffff85166137ac565b610d639063ffffffff86166137c3565b90505b600881901c5f90815260696020526040902054600160ff9092169190911b908116149392505050565b60685460ff1615610db357604051630bc011ff60e21b815260040160405180910390fd5b60685463ffffffff8681166101009092041614610de3576040516302caf51760e11b815260040160405180910390fd5b610e168c8c8c8c8c610e115f8e8e8e8e8e8e8e604051610e049291906137d6565b60405180910390206107e5565b6121c2565b6001600160a01b038616610f6057606f546001600160a01b0316610efa575f6001600160a01b03851684825b6040519080825280601f01601f191660200182016040
528015610e6c576020820181803683370190505b50604051610e7a91906137e5565b5f6040518083038185875af1925050503d805f8114610eb4576040519150601f19603f3d011682016040523d82523d5f602084013e610eb9565b606091505b5050905080610ef4576040517f6747a28800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50611256565b606f546040516340c10f1960e01b81526001600160a01b03868116600483015260248201869052909116906340c10f19906044015f604051808303815f87803b158015610f45575f80fd5b505af1158015610f57573d5f803e3d5ffd5b50505050611256565b606d546001600160a01b038781169116148015610f8e5750606d5463ffffffff888116600160a01b90920416145b15610fa5575f6001600160a01b0385168482610e42565b60685463ffffffff610100909104811690881603610fd657610fd16001600160a01b0387168585612354565b611256565b6040516001600160e01b031960e089901b1660208201526bffffffffffffffffffffffff19606088901b1660248201525f9060380160408051601f1981840301815291815281516020928301205f818152606a9093529120549091506001600160a01b0316806111f5575f6110808386868080601f0160208091040260200160405190810160405280939291908181526020018383808284375f920191909152506123d592505050565b6040516340c10f1960e01b81526001600160a01b03898116600483015260248201899052919250908216906340c10f19906044015f604051808303815f87803b1580156110cb575f80fd5b505af11580156110dd573d5f803e3d5ffd5b5050505080606a5f8581526020019081526020015f205f6101000a8154816001600160a01b0302191690836001600160a01b0316021790555060405180604001604052808b63ffffffff1681526020018a6001600160a01b0316815250606b5f836001600160a01b03166001600160a01b031681526020019081526020015f205f820151815f015f6101000a81548163ffffffff021916908363ffffffff1602179055506020820151815f0160046101000a8154816001600160a01b0302191690836001600160a01b031602179055509050507f490e59a1701b938786ac72570a1efeac994a3dbe96e2e883e19e902ace6e6a398a8a8388886040516111e7959493929190613828565b60405180910390a150611253565b6040516340c10f1960e01b81526001600160a01b038781166004830152602482018790528216906340c10f19906044015f604051808303815f87803b15801561123c575f80fd5b505af115
801561124e573d5f803e3d5ffd5b505050505b50505b604080518b815263ffffffff891660208201526001600160a01b0388811682840152861660608201526080810185905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d9181900360a00190a1505050505050505050505050565b60685460ff16156112e457604051630bc011ff60e21b815260040160405180910390fd5b6112ec612468565b60685463ffffffff61010090910481169088160361131d576040516302caf51760e11b815260040160405180910390fd5b5f806060876001600160a01b03881661141957883414611369576040517fb89240f500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606d54606e80546001600160a01b0383169650600160a01b90920463ffffffff16945090611396906136ce565b80601f01602080910402602001604051908101604052809291908181526020018280546113c2906136ce565b801561140d5780601f106113e45761010080835404028352916020019161140d565b820191905f5260205f20905b8154815290600101906020018083116113f057829003601f168201915b505050505091506116a3565b3415611451576040517f798ee6f100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606f546001600160a01b03908116908916036114c757604051632770a7eb60e21b8152336004820152602481018a90526001600160a01b03891690639dc29fac906044015f604051808303815f87803b1580156114ac575f80fd5b505af11580156114be573d5f803e3d5ffd5b505050506116a3565b6001600160a01b038089165f908152606b602090815260409182902082518084019093525463ffffffff811683526401000000009004909216918101829052901561157957604051632770a7eb60e21b8152336004820152602481018b90526001600160a01b038a1690639dc29fac906044015f604051808303815f87803b158015611551575f80fd5b505af1158015611563573d5f803e3d5ffd5b5050505080602001519450805f01519350611696565b851561158b5761158b898b89896124c1565b6040516370a0823160e01b81523060048201525f906001600160a01b038b16906370a0823190602401602060405180830381865afa1580156115cf573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906115f39190613860565b905061160a6001600160a01b038b1633308e612860565b6040516370a0823160e01b81523060048201525f906001600160a01b038c
16906370a0823190602401602060405180830381865afa15801561164e573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906116729190613860565b905061167e8282613877565b6068548c9850610100900463ffffffff169650935050505b61169f89610cc2565b9250505b7f501781209a1f8899323b96b4ef08b168df93e0a90c673d1e4cce39366cb62f9b5f84868e8e86886053546040516116e298979695949392919061388a565b60405180910390a16117086117035f85878f8f8789805190602001206107e5565b6128b1565b861561171657611716612114565b5050505061172360018055565b50505050505050565b606c546001600160a01b0316331461175757604051631736745960e31b815260040160405180910390fd5b6106646129b2565b60685460ff161561178357604051630bc011ff60e21b815260040160405180910390fd5b60685463ffffffff86811661010090920416146117b3576040516302caf51760e11b815260040160405180910390fd5b6117d58c8c8c8c8c610e1160018e8e8e8e8e8e8e604051610e049291906137d6565b606f545f906001600160a01b031661188857846001600160a01b031684888a868660405160240161180994939291906138f3565b60408051601f198184030181529181526020820180516001600160e01b0316630c035af960e11b1790525161183e91906137e5565b5f6040518083038185875af1925050503d805f8114611878576040519150601f19603f3d011682016040523d82523d5f602084013e61187d565b606091505b505080915050611983565b606f546040516340c10f1960e01b81526001600160a01b03878116600483015260248201879052909116906340c10f19906044015f604051808303815f87803b1580156118d3575f80fd5b505af11580156118e5573d5f803e3d5ffd5b50505050846001600160a01b03168789858560405160240161190a94939291906138f3565b60408051601f198184030181529181526020820180516001600160e01b0316630c035af960e11b1790525161193f91906137e5565b5f604051808303815f865af19150503d805f8114611978576040519150601f19603f3d011682016040523d82523d5f602084013e61197d565b606091505b50909150505b806119ba576040517f37e391c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080518c815263ffffffff8a1660208201526001600160a01b0389811682840152871660608201526080810186905290517f1df3f2a973a00d6635911755c260704e95e8a5876997546798770f76396fda4d918190
0360a00190a150505050505050505050505050565b5f54610100900460ff1615808015611a4357505f54600160ff909116105b80611a5c5750303b158015611a5c57505f5460ff166001145b611ad35760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b5f805460ff191660011790558015611af4575f805461ff0019166101001790555b606880547fffffffffffffff000000000000000000000000000000000000000000000000ff1661010063ffffffff8a16027fffffffffffffff0000000000000000000000000000000000000000ffffffffff1617650100000000006001600160a01b038781169190910291909117909155606c805473ffffffffffffffffffffffffffffffffffffffff19168583161790558616611bcf5763ffffffff851615611bca576040517f1a874c1200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611ceb565b606d805463ffffffff8716600160a01b027fffffffffffffffff0000000000000000000000000000000000000000000000009091166001600160a01b03891617179055606e611c1e8382613970565b50611cbd5f801b6012604051602001611ca991906060808252600d908201527f5772617070656420457468657200000000000000000000000000000000000000608082015260a0602082018190526004908201527f574554480000000000000000000000000000000000000000000000000000000060c082015260ff91909116604082015260e00190565b6040516020818303038152906040526123d5565b606f805473ffffffffffffffffffffffffffffffffffffffff19166001600160a01b03929092169190911790555b611cf3612a22565b8015611723575f805461ff0019169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150505050505050565b5f81611d4d8686866109d3565b1495945050505050565b60685460ff1615611d7b57604051630bc011ff60e21b815260040160405180910390fd5b6068805460ff191660011790556040517f2261efe5aef6fedc1fd1550b25facc9181745623049c7901287030b9ad1a5497905f90a1565b60685463ffffffff610100909104811690871603611de3576040516302caf51760e11b815260040160405180910390fd5b7f501781209a1f8899323b96b4ef08b168df93e0a9
0c673d1e4cce39366cb62f9b6001606860019054906101000a900463ffffffff16338989898888605354604051611e3799989796959493929190613a2c565b60405180910390a1611e6e6117036001606860019054906101000a900463ffffffff16338a8a8a8989604051610e049291906137d6565b8215610cba57610cba612114565b60408051600481526024810182526020810180516001600160e01b03167f06fdde030000000000000000000000000000000000000000000000000000000017905290516060915f9182916001600160a01b03861691611edb91906137e5565b5f60405180830381855afa9150503d805f8114611f13576040519150601f19603f3d011682016040523d82523d5f602084013e611f18565b606091505b509150915081611f5d576040518060400160405280600781526020017f4e4f5f4e414d450000000000000000000000000000000000000000000000000081525061098b565b61098b81612a94565b60408051600481526024810182526020810180516001600160e01b03167f95d89b410000000000000000000000000000000000000000000000000000000017905290516060915f9182916001600160a01b03861691611fc591906137e5565b5f60405180830381855afa9150503d805f8114611ffd576040519150601f19603f3d011682016040523d82523d5f602084013e612002565b606091505b509150915081611f5d576040518060400160405280600981526020017f4e4f5f53594d424f4c000000000000000000000000000000000000000000000081525061098b565b60408051600481526024810182526020810180516001600160e01b03167f313ce5670000000000000000000000000000000000000000000000000000000017905290515f91829182916001600160a01b038616916120a591906137e5565b5f60405180830381855afa9150503d805f81146120dd576040519150601f19603f3d011682016040523d82523d5f602084013e6120e2565b606091505b50915091508180156120f5575080516020145b61210057601261098b565b8080602001905181019061098b9190613a97565b6053546068805463ffffffff909216600160c81b027fffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffff90921691909117908190556001600160a01b0365010000000000909104166333d6247d61217561088e565b6040518263ffffffff1660e01b815260040161219391815260200190565b5f604051808303815f87803b1580156121aa575f80fd5b505af11580156121bc573d5f803e3d5ffd5b50505050565b606854604080516020808201879052818301869052825180830384018152
606083019384905280519101207f257b36320000000000000000000000000000000000000000000000000000000090925260648101919091525f916501000000000090046001600160a01b03169063257b3632906084016020604051808303815f875af1158015612253573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906122779190613860565b9050805f036122b1576040517e2f6fad00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80680100000000000000008716156122f5578691506122d3848a8489611d40565b6122f0576040516338105f3b60e21b815260040160405180910390fd5b61233f565b602087901c612305816001613ab2565b9150879250612320612318868c866109d3565b8a8389611d40565b61233d576040516338105f3b60e21b815260040160405180910390fd5b505b6123498282612c64565b505050505050505050565b6040516001600160a01b0383166024820152604481018290526123d09084907fa9059cbb00000000000000000000000000000000000000000000000000000000906064015b60408051601f198184030181529190526020810180516001600160e01b03166001600160e01b031990931692909217909152612d24565b505050565b5f8060405180611ba00160405280611b668152602001613d80611b6691398360405160200161240592919061377e565b6040516020818303038152906040529050838151602083015ff591506001600160a01b038216612461576040517fbefb092000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5092915050565b6002600154036124ba5760405162461bcd60e51b815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c006044820152606401611aca565b6002600155565b5f6124cf6004828486613acf565b6124d891613af6565b90507f2afa5331000000000000000000000000000000000000000000000000000000006001600160e01b03198216016126b2575f80808080808061251f896004818d613acf565b81019061252c9190613b26565b9650965096509650965096509650336001600160a01b0316876001600160a01b03161461256c5760405163912ecce760e01b815260040160405180910390fd5b6001600160a01b03861630146125955760405163750643af60e01b815260040160405180910390fd5b8a85146125ce576040517f03fffc4b000000000000000000000000000000000000000000000000000000008152600401604051
80910390fd5b604080516001600160a01b0389811660248301528881166044830152606482018890526084820187905260ff861660a483015260c4820185905260e48083018590528351808403909101815261010490920183526020820180516001600160e01b03167fd505accf000000000000000000000000000000000000000000000000000000001790529151918e169161266591906137e5565b5f604051808303815f865af19150503d805f811461269e576040519150601f19603f3d011682016040523d82523d5f602084013e6126a3565b606091505b50505050505050505050610752565b6001600160e01b031981166323f2ebc360e21b146126fc576040517fe282c0ba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f808080808080806127118a6004818e613acf565b81019061271e9190613b75565b97509750975097509750975097509750336001600160a01b0316886001600160a01b0316146127605760405163912ecce760e01b815260040160405180910390fd5b6001600160a01b03871630146127895760405163750643af60e01b815260040160405180910390fd5b604080516001600160a01b038a811660248301528981166044830152606482018990526084820188905286151560a483015260ff861660c483015260e482018590526101048083018590528351808403909101815261012490920183526020820180516001600160e01b03166323f2ebc360e21b1790529151918f169161281091906137e5565b5f604051808303815f865af19150503d805f8114612849576040519150601f19603f3d011682016040523d82523d5f602084013e61284e565b606091505b50505050505050505050505050505050565b6040516001600160a01b03808516602483015283166044820152606481018290526121bc9085907f23b872dd0000000000000000000000000000000000000000000000000000000090608401612399565b8060016128c060206002613cd3565b6128ca9190613877565b60535410612904576040517fef5ccf6600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f60535f81546129139061372e565b918290555090505f5b60208110156129a3578082901c60011660010361294f57826033826020811061294757612947613706565b015550505050565b6033816020811061296257612962613706565b01546040805160208101929092528101849052606001604051602081830303815290604052805190602001209250808061299b9061372e565b91505061291c565b506123d0613cde565b60018055
565b60685460ff166129ee576040517f5386698100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6068805460ff191690556040517f1e5e34eea33501aecf2ebec9fe0e884a40804275ea7fe10b2ba084c8374308b3905f90a1565b5f54610100900460ff16612a8c5760405162461bcd60e51b815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201526a6e697469616c697a696e6760a81b6064820152608401611aca565b610664612e08565b60606040825110612ab357818060200190518101906106ca9190613cf2565b8151602003612c26575f5b602081108015612b055750828181518110612adb57612adb613706565b01602001517fff000000000000000000000000000000000000000000000000000000000000001615155b15612b1c5780612b148161372e565b915050612abe565b805f03612b5e57505060408051808201909152601281527f4e4f545f56414c49445f454e434f44494e4700000000000000000000000000006020820152919050565b5f8167ffffffffffffffff811115612b7857612b78613268565b6040519080825280601f01601f191660200182016040528015612ba2576020820181803683370190505b5090505f5b82811015612c1e57848181518110612bc157612bc1613706565b602001015160f81c60f81b828281518110612bde57612bde613706565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a90535080612c168161372e565b915050612ba7565b509392505050565b505060408051808201909152601281527f4e4f545f56414c49445f454e434f44494e470000000000000000000000000000602082015290565b919050565b6068545f90610100900463ffffffff16158015612c87575063ffffffff82166001145b15612c99575063ffffffff8216612cc1565b612cae64010000000063ffffffff84166137ac565b612cbe9063ffffffff85166137c3565b90505b600881901c5f8181526069602052604081208054600160ff861690811b91821892839055929091908183169003611723576040517f646cf55800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f612d78826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c6564815250856001600160a01b0316612e729092919063ffffffff16565b8051909150156123d05780806020019051810190612d969190613d64565b6123
d05760405162461bcd60e51b815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f742073756363656564000000000000000000000000000000000000000000006064820152608401611aca565b5f54610100900460ff166129ac5760405162461bcd60e51b815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201526a6e697469616c697a696e6760a81b6064820152608401611aca565b606061098b84845f85855f80866001600160a01b03168587604051612e9791906137e5565b5f6040518083038185875af1925050503d805f8114612ed1576040519150601f19603f3d011682016040523d82523d5f602084013e612ed6565b606091505b5091509150612ee787838387612ef2565b979650505050505050565b60608315612f605782515f03612f59576001600160a01b0385163b612f595760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401611aca565b508161098b565b61098b8383815115612f755781518083602001fd5b8060405162461bcd60e51b8152600401611aca9190613102565b803563ffffffff81168114612c5f575f80fd5b6001600160a01b0381168114612fb6575f80fd5b50565b5f8060408385031215612fca575f80fd5b612fd383612f8f565b91506020830135612fe381612fa2565b809150509250929050565b8015158114612fb6575f80fd5b5f8083601f84011261300b575f80fd5b50813567ffffffffffffffff811115613022575f80fd5b602083019150836020828501011115613039575f80fd5b9250929050565b5f805f805f60808688031215613054575f80fd5b61305d86612f8f565b9450602086013561306d81612fa2565b9350604086013561307d81612fee565b9250606086013567ffffffffffffffff811115613098575f80fd5b6130a488828901612ffb565b969995985093965092949392505050565b5f5b838110156130cf5781810151838201526020016130b7565b50505f910152565b5f81518084526130ee8160208601602086016130b5565b601f01601f19169290920160200192915050565b602081525f61311460208301846130d7565b9392505050565b5f6020828403121561312b575f80fd5b813561311481612fa2565b60ff81168114612fb6575f80fd5b5f805f805f805f60e0888a03121561315a575f80fd5b873561316581613136565b965061317360208901612f8f565b9550604088013561318381612f
a2565b945061319160608901612f8f565b935060808801356131a181612fa2565b9699959850939692959460a0840135945060c09093013592915050565b5f805f606084860312156131d0575f80fd5b6131d984612f8f565b925060208401356131e981612fa2565b915060408401356131f981612fa2565b809150509250925092565b5f60208284031215613214575f80fd5b5035919050565b8061040081018310156106ca575f80fd5b5f805f610440848603121561323f575f80fd5b83359250613250856020860161321b565b915061325f6104208501612f8f565b90509250925092565b634e487b7160e01b5f52604160045260245ffd5b604051601f8201601f1916810167ffffffffffffffff811182821017156132a5576132a5613268565b604052919050565b5f67ffffffffffffffff8211156132c6576132c6613268565b50601f01601f191660200190565b5f6132e66132e1846132ad565b61327c565b90508281528383830111156132f9575f80fd5b828260208301375f602084830101529392505050565b5f82601f83011261331e575f80fd5b613114838335602085016132d4565b5f805f805f60a08688031215613341575f80fd5b61334a86612f8f565b9450602086013561335a81612fa2565b9350604086013567ffffffffffffffff80821115613376575f80fd5b61338289838a0161330f565b94506060880135915080821115613397575f80fd5b506133a48882890161330f565b92505060808601356133b581613136565b809150509295509295909350565b5f805f805f8060a087890312156133d8575f80fd5b6133e187612f8f565b955060208701356133f181612fa2565b945060408701359350606087013561340881612fee565b9250608087013567ffffffffffffffff811115613423575f80fd5b61342f89828a01612ffb565b979a9699509497509295939492505050565b5f8060408385031215613452575f80fd5b61345b83612f8f565b915061346960208401612f8f565b90509250929050565b5f805f805f805f805f805f806109208d8f03121561348e575f80fd5b6134988e8e61321b565b9b506134a88e6104008f0161321b565b9a506108008d013599506108208d013598506108408d013597506134cf6108608e01612f8f565b96506134df6108808e0135612fa2565b6108808d013595506134f46108a08e01612f8f565b94506135046108c08e0135612fa2565b6108c08d013593506108e08d0135925067ffffffffffffffff6109008e0135111561352d575f80fd5b61353e8e6109008f01358f01612ffb565b81935080925050509295989b509295989b509295989b565b5f805f805f805f60c0888a03121561356c57
5f80fd5b61357588612f8f565b9650602088013561358581612fa2565b955060408801359450606088013561359c81612fa2565b935060808801356135ac81612fee565b925060a088013567ffffffffffffffff8111156135c7575f80fd5b6135d38a828b01612ffb565b989b979a50959850939692959293505050565b5f805f805f8060c087890312156135fb575f80fd5b61360487612f8f565b9550602087013561361481612fa2565b945061362260408801612f8f565b9350606087013561363281612fa2565b9250608087013561364281612fa2565b915060a087013567ffffffffffffffff81111561365d575f80fd5b8701601f8101891361366d575f80fd5b61367c898235602084016132d4565b9150509295509295509295565b5f805f80610460858703121561369d575f80fd5b843593506136ae866020870161321b565b92506136bd6104208601612f8f565b939692955092936104400135925050565b600181811c908216806136e257607f821691505b60208210810361370057634e487b7160e01b5f52602260045260245ffd5b50919050565b634e487b7160e01b5f52603260045260245ffd5b634e487b7160e01b5f52601160045260245ffd5b5f6001820161373f5761373f61371a565b5060010190565b606081525f61375860608301866130d7565b828103602084015261376a81866130d7565b91505060ff83166040830152949350505050565b5f835161378f8184602088016130b5565b8351908301906137a38183602088016130b5565b01949350505050565b80820281158282048414176106ca576106ca61371a565b808201808211156106ca576106ca61371a565b818382375f9101908152919050565b5f82516137f68184602087016130b5565b9190910192915050565b81835281816020850137505f828201602090810191909152601f909101601f19169091010190565b63ffffffff861681525f6001600160a01b03808716602084015280861660408401525060806060830152612ee7608083018486613800565b5f60208284031215613870575f80fd5b5051919050565b818103818111156106ca576106ca61371a565b5f61010060ff8b16835263ffffffff808b1660208501526001600160a01b03808b166040860152818a1660608601528089166080860152508660a08501528160c08501526138da828501876130d7565b925080851660e085015250509998505050505050505050565b6001600160a01b038516815263ffffffff84166020820152606060408201525f613921606083018486613800565b9695505050505050565b601f8211156123d0575f81815260208120601f850160051c81016020861015613951575080
5b601f850160051c820191505b81811015610cba5782815560010161395d565b815167ffffffffffffffff81111561398a5761398a613268565b61399e8161399884546136ce565b8461392b565b602080601f8311600181146139d1575f84156139ba5750858301515b5f19600386901b1c1916600185901b178555610cba565b5f85815260208120601f198616915b828110156139ff578886015182559484019460019091019084016139e0565b5085821015613a1c57878501515f19600388901b60f8161c191681555b5050505050600190811b01905550565b5f61010060ff8c16835263ffffffff808c1660208501526001600160a01b03808c166040860152818b166060860152808a166080860152508760a08501528160c0850152613a7d8285018789613800565b925080851660e085015250509a9950505050505050505050565b5f60208284031215613aa7575f80fd5b815161311481613136565b63ffffffff8181168382160190808211156124615761246161371a565b5f8085851115613add575f80fd5b83861115613ae9575f80fd5b5050820193919092039150565b6001600160e01b03198135818116916004851015613b1e5780818660040360031b1b83161692505b505092915050565b5f805f805f805f60e0888a031215613b3c575f80fd5b8735613b4781612fa2565b96506020880135613b5781612fa2565b9550604088013594506060880135935060808801356131a181613136565b5f805f805f805f80610100898b031215613b8d575f80fd5b8835613b9881612fa2565b97506020890135613ba881612fa2565b965060408901359550606089013594506080890135613bc681612fee565b935060a0890135613bd681613136565b979a969950949793969295929450505060c08201359160e0013590565b600181815b80851115613c2d57815f1904821115613c1357613c1361371a565b80851615613c2057918102915b93841c9390800290613bf8565b509250929050565b5f82613c43575060016106ca565b81613c4f57505f6106ca565b8160018114613c655760028114613c6f57613c8b565b60019150506106ca565b60ff841115613c8057613c8061371a565b50506001821b6106ca565b5060208310610133831016604e8410600b8410161715613cae575081810a6106ca565b613cb88383613bf3565b805f1904821115613ccb57613ccb61371a565b029392505050565b5f6131148383613c35565b634e487b7160e01b5f52600160045260245ffd5b5f60208284031215613d02575f80fd5b815167ffffffffffffffff811115613d18575f80fd5b8201601f81018413613d28575f80fd5b8051613d366132e1826132ad565b8181
52856020838501011115613d4a575f80fd5b613d5b8260208301602086016130b5565b95945050505050565b5f60208284031215613d74575f80fd5b815161311481612fee56fe6101006040523480156200001257600080fd5b5060405162001b6638038062001b6683398101604081905262000035916200028d565b82826003620000458382620003a1565b506004620000548282620003a1565b50503360c0525060ff811660e052466080819052620000739062000080565b60a052506200046d915050565b60007f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f620000ad6200012e565b805160209182012060408051808201825260018152603160f81b90840152805192830193909352918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc66060820152608081018390523060a082015260c001604051602081830303815290604052805190602001209050919050565b6060600380546200013f9062000312565b80601f01602080910402602001604051908101604052809291908181526020018280546200016d9062000312565b8015620001be5780601f106200019257610100808354040283529160200191620001be565b820191906000526020600020905b815481529060010190602001808311620001a057829003601f168201915b5050505050905090565b634e487b7160e01b600052604160045260246000fd5b600082601f830112620001f057600080fd5b81516001600160401b03808211156200020d576200020d620001c8565b604051601f8301601f19908116603f01168101908282118183101715620002385762000238620001c8565b816040528381526020925086838588010111156200025557600080fd5b600091505b838210156200027957858201830151818301840152908201906200025a565b600093810190920192909252949350505050565b600080600060608486031215620002a357600080fd5b83516001600160401b0380821115620002bb57600080fd5b620002c987838801620001de565b94506020860151915080821115620002e057600080fd5b50620002ef86828701620001de565b925050604084015160ff811681146200030757600080fd5b809150509250925092565b600181811c908216806200032757607f821691505b6020821081036200034857634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200039c57600081815260208120601f850160051c81016020861015620003775750805b601f850160051c820191505b81811015620003985782815560010162000383565b5050505b50505056
5b81516001600160401b03811115620003bd57620003bd620001c8565b620003d581620003ce845462000312565b846200034e565b602080601f8311600181146200040d5760008415620003f45750858301515b600019600386901b1c1916600185901b17855562000398565b600085815260208120601f198616915b828110156200043e578886015182559484019460019091019084016200041d565b50858210156200045d5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b60805160a05160c05160e0516116aa620004bc6000396000610237015260008181610307015281816105c001526106a70152600061053a015260008181610379015261050401526116aa6000f3fe608060405234801561001057600080fd5b50600436106101775760003560e01c806370a08231116100d8578063a457c2d71161008c578063d505accf11610066578063d505accf1461039b578063dd62ed3e146103ae578063ffa1ad74146103f457600080fd5b8063a457c2d71461034e578063a9059cbb14610361578063cd0d00961461037457600080fd5b806395d89b41116100bd57806395d89b41146102e75780639dc29fac146102ef578063a3c573eb1461030257600080fd5b806370a08231146102915780637ecebe00146102c757600080fd5b806330adf81f1161012f5780633644e515116101145780633644e51514610261578063395093511461026957806340c10f191461027c57600080fd5b806330adf81f14610209578063313ce5671461023057600080fd5b806318160ddd1161016057806318160ddd146101bd57806320606b70146101cf57806323b872dd146101f657600080fd5b806306fdde031461017c578063095ea7b31461019a575b600080fd5b610184610430565b60405161019191906113e4565b60405180910390f35b6101ad6101a8366004611479565b6104c2565b6040519015158152602001610191565b6002545b604051908152602001610191565b6101c17f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f81565b6101ad6102043660046114a3565b6104dc565b6101c17f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c981565b60405160ff7f0000000000000000000000000000000000000000000000000000000000000000168152602001610191565b6101c1610500565b6101ad610277366004611479565b61055c565b61028f61028a366004611479565b6105a8565b005b6101c161029f3660046114df565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6101c16102
d53660046114df565b60056020526000908152604090205481565b610184610680565b61028f6102fd366004611479565b61068f565b6103297f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610191565b6101ad61035c366004611479565b61075e565b6101ad61036f366004611479565b61082f565b6101c17f000000000000000000000000000000000000000000000000000000000000000081565b61028f6103a9366004611501565b61083d565b6101c16103bc366004611574565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b6101846040518060400160405280600181526020017f310000000000000000000000000000000000000000000000000000000000000081525081565b60606003805461043f906115a7565b80601f016020809104026020016040519081016040528092919081815260200182805461046b906115a7565b80156104b85780601f1061048d576101008083540402835291602001916104b8565b820191906000526020600020905b81548152906001019060200180831161049b57829003601f168201915b5050505050905090565b6000336104d0818585610b73565b60019150505b92915050565b6000336104ea858285610d27565b6104f5858585610dfe565b506001949350505050565b60007f00000000000000000000000000000000000000000000000000000000000000004614610537576105324661106d565b905090565b507f000000000000000000000000000000000000000000000000000000000000000090565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549091906104d090829086906105a3908790611629565b610b73565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610672576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f546f6b656e577261707065643a3a6f6e6c794272696467653a204e6f7420506f60448201527f6c79676f6e5a6b45564d4272696467650000000000000000000000000000000060648201526084015b60405180910390fd5b61067c8282611135565b5050565b60606004805461043f906115a7565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000
0000000000000000000000000000000000001614610754576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f546f6b656e577261707065643a3a6f6e6c794272696467653a204e6f7420506f60448201527f6c79676f6e5a6b45564d427269646765000000000000000000000000000000006064820152608401610669565b61067c8282611228565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919083811015610822576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f0000000000000000000000000000000000000000000000000000006064820152608401610669565b6104f58286868403610b73565b6000336104d0818585610dfe565b834211156108cc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f546f6b656e577261707065643a3a7065726d69743a204578706972656420706560448201527f726d6974000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8716600090815260056020526040812080547f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c9918a918a918a9190866109268361163c565b9091555060408051602081019690965273ffffffffffffffffffffffffffffffffffffffff94851690860152929091166060840152608083015260a082015260c0810186905260e0016040516020818303038152906040528051906020012090506000610991610500565b6040517f19010000000000000000000000000000000000000000000000000000000000006020820152602281019190915260428101839052606201604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120600080855291840180845281905260ff89169284019290925260608301879052608083018690529092509060019060a0016020604051602081039080840390855afa158015610a55573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015191505073ffffffffffffffffffffffffffffffffffffffff81161580159061
0ad057508973ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16145b610b5c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602760248201527f546f6b656e577261707065643a3a7065726d69743a20496e76616c696420736960448201527f676e6174757265000000000000000000000000000000000000000000000000006064820152608401610669565b610b678a8a8a610b73565b50505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff8316610c15576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f72657373000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8216610cb8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f73730000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610df85781811015610deb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610669565b610df88484848403610b73565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8316610ea1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f64726573730000000000000000000000
000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff8216610f44576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f65737300000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205481811015610ffa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e636500000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610df8565b60007f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f611098610430565b8051602091820120604080518082018252600181527f310000000000000000000000000000000000000000000000000000000000000090840152805192830193909352918101919091527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc66060820152608081018390523060a082015260c001604051602081830303815290604052805190602001209050919050565b73ffffffffffffffffffffffffffffffffffffffff82166111b2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610669565b80600260008282546111c49190611629565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b73ffffffffffffffffffffffffffffffffffffffff82166112cb576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060
04820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f2061646472657360448201527f73000000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff821660009081526020819052604090205481811015611381576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f63650000000000000000000000000000000000000000000000000000000000006064820152608401610669565b73ffffffffffffffffffffffffffffffffffffffff83166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9101610d1a565b600060208083528351808285015260005b81811015611411578581018301518582016040015282016113f5565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461147457600080fd5b919050565b6000806040838503121561148c57600080fd5b61149583611450565b946020939093013593505050565b6000806000606084860312156114b857600080fd5b6114c184611450565b92506114cf60208501611450565b9150604084013590509250925092565b6000602082840312156114f157600080fd5b6114fa82611450565b9392505050565b600080600080600080600060e0888a03121561151c57600080fd5b61152588611450565b965061153360208901611450565b95506040880135945060608801359350608088013560ff8116811461155757600080fd5b9699959850939692959460a0840135945060c09093013592915050565b6000806040838503121561158757600080fd5b61159083611450565b915061159e60208401611450565b90509250929050565b600181811c908216806115bb57607f821691505b6020821081036115f4577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156104d6576104d66115fa565b60007fffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffff820361166d5761166d6115fa565b506001019056fea26469706673582212208d88fee561cff7120d381c345cfc534cef8229a272dc5809d4bbb685ad67141164736f6c63430008110033a2646970667358221220432f6d6b4446edbe1f73c19fd2115454d5c35d8b03b98a74fd46724151d7672264736f6c63430008140033", + "storage": null + }, + "0x27DAeD0badd500740762d1792F3277a7F3DAdd79": { + "contractName": "PolygonZkEVMBridge proxy", + "balance": "340282366920938463463374607431768211455", + "nonce": "1", + "code": "0x60806040526004361061005d575f3560e01c80635c60da1b116100425780635c60da1b146100a65780638f283970146100e3578063f851a440146101025761006c565b80633659cfe6146100745780634f1ef286146100935761006c565b3661006c5761006a610116565b005b61006a610116565b34801561007f575f80fd5b5061006a61008e366004610854565b610130565b61006a6100a136600461086d565b610178565b3480156100b1575f80fd5b506100ba6101eb565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100ee575f80fd5b5061006a6100fd366004610854565b610228565b34801561010d575f80fd5b506100ba610255565b61011e610282565b61012e610129610359565b610362565b565b610138610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101705761016d8160405180602001604052805f8152505f6103bf565b50565b61016d610116565b610180610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101e3576101de8383838080601f0160208091040260200160405190810160405280939291908181526020018383808284375f92019190915250600192506103bf915050565b505050565b6101de610116565b5f6101f4610380565b73ffffffffffffffffffffffffffffffffffffffff16330361021d57610218610359565b905090565b610225610116565b90565b610230610380565b73ffffffffffffffffffffffffffffffffffffffff1633036101705761016d816103e9565b5f61025e610380565b73ffffffffffffffffffffffffffffffffffffffff16330361021d57610218610380565b61028a610380565b73ffffffffffffffffffffffffffffffffffffffff16330361012e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f787
93a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f7879207461726760648201527f6574000000000000000000000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b5f61021861044a565b365f80375f80365f845af43d5f803e80801561037c573d5ff35b3d5ffd5b5f7fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b5473ffffffffffffffffffffffffffffffffffffffff16919050565b6103c883610471565b5f825111806103d45750805b156101de576103e383836104bd565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f610412610380565b6040805173ffffffffffffffffffffffffffffffffffffffff928316815291841660208301520160405180910390a161016d816104e9565b5f7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6103a3565b61047a816105f5565b60405173ffffffffffffffffffffffffffffffffffffffff8216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a250565b60606104e28383604051806060016040528060278152602001610977602791396106c0565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff811661058c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201527f64647265737300000000000000000000000000000000000000000000000000006064820152608401610350565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9290921691909117905550565b73ffffffffffffffffffffffffffffffffffffffff81163b610699576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201527f6f74206120636f6e7472616374000000000000000000000000000000000000006064820152608401610350565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105af565b60605f808573ffffffffffffffffffffffffffffffffffffffff1685604
0516106e9919061090b565b5f60405180830381855af49150503d805f8114610721576040519150601f19603f3d011682016040523d82523d5f602084013e610726565b606091505b509150915061073786838387610741565b9695505050505050565b606083156107d65782515f036107cf5773ffffffffffffffffffffffffffffffffffffffff85163b6107cf576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610350565b50816107e0565b6107e083836107e8565b949350505050565b8151156107f85781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103509190610926565b803573ffffffffffffffffffffffffffffffffffffffff8116811461084f575f80fd5b919050565b5f60208284031215610864575f80fd5b6104e28261082c565b5f805f6040848603121561087f575f80fd5b6108888461082c565b9250602084013567ffffffffffffffff808211156108a4575f80fd5b818601915086601f8301126108b7575f80fd5b8135818111156108c5575f80fd5b8760208285010111156108d6575f80fd5b6020830194508093505050509250925092565b5f5b838110156109035781810151838201526020016108eb565b50505f910152565b5f825161091c8184602087016108e9565b9190910192915050565b602081525f82518060208401526109448160408501602087016108e9565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a26469706673582212202ac98acbfbb3d3ac1b74050e18c4e76db25a3ff2801ec69bf85d0c61414d502b64736f6c63430008140033", + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x000000000000000000000000a98ed9c842d93e9e06038a107530cb7bf975595f", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000006d1ed73612f56782e5c0a090ad04c41b94d10721" + } + }, + "0x6AeeF94ddD88bEe2Cb06c975bbA06beFF7f1D95E": { + "contractName": "PolygonZkEVMGlobalExitRootL2 implementation", + "balance": "0", + "nonce": "1", + "code": 
"0x608060405234801561000f575f80fd5b506004361061004a575f3560e01c806301fd90441461004e578063257b36321461006a57806333d6247d14610089578063a3c573eb1461009e575b5f80fd5b61005760015481565b6040519081526020015b60405180910390f35b61005761007836600461015e565b5f6020819052908152604090205481565b61009c61009736600461015e565b6100ea565b005b6100c57f00000000000000000000000027daed0badd500740762d1792f3277a7f3dadd7981565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610061565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000027daed0badd500740762d1792f3277a7f3dadd791614610159576040517fb49365dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600155565b5f6020828403121561016e575f80fd5b503591905056fea26469706673582212205108c6c4f924146b736832a1bdf696e20d900450207b7452462368d150f2c71c64736f6c63430008140033", + "storage": null + }, + "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa": { + "contractName": "PolygonZkEVMGlobalExitRootL2 proxy", + "balance": "0", + "nonce": "1", + "code": 
"0x60806040523661001357610011610017565b005b6100115b61001f6101b7565b6001600160a01b0316336001600160a01b0316141561016f5760606001600160e01b031960003516631b2ce7f360e11b8114156100655761005e6101ea565b9150610167565b6001600160e01b0319811663278f794360e11b14156100865761005e610241565b6001600160e01b031981166308f2839760e41b14156100a75761005e610287565b6001600160e01b031981166303e1469160e61b14156100c85761005e6102b8565b6001600160e01b03198116635c60da1b60e01b14156100e95761005e6102f8565b60405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b815160208301f35b61017761030c565b565b606061019e83836040518060600160405280602781526020016108576027913961031c565b9392505050565b90565b6001600160a01b03163b151590565b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b60606101f4610394565b600061020336600481846106a2565b81019061021091906106e8565b905061022d8160405180602001604052806000815250600061039f565b505060408051602081019091526000815290565b606060008061025336600481846106a2565b8101906102609190610719565b915091506102708282600161039f565b604051806020016040528060008152509250505090565b6060610291610394565b60006102a036600481846106a2565b8101906102ad91906106e8565b905061022d816103cb565b60606102c2610394565b60006102cc6101b7565b604080516001600160a01b03831660208201529192500160405160208183030381529060405291505090565b6060610302610394565b60006102cc610422565b610177610317610422565b610431565b6060600080856001600160a01b0316856040516103399190610807565b600060405180830381855af49150503d8060008114610374576040519150601f19603f3d011682016040523d82523d6000602084013e610379565b606091505b509150915061038a86838387610455565b9695505050505050565b341561017757600080fd5b6103a8836104d3565b6000825111806103b55750805b156103c6576103c48383610179565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832
dc59e38c9798f6103f46101b7565b604080516001600160a01b03928316815291841660208301520160405180910390a161041f81610513565b50565b600061042c6105bc565b905090565b3660008037600080366000845af43d6000803e808015610450573d6000f35b3d6000fd5b606083156104c15782516104ba576001600160a01b0385163b6104ba5760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604482015260640161015e565b50816104cb565b6104cb83836105e4565b949350505050565b6104dc8161060e565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105785760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b606482015260840161015e565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6101db565b8151156105f45781518083602001fd5b8060405162461bcd60e51b815260040161015e9190610823565b6001600160a01b0381163b61067b5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b606482015260840161015e565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61059b565b600080858511156106b257600080fd5b838611156106bf57600080fd5b5050820193919092039150565b80356001600160a01b03811681146106e357600080fd5b919050565b6000602082840312156106fa57600080fd5b61019e826106cc565b634e487b7160e01b600052604160045260246000fd5b6000806040838503121561072c57600080fd5b610735836106cc565b9150602083013567ffffffffffffffff8082111561075257600080fd5b818501915085601f83011261076657600080fd5b81358181111561077857610778610703565b604051601f8201601f19908116603f011681019083821181831017156107a0576107a0610703565b816040528281528860208487010111156107b957600080fd5b82602086016020830137600060208483010152809
55050505050509250929050565b60005b838110156107f65781810151838201526020016107de565b838111156103c45750506000910152565b600082516108198184602087016107db565b9190910192915050565b60208152600082518060208401526108428160408501602087016107db565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a264697066735822122012bb4f564f73959a03513dc74fc3c6e40e8386e6f02c16b78d6db00ce0aa16af64736f6c63430008090033", + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x000000000000000000000000a98ed9c842d93e9e06038a107530cb7bf975595f", + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000006aeef94ddd88bee2cb06c975bba06beff7f1d95e" + } + }, + "0x3e779571BfCa47b12f3b6D11Da0bd62321072FA6": { + "contractName": "PolygonZkEVMTimelock", + "balance": "0", + "nonce": "1", + "code": "0x6080604052600436106101bd575f3560e01c806364d62353116100f2578063b1c5f42711610092578063d547741f11610062578063d547741f1461063a578063e38335e514610659578063f23a6e611461066c578063f27a0c92146106b0575f80fd5b8063b1c5f4271461058d578063bc197c81146105ac578063c4d252f5146105f0578063d45c44351461060f575f80fd5b80638f61f4f5116100cd5780638f61f4f5146104c557806391d14854146104f8578063a217fddf14610547578063b08e51c01461055a575f80fd5b806364d62353146104685780638065657f146104875780638f2a0bb0146104a6575f80fd5b8063248a9ca31161015d57806331d507501161013857806331d50750146103b357806336568abe146103d25780633a6aae72146103f1578063584b153e14610449575f80fd5b8063248a9ca3146103375780632ab0f529146103655780632f2ff15d14610394575f80fd5b80630d3cf6fc116101985780630d3cf6fc1461025e578063134008d31461029157806313bc9f20146102a4578063150b7a02146102c3575f80fd5b806301d5062a146101c857806301ffc9a7146101e957806307bd02651461021d575f80fd5b366101c457005b5f80fd5b3480156101d3575f80fd5b506101e76101e2366004611bf6565b6106c4565b005b3480156101f4575f80fd5b50610208610203366004611c65565b610757565b60405190151581526020015b60405180910390f35b3480156102
28575f80fd5b506102507fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e6381565b604051908152602001610214565b348015610269575f80fd5b506102507f5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca581565b6101e761029f366004611ca4565b6107b2565b3480156102af575f80fd5b506102086102be366004611d0b565b6108a7565b3480156102ce575f80fd5b506103066102dd366004611e28565b7f150b7a0200000000000000000000000000000000000000000000000000000000949350505050565b6040517fffffffff000000000000000000000000000000000000000000000000000000009091168152602001610214565b348015610342575f80fd5b50610250610351366004611d0b565b5f9081526020819052604090206001015490565b348015610370575f80fd5b5061020861037f366004611d0b565b5f908152600160208190526040909120541490565b34801561039f575f80fd5b506101e76103ae366004611e8c565b6108cc565b3480156103be575f80fd5b506102086103cd366004611d0b565b6108f5565b3480156103dd575f80fd5b506101e76103ec366004611e8c565b61090d565b3480156103fc575f80fd5b506104247f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610214565b348015610454575f80fd5b50610208610463366004611d0b565b6109c5565b348015610473575f80fd5b506101e7610482366004611d0b565b6109da565b348015610492575f80fd5b506102506104a1366004611ca4565b610aaa565b3480156104b1575f80fd5b506101e76104c0366004611ef7565b610ae8565b3480156104d0575f80fd5b506102507fb09aa5aeb3702cfd50b6b62bc4532604938f21248a27a1d5ca736082b6819cc181565b348015610503575f80fd5b50610208610512366004611e8c565b5f9182526020828152604080842073ffffffffffffffffffffffffffffffffffffffff93909316845291905290205460ff1690565b348015610552575f80fd5b506102505f81565b348015610565575f80fd5b506102507ffd643c72710c63c0180259aba6b2d05451e3591a24e58b62239378085726f78381565b348015610598575f80fd5b506102506105a7366004611fa0565b610d18565b3480156105b7575f80fd5b506103066105c63660046120be565b7fbc197c810000000000000000000000000000000000000000000000000000000095945050505050565b3480156105fb575f80fd5b506101e761060a366004611d0b565b
610d5c565b34801561061a575f80fd5b50610250610629366004611d0b565b5f9081526001602052604090205490565b348015610645575f80fd5b506101e7610654366004611e8c565b610e56565b6101e7610667366004611fa0565b610e7a565b348015610677575f80fd5b50610306610686366004612161565b7ff23a6e610000000000000000000000000000000000000000000000000000000095945050505050565b3480156106bb575f80fd5b50610250611121565b7fb09aa5aeb3702cfd50b6b62bc4532604938f21248a27a1d5ca736082b6819cc16106ee81611200565b5f6106fd898989898989610aaa565b9050610709818461120d565b5f817f4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca8b8b8b8b8b8a60405161074496959493929190612208565b60405180910390a3505050505050505050565b5f7fffffffff0000000000000000000000000000000000000000000000000000000082167f4e2312e00000000000000000000000000000000000000000000000000000000014806107ac57506107ac82611359565b92915050565b5f80527fdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d70696020527f5ba6852781629bcdcd4bdaa6de76d786f1c64b16acdac474e55bebc0ea157951547fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e639060ff1661082e5761082e81336113ef565b5f61083d888888888888610aaa565b905061084981856114a6565b610855888888886115e2565b5f817fc2617efa69bab66782fa219543714338489c4e9e178271560a91b82c3f612b588a8a8a8a60405161088c9493929190612252565b60405180910390a361089d816116e2565b5050505050505050565b5f818152600160205260408120546001811180156108c55750428111155b9392505050565b5f828152602081905260409020600101546108e681611200565b6108f0838361178a565b505050565b5f8181526001602052604081205481905b1192915050565b73ffffffffffffffffffffffffffffffffffffffff811633146109b7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602f60248201527f416363657373436f6e74726f6c3a2063616e206f6e6c792072656e6f756e636560448201527f20726f6c657320666f722073656c66000000000000000000000000000000000060648201526084015b60405180910390fd5b6109c18282611878565b5050565b5f818152600160208190526040822054610906565b333014610a69576040517f08c379a0000000000000000000
00000000000000000000000000000000000000815260206004820152602b60248201527f54696d656c6f636b436f6e74726f6c6c65723a2063616c6c6572206d7573742060448201527f62652074696d656c6f636b00000000000000000000000000000000000000000060648201526084016109ae565b60025460408051918252602082018390527f11c24f4ead16507c69ac467fbd5e4eed5fb5c699626d2cc6d66421df253886d5910160405180910390a1600255565b5f868686868686604051602001610ac696959493929190612208565b6040516020818303038152906040528051906020012090509695505050505050565b7fb09aa5aeb3702cfd50b6b62bc4532604938f21248a27a1d5ca736082b6819cc1610b1281611200565b888714610ba1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109ae565b888514610c30576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109ae565b5f610c418b8b8b8b8b8b8b8b610d18565b9050610c4d818461120d565b5f5b8a811015610d0a5780827f4cf4410cc57040e44862ef0f45f3dd5a5e02db8eb8add648d4b0e236f1d07dca8e8e85818110610c8c57610c8c612291565b9050602002016020810190610ca191906122be565b8d8d86818110610cb357610cb3612291565b905060200201358c8c87818110610ccc57610ccc612291565b9050602002810190610cde91906122d7565b8c8b604051610cf296959493929190612208565b60405180910390a3610d0381612365565b9050610c4f565b505050505050505050505050565b5f8888888888888888604051602001610d38989796959493929190612447565b60405160208183030381529060405280519060200120905098975050505050505050565b7ffd643c72710c63c0180259aba6b2d05451e3591a24e58b62239378085726f783610d8681611200565b610d8f826109c5565b610e1b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603160248201527f54696d656c6f636b436f6e74726f6c6c6572
3a206f7065726174696f6e20636160448201527f6e6e6f742062652063616e63656c6c656400000000000000000000000000000060648201526084016109ae565b5f828152600160205260408082208290555183917fbaa1eb22f2a492ba1a5fea61b8df4d27c6c8b5f3971e63bb58fa14ff72eedb7091a25050565b5f82815260208190526040902060010154610e7081611200565b6108f08383611878565b5f80527fdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d70696020527f5ba6852781629bcdcd4bdaa6de76d786f1c64b16acdac474e55bebc0ea157951547fd8aa0f3194971a2a116679f7c2090f6939c8d4e01a2a8d7e41d55e5351469e639060ff16610ef657610ef681336113ef565b878614610f85576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109ae565b878414611014576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f54696d656c6f636b436f6e74726f6c6c65723a206c656e677468206d69736d6160448201527f746368000000000000000000000000000000000000000000000000000000000060648201526084016109ae565b5f6110258a8a8a8a8a8a8a8a610d18565b905061103181856114a6565b5f5b8981101561110b575f8b8b8381811061104e5761104e612291565b905060200201602081019061106391906122be565b90505f8a8a8481811061107857611078612291565b905060200201359050365f8a8a8681811061109557611095612291565b90506020028101906110a791906122d7565b915091506110b7848484846115e2565b84867fc2617efa69bab66782fa219543714338489c4e9e178271560a91b82c3f612b58868686866040516110ee9493929190612252565b60405180910390a3505050508061110490612365565b9050611033565b50611115816116e2565b50505050505050505050565b5f7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16158015906111ef57507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166315064c966040518163ffffffff1660e01b8152600401602060405180830381865afa1580156111cb
573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906111ef919061250c565b156111f957505f90565b5060025490565b61120a81336113ef565b50565b611216826108f5565b156112a3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602f60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20616c60448201527f7265616479207363686564756c6564000000000000000000000000000000000060648201526084016109ae565b6112ab611121565b81101561133a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f54696d656c6f636b436f6e74726f6c6c65723a20696e73756666696369656e7460448201527f2064656c6179000000000000000000000000000000000000000000000000000060648201526084016109ae565b611344814261252b565b5f928352600160205260409092209190915550565b5f7fffffffff0000000000000000000000000000000000000000000000000000000082167f7965db0b0000000000000000000000000000000000000000000000000000000014806107ac57507f01ffc9a7000000000000000000000000000000000000000000000000000000007fffffffff000000000000000000000000000000000000000000000000000000008316146107ac565b5f8281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff166109c15761142c8161192d565b61143783602061194c565b604051602001611448929190612560565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527f08c379a00000000000000000000000000000000000000000000000000000000082526109ae916004016125e0565b6114af826108a7565b61153b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20697360448201527f206e6f742072656164790000000000000000000000000000000000000000000060648201526084016109ae565b80158061155657505f81815260016020819052604090912054145b6109c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f54696d656c6f636b436f6e74726f6c6c65723a206d697373696e67
206465706560448201527f6e64656e6379000000000000000000000000000000000000000000000000000060648201526084016109ae565b5f8473ffffffffffffffffffffffffffffffffffffffff1684848460405161160b929190612630565b5f6040518083038185875af1925050503d805f8114611645576040519150601f19603f3d011682016040523d82523d5f602084013e61164a565b606091505b50509050806116db576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603360248201527f54696d656c6f636b436f6e74726f6c6c65723a20756e6465726c79696e67207460448201527f72616e73616374696f6e2072657665727465640000000000000000000000000060648201526084016109ae565b5050505050565b6116eb816108a7565b611777576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f54696d656c6f636b436f6e74726f6c6c65723a206f7065726174696f6e20697360448201527f206e6f742072656164790000000000000000000000000000000000000000000060648201526084016109ae565b5f90815260016020819052604090912055565b5f8281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff166109c1575f8281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff85168452909152902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905561181a3390565b73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45050565b5f8281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516845290915290205460ff16156109c1575f8281526020818152604080832073ffffffffffffffffffffffffffffffffffffffff8516808552925280832080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905551339285917ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b9190a45050565b60606107ac73ffffffffffffffffffffffffffffffffffffffff831660145b60605f61195a83600261263f565b61196590600261252b565b67ffffffffffffffff81111561197d5761197d611d22565b6040519080825280601f01601f1916602001820160
405280156119a7576020820181803683370190505b5090507f3000000000000000000000000000000000000000000000000000000000000000815f815181106119dd576119dd612291565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053507f780000000000000000000000000000000000000000000000000000000000000081600181518110611a3f57611a3f612291565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a9053505f611a7984600261263f565b611a8490600161252b565b90505b6001811115611b20577f303132333435363738396162636465660000000000000000000000000000000085600f1660108110611ac557611ac5612291565b1a60f81b828281518110611adb57611adb612291565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690815f1a90535060049490941c93611b1981612656565b9050611a87565b5083156108c5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e7460448201526064016109ae565b803573ffffffffffffffffffffffffffffffffffffffff81168114611bac575f80fd5b919050565b5f8083601f840112611bc1575f80fd5b50813567ffffffffffffffff811115611bd8575f80fd5b602083019150836020828501011115611bef575f80fd5b9250929050565b5f805f805f805f60c0888a031215611c0c575f80fd5b611c1588611b89565b965060208801359550604088013567ffffffffffffffff811115611c37575f80fd5b611c438a828b01611bb1565b989b979a50986060810135976080820135975060a09091013595509350505050565b5f60208284031215611c75575f80fd5b81357fffffffff00000000000000000000000000000000000000000000000000000000811681146108c5575f80fd5b5f805f805f8060a08789031215611cb9575f80fd5b611cc287611b89565b955060208701359450604087013567ffffffffffffffff811115611ce4575f80fd5b611cf089828a01611bb1565b979a9699509760608101359660809091013595509350505050565b5f60208284031215611d1b575f80fd5b5035919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168101
67ffffffffffffffff81118282101715611d9657611d96611d22565b604052919050565b5f82601f830112611dad575f80fd5b813567ffffffffffffffff811115611dc757611dc7611d22565b611df860207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611d4f565b818152846020838601011115611e0c575f80fd5b816020850160208301375f918101602001919091529392505050565b5f805f8060808587031215611e3b575f80fd5b611e4485611b89565b9350611e5260208601611b89565b925060408501359150606085013567ffffffffffffffff811115611e74575f80fd5b611e8087828801611d9e565b91505092959194509250565b5f8060408385031215611e9d575f80fd5b82359150611ead60208401611b89565b90509250929050565b5f8083601f840112611ec6575f80fd5b50813567ffffffffffffffff811115611edd575f80fd5b6020830191508360208260051b8501011115611bef575f80fd5b5f805f805f805f805f60c08a8c031215611f0f575f80fd5b893567ffffffffffffffff80821115611f26575f80fd5b611f328d838e01611eb6565b909b50995060208c0135915080821115611f4a575f80fd5b611f568d838e01611eb6565b909950975060408c0135915080821115611f6e575f80fd5b50611f7b8c828d01611eb6565b9a9d999c50979a969997986060880135976080810135975060a0013595509350505050565b5f805f805f805f8060a0898b031215611fb7575f80fd5b883567ffffffffffffffff80821115611fce575f80fd5b611fda8c838d01611eb6565b909a50985060208b0135915080821115611ff2575f80fd5b611ffe8c838d01611eb6565b909850965060408b0135915080821115612016575f80fd5b506120238b828c01611eb6565b999c989b509699959896976060870135966080013595509350505050565b5f82601f830112612050575f80fd5b8135602067ffffffffffffffff82111561206c5761206c611d22565b8160051b61207b828201611d4f565b9283528481018201928281019087851115612094575f80fd5b83870192505b848310156120b35782358252918301919083019061209a565b979650505050505050565b5f805f805f60a086880312156120d2575f80fd5b6120db86611b89565b94506120e960208701611b89565b9350604086013567ffffffffffffffff80821115612105575f80fd5b61211189838a01612041565b94506060880135915080821115612126575f80fd5b61213289838a01612041565b93506080880135915080821115612147575f80fd5b5061215488828901611d9e565b9150509295509295909350565b5f
805f805f60a08688031215612175575f80fd5b61217e86611b89565b945061218c60208701611b89565b93506040860135925060608601359150608086013567ffffffffffffffff8111156121b5575f80fd5b61215488828901611d9e565b81835281816020850137505f602082840101525f60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff8716815285602082015260a060408201525f61223d60a0830186886121c1565b60608301949094525060800152949350505050565b73ffffffffffffffffffffffffffffffffffffffff85168152836020820152606060408201525f6122876060830184866121c1565b9695505050505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b5f602082840312156122ce575f80fd5b6108c582611b89565b5f8083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261230a575f80fd5b83018035915067ffffffffffffffff821115612324575f80fd5b602001915036819003821315611bef575f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361239557612395612338565b5060010190565b8183525f6020808501808196508560051b81019150845f5b8781101561243a57828403895281357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18836030181126123f2575f80fd5b8701858101903567ffffffffffffffff81111561240d575f80fd5b80360382131561241b575f80fd5b6124268682846121c1565b9a87019a95505050908401906001016123b4565b5091979650505050505050565b60a080825281018890525f8960c08301825b8b8110156124945773ffffffffffffffffffffffffffffffffffffffff61247f84611b89565b16825260209283019290910190600101612459565b5083810360208501528881527f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8911156124cc575f80fd5b8860051b9150818a602083013701828103602090810160408501526124f4908201878961239c565b60608401959095525050608001529695505050505050565b5f6020828403121561251c575f80fd5b815180151581146108c5575f80fd5b808201808211156107ac576107ac612338565b5f5b83811015612558578181
015183820152602001612540565b50505f910152565b7f416363657373436f6e74726f6c3a206163636f756e742000000000000000000081525f835161259781601785016020880161253e565b7f206973206d697373696e6720726f6c652000000000000000000000000000000060179184019182015283516125d481602884016020880161253e565b01602801949350505050565b602081525f82518060208401526125fe81604085016020870161253e565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b818382375f9101908152919050565b80820281158282048414176107ac576107ac612338565b5f8161266457612664612338565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff019056fea2646970667358221220e28ae7494480ab1c619fd775dc5ff665588c808a910d66178a982c2e7c76a1e664736f6c63430008140033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000e10", + "0x580ea523c817b7b0a491684930b2d0bbbd7f9820fbc448b3f67c1876abe58519": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xc2e9d10796f7171cc61e57d07d60529d24d3e40996fa4e22cfedd42afda9b3bb": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x64494413541ff93b31aa309254e3fed72a7456e9845988b915b4c7a7ceba8814": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0xf3f956d9198defa1c0b1f8072a9758bad31b030b8fae5436d2f2fe3651065fd2": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x3412d5605ac6cd444957cedb533e5dacad6378b4bc819ebe3652188a665066d6": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0x081cd28f65cd36f0edaf0cb7b44bda2f3e93dd0058c257031739a015dca687c3": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xdae2aa361dfd1ca020a396615627d436107c35eff9fe7738a3512819782d706a": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5", + "0xe81fd704e480aa1f75c10c8e4e206f915bcbdae96aa426229f2a9f2ade9ccaa4": 
"0x0000000000000000000000000000000000000000000000000000000000000001", + "0xc3ad33e20b0c56a223ad5104fff154aa010f8715b9c981fd38fdc60a4d1a52fc": "0x5f58e3a2316349923ce3780f8d587db2d72378aed66a8261c916544fa6846ca5" + } + }, + "0xF7F76004Ac4660267040e7fa2A0Fbe8e74B9Bb3B": { + "contractName": null, + "balance": "0", + "nonce": "1", + "code": null, + "storage": null + }, + "0xe859276098f208D003ca6904C6cC26629Ee364Ce": { + "contractName": null, + "balance": "100000000000000000000000", + "nonce": "8", + "code": null, + "storage": null + } +} \ No newline at end of file diff --git a/zk/tests/nightly-l1-recovery/dynamic-integration8-chainspec.json b/zk/tests/nightly-l1-recovery/dynamic-integration8-chainspec.json new file mode 100644 index 00000000000..2af8897bdd8 --- /dev/null +++ b/zk/tests/nightly-l1-recovery/dynamic-integration8-chainspec.json @@ -0,0 +1,24 @@ +{ + "ChainName": "dynamic-integration8", + "chainId": 779, + "consensus": "ethash", + "homesteadBlock": 0, + "daoForkBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 9999999999999999999999999999999999999999999999999, + "arrowGlacierBlock": 9999999999999999999999999999999999999999999999999, + "grayGlacierBlock": 9999999999999999999999999999999999999999999999999, + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": false, + "shanghaiTime": 9999999999999999999999999999999999999999999999999, + "cancunTime": 9999999999999999999999999999999999999999999999999, + "pragueTime": 9999999999999999999999999999999999999999999999999, + "ethash": {} +} \ No newline at end of file diff --git a/zk/tests/nightly-l1-recovery/dynamic-integration8-conf.json b/zk/tests/nightly-l1-recovery/dynamic-integration8-conf.json new file mode 100644 index 00000000000..d829a9204f2 --- /dev/null +++ 
b/zk/tests/nightly-l1-recovery/dynamic-integration8-conf.json @@ -0,0 +1,6 @@ +{ + "root": "0xea8d1e415c7615b49413cb381afef93a0f1cd45c9b179af5fba9b90134d04966", + "timestamp": 1722454116, + "gasLimit": 0, + "difficulty": 0 +} \ No newline at end of file diff --git a/zk/tests/nightly-l1-recovery/network8-config.yaml b/zk/tests/nightly-l1-recovery/network8-config.yaml new file mode 100644 index 00000000000..4d035185ee2 --- /dev/null +++ b/zk/tests/nightly-l1-recovery/network8-config.yaml @@ -0,0 +1,45 @@ +datadir : './datadir' +chain : "dynamic-integration8" +http : true +private.api.addr : "localhost:9096" +zkevm.l2-chain-id: 779 +zkevm.l2-sequencer-rpc-url: "http://34.175.214.161:18123" +zkevm.l2-datastreamer-url: "34.175.214.161:16900" +zkevm.l1-chain-id: 11155111 +zkevm.l1-rpc-url: "http://cache:6969?endpoint=https://rpc.sepolia.org&chainid=779" + +zkevm.address-sequencer: "0x153724F17B1eb206e31CAbA82f6b45E865879D94" +zkevm.address-zkevm: "0xA24686d989DCd70fBb4D8311694820d74872f061" +zkevm.address-admin: "0xe859276098f208D003ca6904C6cC26629Ee364Ce" +zkevm.address-rollup: "0xeE6F5B532b67ee594B372f7a3eBD276A45Ea6777" +zkevm.address-ger-manager: "0x33ff0546a9ce00D9b2B43Fe52Eab336D919eAD36" + +zkevm.l1-matic-contract-address: "0xdC66C280f5E8bBbd2F2d92FaD1489863c8F55915" +zkevm.l1-block-range: 20000 +zkevm.l1-query-delay: 6000 +zkevm.l1-first-block: 6411787 +zkevm.rpc-ratelimit: 250 +zkevm.data-stream-port: 6900 +zkevm.datastream-version: 2 +zkevm.data-stream-host: "127.0.0.1" +# zkevm.sequencer-initial-fork-id: 9 +zkevm.executor-strict: false +zkevm.witness-full: false +zkevm.sequencer-block-seal-time: "5s" +zkevm.sequencer-batch-seal-time: "15m" +zkevm.allow-pre-eip155-transactions: true +zkevm.disable-virtual-counters: true + +log.console.verbosity: warn + +zkevm.l1-sync-start-block: 6032365 + +externalcl: true +http.api : ["eth","debug","net","trace","web3","erigon","txpool","zkevm"] +http.addr: "0.0.0.0" +http.port: 8123 +http.vhosts: '*' +http.corsdomain: '*' + 
+ws: true +rpc.batch.limit: 500 diff --git a/zk/tests/nightly-l1-recovery/network8-sync-config.yaml b/zk/tests/nightly-l1-recovery/network8-sync-config.yaml new file mode 100644 index 00000000000..5a5a7aee8b9 --- /dev/null +++ b/zk/tests/nightly-l1-recovery/network8-sync-config.yaml @@ -0,0 +1,43 @@ +datadir : './datadir' +chain : "dynamic-integration8" +http : true +private.api.addr : "localhost:9096" +zkevm.l2-chain-id: 779 +zkevm.l2-sequencer-rpc-url: "http://erigon:8545" +zkevm.l2-datastreamer-url: "erigon:6900" +zkevm.l1-chain-id: 11155111 +zkevm.l1-rpc-url: "http://cache:6969?endpoint=https://rpc.sepolia.org&chainid=779" + +zkevm.address-sequencer: "0x153724F17B1eb206e31CAbA82f6b45E865879D94" +zkevm.address-zkevm: "0xA24686d989DCd70fBb4D8311694820d74872f061" +zkevm.address-admin: "0xe859276098f208D003ca6904C6cC26629Ee364Ce" +zkevm.address-rollup: "0xeE6F5B532b67ee594B372f7a3eBD276A45Ea6777" +zkevm.address-ger-manager: "0x33ff0546a9ce00D9b2B43Fe52Eab336D919eAD36" + +zkevm.l1-matic-contract-address: "0xdC66C280f5E8bBbd2F2d92FaD1489863c8F55915" +zkevm.l1-block-range: 20000 +zkevm.l1-query-delay: 6000 +zkevm.l1-first-block: 6411787 +zkevm.rpc-ratelimit: 250 +#zkevm.data-stream-port: 6900 +zkevm.datastream-version: 2 +#zkevm.data-stream-host: "127.0.0.1" +# zkevm.sequencer-initial-fork-id: 9 +zkevm.executor-strict: false +zkevm.witness-full: false +zkevm.sequencer-block-seal-time: "5s" +zkevm.sequencer-batch-seal-time: "15m" +zkevm.allow-pre-eip155-transactions: true +zkevm.disable-virtual-counters: true + +txpool.disable: true + +externalcl: true +http.api : ["eth","debug","net","trace","web3","erigon","txpool","zkevm"] +http.addr: "0.0.0.0" +http.port: 8123 +http.vhosts: '*' +http.corsdomain: '*' + +ws: true +rpc.batch.limit: 500 diff --git a/zk/tests/zk_counters_test.go b/zk/tests/zk_counters_test.go index f4be1f2a36a..dcc2f38d8e0 100644 --- a/zk/tests/zk_counters_test.go +++ b/zk/tests/zk_counters_test.go @@ -348,6 +348,8 @@ func runTest(t *testing.T, 
blockReader services.FullBlockReader, test vector, er if err = txCounters.ProcessTx(ibs, result.ReturnData); err != nil { t.Fatal(err) } + + batchCollector.UpdateExecutionAndProcessingCountersCache(txCounters) } } diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index 602cf06ec56..96e7b469957 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -212,18 +212,17 @@ func (r DiscardReason) String() string { // metaTx holds transaction and some metadata type metaTx struct { - Tx *types.TxSlot - minFeeCap uint256.Int - nonceDistance uint64 // how far their nonces are from the state's nonce for the sender - cumulativeBalanceDistance uint64 // how far their cumulativeRequiredBalance are from the state's balance for the sender - minTip uint64 - bestIndex int - worstIndex int - timestamp uint64 // when it was added to pool - subPool SubPoolMarker - currentSubPool SubPoolType - alreadyYielded bool - overflowZkCountersDuringExecution bool + Tx *types.TxSlot + minFeeCap uint256.Int + nonceDistance uint64 // how far their nonces are from the state's nonce for the sender + cumulativeBalanceDistance uint64 // how far their cumulativeRequiredBalance are from the state's balance for the sender + minTip uint64 + bestIndex int + worstIndex int + timestamp uint64 // when it was added to pool + subPool SubPoolMarker + currentSubPool SubPoolType + alreadyYielded bool } func newMetaTx(slot *types.TxSlot, isLocal bool, timestmap uint64) *metaTx { @@ -304,6 +303,7 @@ type TxPool struct { newPendingTxs chan types.Announcements // notifications about new txs in Pending sub-pool all *BySenderAndNonce // senderID => (sorted map of tx nonce => *metaTx) deletedTxs []*metaTx // list of discarded txs since last db commit + overflowZkCounters []*metaTx promoted types.Announcements cfg txpoolcfg.Config chainID uint256.Int @@ -327,6 +327,13 @@ type TxPool struct { limbo *Limbo } +func CreateTxPoolBuckets(tx kv.RwTx) error { + if err := tx.CreateBucket(TablePoolLimbo); err != nil { + return err 
+ } + return nil +} + func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config, ethCfg *ethconfig.Config, cache kvcache.Cache, chainID uint256.Int, shanghaiTime *big.Int, londonBlock *big.Int, aclDB kv.RwDB) (*TxPool, error) { var err error localsHistory, err := simplelru.NewLRU[string, struct{}](10_000, nil) @@ -411,14 +418,7 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang return err } - var baseFee uint64 - if !p.ethCfg.AllowFreeTransactions { - baseFee = stateChanges.PendingBlockBaseFee - } else { - baseFee = uint64(0) - } - - pendingBaseFee, baseFeeChanged := p.setBaseFee(baseFee) + pendingBaseFee, baseFeeChanged := p.setBaseFee(stateChanges.PendingBlockBaseFee, p.ethCfg.AllowFreeTransactions) // Update pendingBase for all pool queues and slices if baseFeeChanged { p.pending.best.pendingBaseFee = pendingBaseFee @@ -468,7 +468,7 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang //log.Debug("[txpool] new block", "unwinded", len(unwindTxs.txs), "mined", len(minedTxs.txs), "baseFee", baseFee, "blockHeight", blockHeight) - announcements, err := addTxsOnNewBlock( + announcements, err := p.addTxsOnNewBlock( blockNum, cacheView, stateChanges, @@ -492,7 +492,6 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang p.pending.EnforceWorstInvariants() p.baseFee.EnforceInvariants() p.queued.EnforceInvariants() - promoteZk(p.pending, p.baseFee, p.queued, pendingBaseFee, p.discardLocked, &announcements) p.pending.EnforceBestInvariants() p.promoted.Reset() p.promoted.AppendOther(announcements) @@ -558,7 +557,7 @@ func (p *TxPool) processRemoteTxs(ctx context.Context) error { return err } - announcements, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + announcements, _, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, 
p.addLocked, p.discardLocked, true) if err != nil { return err @@ -944,7 +943,7 @@ func (p *TxPool) AddLocalTxs(ctx context.Context, newTransactions types.TxSlots, return nil, err } - announcements, addReasons, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, + announcements, addReasons, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, newTxs, p.pendingBaseFee.Load(), p.blockGasLimit.Load(), p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, true) if err == nil { for i, reason := range addReasons { @@ -989,7 +988,7 @@ func (p *TxPool) cache() kvcache.Cache { return p._stateCache } -func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, +func (p *TxPool) addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee, blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, byNonce *BySenderAndNonce, byHash map[string]*metaTx, add func(*metaTx, *types.Announcements) DiscardReason, discard func(*metaTx, DiscardReason), collect bool) (types.Announcements, []DiscardReason, error) { @@ -1034,24 +1033,44 @@ func addTxs(blockNum uint64, cacheView kvcache.CacheView, senders *sendersBatch, sendersWithChangedState[mt.Tx.SenderID] = struct{}{} } + for _, mt := range p.overflowZkCounters { + pending.Remove(mt) + discard(mt, OverflowZkCounters) + sendersWithChangedState[mt.Tx.SenderID] = struct{}{} + } + p.overflowZkCounters = p.overflowZkCounters[:0] + for senderID := range sendersWithChangedState { nonce, balance, err := senders.info(cacheView, senderID) if err != nil { return announcements, discardReasons, err } - onSenderStateChange(senderID, nonce, balance, byNonce, + p.onSenderStateChange(senderID, nonce, balance, byNonce, protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard) } - promoteZk(pending, baseFee, queued, pendingBaseFee, discard, &announcements) - pending.EnforceBestInvariants() + promote(pending, 
baseFee, queued, pendingBaseFee, discard, &announcements) return announcements, discardReasons, nil } -func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges *remote.StateChangeBatch, - senders *sendersBatch, newTxs types.TxSlots, pendingBaseFee uint64, blockGasLimit uint64, - pending *PendingPool, baseFee, queued *SubPool, - byNonce *BySenderAndNonce, byHash map[string]*metaTx, sendersWithChangedStateBeforeLimboTrim *LimboSendersWithChangedState, add func(*metaTx, *types.Announcements) DiscardReason, discard func(*metaTx, DiscardReason)) (types.Announcements, error) { + +func (p *TxPool) addTxsOnNewBlock( + blockNum uint64, + cacheView kvcache.CacheView, + stateChanges *remote.StateChangeBatch, + senders *sendersBatch, + newTxs types.TxSlots, + pendingBaseFee uint64, + blockGasLimit uint64, + pending *PendingPool, + baseFee, + queued *SubPool, + byNonce *BySenderAndNonce, + byHash map[string]*metaTx, + sendersWithChangedStateBeforeLimboTrim *LimboSendersWithChangedState, + add func(*metaTx, *types.Announcements) DiscardReason, + discard func(*metaTx, DiscardReason), +) (types.Announcements, error) { protocolBaseFee := calcProtocolBaseFee(pendingBaseFee) if assert.Enable { for _, txn := range newTxs.Txs { @@ -1109,22 +1128,40 @@ func addTxsOnNewBlock(blockNum uint64, cacheView kvcache.CacheView, stateChanges } } + for _, mt := range p.overflowZkCounters { + pending.Remove(mt) + discard(mt, OverflowZkCounters) + sendersWithChangedState[mt.Tx.SenderID] = struct{}{} + } + p.overflowZkCounters = p.overflowZkCounters[:0] + for senderID := range sendersWithChangedState { nonce, balance, err := senders.info(cacheView, senderID) if err != nil { return announcements, err } - onSenderStateChange(senderID, nonce, balance, byNonce, + p.onSenderStateChange(senderID, nonce, balance, byNonce, protocolBaseFee, blockGasLimit, pending, baseFee, queued, discard) } + promote(pending, baseFee, queued, pendingBaseFee, discard, &announcements) + return 
announcements, nil } -func (p *TxPool) setBaseFee(baseFee uint64) (uint64, bool) { +func (p *TxPool) setBaseFee(baseFee uint64, allowFreeTransactions bool) (uint64, bool) { changed := false changed = baseFee != p.pendingBaseFee.Load() p.pendingBaseFee.Store(baseFee) + if allowFreeTransactions { + changed = uint64(0) != p.pendingBaseFee.Load() + p.pendingBaseFee.Store(0) + return 0, changed + } + + changed = baseFee != p.pendingBaseFee.Load() + p.pendingBaseFee.Store(baseFee) + return p.pendingBaseFee.Load(), changed } @@ -1162,6 +1199,9 @@ func (p *TxPool) addLocked(mt *metaTx, announcements *types.Announcements) Disca } p.discardLocked(found, ReplacedByHigherTip) + } else if p.pending.IsFull() { + // new transaction will be denied if pending pool is full unless it will replace an old transaction + return PendingPoolOverflow } p.byHash[string(mt.Tx.IDHash[:])] = mt @@ -1589,6 +1629,11 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { if err != nil { return err } + + if err = p.fromDBLimbo(ctx, tx, cacheView); err != nil { + return err + } + it, err := tx.Range(kv.RecentLocalTransaction, nil, nil) if err != nil { return err @@ -1632,7 +1677,7 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { isLocalTx := p.isLocalLRU.Contains(string(k)) if reason := p.validateTx(txn, isLocalTx, cacheView, addr); reason != NotSet && reason != Success { - return nil + continue } txs.Resize(uint(i + 1)) txs.Txs[i] = txn @@ -1655,16 +1700,12 @@ func (p *TxPool) fromDB(ctx context.Context, tx kv.Tx, coreTx kv.Tx) error { if err != nil { return err } - if _, _, err := addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, + if _, _, err := p.addTxs(p.lastSeenBlock.Load(), cacheView, p.senders, txs, pendingBaseFee, math.MaxUint64 /* blockGasLimit */, p.pending, p.baseFee, p.queued, p.all, p.byHash, p.addLocked, p.discardLocked, false); err != nil { return err } p.pendingBaseFee.Store(pendingBaseFee) - if err = 
p.fromDBLimbo(ctx, tx, cacheView); err != nil { - return err - } - return nil } func LastSeenBlock(tx kv.Getter) (uint64, error) { @@ -2082,10 +2123,11 @@ func (b *BySenderAndNonce) replaceOrInsert(mt *metaTx) *metaTx { // It's more expensive to maintain "slice sort" invariant, but it allow do cheap copy of // pending.best slice for mining (because we consider txs and metaTx are immutable) type PendingPool struct { - best *bestSlice - worst *WorstQueue - limit int - t SubPoolType + sorted bool // means `PendingPool.best` is sorted or not + best *bestSlice + worst *WorstQueue + limit int + t SubPoolType } func NewPendingSubPool(t SubPoolType, limit int) *PendingPool { @@ -2122,7 +2164,10 @@ func (p *PendingPool) EnforceWorstInvariants() { heap.Init(p.worst) } func (p *PendingPool) EnforceBestInvariants() { - sort.Sort(p.best) + if !p.sorted { + sort.Sort(p.best) + p.sorted = true + } } func (p *PendingPool) Best() *metaTx { //nolint @@ -2147,8 +2192,8 @@ func (p *PendingPool) PopWorst() *metaTx { //nolint func (p *PendingPool) Updated(mt *metaTx) { heap.Fix(p.worst, mt.worstIndex) } -func (p *PendingPool) Len() int { return len(p.best.ms) } - +func (p *PendingPool) Len() int { return len(p.best.ms) } +func (p *PendingPool) IsFull() bool { return p.Len() >= p.limit } func (p *PendingPool) Remove(i *metaTx) { if i.worstIndex >= 0 { heap.Remove(p.worst, i.worstIndex) @@ -2156,6 +2201,9 @@ func (p *PendingPool) Remove(i *metaTx) { if i.bestIndex >= 0 { p.best.UnsafeRemove(i) } + if i.bestIndex != p.best.Len()-1 { + p.sorted = false + } i.currentSubPool = 0 } @@ -2166,6 +2214,7 @@ func (p *PendingPool) Add(i *metaTx) { i.currentSubPool = p.t heap.Push(p.worst, i) p.best.UnsafeAdd(i) + p.sorted = false } func (p *PendingPool) DebugPrint(prefix string) { for i, it := range p.best.ms { diff --git a/zk/txpool/pool_zk.go b/zk/txpool/pool_zk.go index 740842ea8a0..ce106ed1188 100644 --- a/zk/txpool/pool_zk.go +++ b/zk/txpool/pool_zk.go @@ -2,6 +2,7 @@ package txpool import ( 
"bytes" + "context" "fmt" mapset "github.com/deckarep/golang-set/v2" @@ -13,8 +14,8 @@ import ( "github.com/ledgerwatch/erigon-lib/types" types2 "github.com/ledgerwatch/erigon-lib/types" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/zk/utils" + "github.com/ledgerwatch/log/v3" ) /* @@ -34,7 +35,7 @@ func calcProtocolBaseFee(baseFee uint64) uint64 { // which sub pool they will need to go to. Sice this depends on other transactions from the same sender by with lower // nonces, and also affect other transactions from the same sender with higher nonce, it loops through all transactions // for a given senderID -func onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint256.Int, byNonce *BySenderAndNonce, +func (p *TxPool) onSenderStateChange(senderID uint64, senderNonce uint64, senderBalance uint256.Int, byNonce *BySenderAndNonce, protocolBaseFee, blockGasLimit uint64, pending *PendingPool, baseFee, queued *SubPool, discard func(*metaTx, DiscardReason)) { noGapsNonce := senderNonce cumulativeRequiredBalance := uint256.NewInt(0) @@ -170,6 +171,8 @@ func (p *TxPool) best(n uint16, txs *types.TxsRlp, tx kv.Tx, onTopOf, availableG var toRemove []*metaTx count := 0 + p.pending.EnforceBestInvariants() + for i := 0; count < int(n) && i < len(best.ms); i++ { // if we wouldn't have enough gas for a standard transaction then quit out early if availableGas < fixedgas.TxGas { @@ -257,29 +260,26 @@ func (p *TxPool) MarkForDiscardFromPendingBest(txHash common.Hash) { for i := 0; i < len(best.ms); i++ { mt := best.ms[i] if bytes.Equal(mt.Tx.IDHash[:], txHash[:]) { - mt.overflowZkCountersDuringExecution = true + p.overflowZkCounters = append(p.overflowZkCounters, mt) break } } } -// Discard a metaTx from the best pending pool if it has overflow the zk-counters during execution -func promoteZk(pending *PendingPool, baseFee, queued *SubPool, pendingBaseFee uint64, discard func(*metaTx, DiscardReason), 
announcements *types.Announcements) { - invalidMts := []*metaTx{} - - for i := 0; i < len(pending.best.ms); i++ { - mt := pending.best.ms[i] - if mt.overflowZkCountersDuringExecution { - invalidMts = append(invalidMts, mt) +func (p *TxPool) StartIfNotStarted(ctx context.Context, txPoolDb kv.RoDB, coreTx kv.Tx) error { + if !p.started.Load() { + txPoolDbTx, err := txPoolDb.BeginRo(ctx) + if err != nil { + return err } - } + defer txPoolDbTx.Rollback() - for _, mt := range invalidMts { - pending.Remove(mt) - discard(mt, OverflowZkCounters) + if err := p.fromDB(ctx, txPoolDbTx, coreTx); err != nil { + return fmt.Errorf("loading txs from DB: %w", err) + } } - promote(pending, baseFee, queued, pendingBaseFee, discard, announcements) + return nil } func markAsLocal(txSlots *types2.TxSlots) { diff --git a/zk/txpool/pool_zk_limbo.go b/zk/txpool/pool_zk_limbo.go index 8e72ca86f41..a18931259d1 100644 --- a/zk/txpool/pool_zk_limbo.go +++ b/zk/txpool/pool_zk_limbo.go @@ -532,9 +532,14 @@ func (p *TxPool) fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca txn.SenderID, txn.Traced = p.senders.getOrCreateID(addr) binary.BigEndian.Uint64(v) - if reason := p.validateTx(txn, true, cacheView, addr); reason != NotSet && reason != Success { - return nil - } + // ValidateTx function validates a tx against current network state. + // Limbo transactions are expected to be invalid according to current network state. + // That's why there is no point to check it while recovering the pool from a database. + // These transactions may become valid after some of the current tx in the pool are executed + // so leave the decision whether a limbo transaction (or any other transaction that has been unwound) to the execution stage. 
+ // if reason := p.validateTx(txn, true, cacheView, addr); reason != NotSet && reason != Success { + // return nil + // } p.limbo.limboSlots.Append(txn, addr[:], true) case DbKeyBatchesPrefix: batchesI := binary.LittleEndian.Uint32(k[1:5]) @@ -579,6 +584,8 @@ func (p *TxPool) fromDBLimbo(ctx context.Context, tx kv.Tx, cacheView kvcache.Ca } else { p.limbo.awaitingBlockHandling.Store(true) } + default: + panic("Invalid key") } } diff --git a/zk/txpool/pool_zk_limbo_test.go b/zk/txpool/pool_zk_limbo_test.go index 45d864acae8..faeaf3a9a7e 100644 --- a/zk/txpool/pool_zk_limbo_test.go +++ b/zk/txpool/pool_zk_limbo_test.go @@ -23,9 +23,24 @@ import ( func Test_Persistency(t *testing.T) { dbPath := "/tmp/limbo-persistency" - db, tx, aclDb := initDb(t, dbPath) + + pSource := store(t, dbPath) + + restoreRo(t, dbPath, pSource) + + t.Cleanup(func() { + os.RemoveAll(dbPath) + }) +} + +func store(t *testing.T, dbPath string) *TxPool { + db, tx, aclDb := initDb(t, dbPath, true) + defer db.Close() + defer tx.Rollback() + defer aclDb.Close() newTxs := make(chan types.Announcements, 1024) + defer close(newTxs) ethCfg := ðconfig.Defaults ethCfg.Zk.Limbo = true @@ -108,7 +123,8 @@ func Test_Persistency(t *testing.T) { pSource.limbo.awaitingBlockHandling.Store(true) - pSource.flushLockedLimbo(tx) + err = pSource.flushLockedLimbo(tx) + assert.NilError(t, err) // restore pTarget, err := New(make(chan types.Announcements), db, txpoolcfg.DefaultConfig, ethCfg, kvcache.NewDummy(), *uint256.NewInt(1101), big.NewInt(0), big.NewInt(0), aclDb) @@ -116,7 +132,12 @@ func Test_Persistency(t *testing.T) { cacheView, err := pTarget._stateCache.View(context.Background(), tx) assert.NilError(t, err) - pTarget.fromDBLimbo(context.Background(), tx, cacheView) + + tx.CreateBucket(TablePoolLimbo) + err = pTarget.fromDBLimbo(context.Background(), tx, cacheView) + assert.NilError(t, err) + err = pTarget.fromDBLimbo(context.Background(), tx, cacheView) + assert.NilError(t, err) assert.DeepEqual(t, 
pSource.limbo.invalidTxsMap, pTarget.limbo.invalidTxsMap) assert.DeepEqual(t, pSource.limbo.limboSlots, pTarget.limbo.limboSlots) @@ -125,18 +146,51 @@ func Test_Persistency(t *testing.T) { err = tx.Commit() assert.NilError(t, err) + return pSource +} - t.Cleanup(func() { - close(newTxs) - db.Close() - os.RemoveAll(dbPath) +func restoreRo(t *testing.T, dbPath string, pSource *TxPool) { + db, tx, aclDb := initDb(t, dbPath, false) + defer db.Close() + defer tx.Rollback() + defer aclDb.Close() + + err := tx.CreateBucket(TablePoolLimbo) + assert.NilError(t, err) + + tx.Commit() // Close the tx because we don't need it + + newTxs := make(chan types.Announcements, 1024) + defer close(newTxs) + + pTarget, err := New(newTxs, db, txpoolcfg.DefaultConfig, ðconfig.Defaults, kvcache.NewDummy(), *uint256.NewInt(1101), big.NewInt(0), big.NewInt(0), aclDb) + assert.NilError(t, err) + + err = db.View(context.Background(), func(tx kv.Tx) error { + cacheView, err := pTarget._stateCache.View(context.Background(), tx) + if err != nil { + return err + } + + return pTarget.fromDBLimbo(context.Background(), tx, cacheView) }) + assert.NilError(t, err) + + assert.DeepEqual(t, pSource.limbo.invalidTxsMap, pTarget.limbo.invalidTxsMap) + assert.DeepEqual(t, pSource.limbo.limboSlots, pTarget.limbo.limboSlots) + assert.DeepEqual(t, pSource.limbo.limboBatches, pTarget.limbo.limboBatches) + assert.Equal(t, pSource.limbo.awaitingBlockHandling.Load(), pTarget.limbo.awaitingBlockHandling.Load()) + + err = tx.Commit() + assert.NilError(t, err) } -func initDb(t *testing.T, dbPath string) (kv.RwDB, kv.RwTx, kv.RwDB) { +func initDb(t *testing.T, dbPath string, wipe bool) (kv.RwDB, kv.RwTx, kv.RwDB) { ctx := context.Background() - os.RemoveAll(dbPath) + if wipe { + os.RemoveAll(dbPath) + } dbOpts := mdbx.NewMDBX(log.Root()).Path(dbPath).Label(kv.ChainDB).GrowthStep(16 * datasize.MB).RoTxsLimiter(semaphore.NewWeighted(128)) database, err := dbOpts.Open(ctx) @@ -146,12 +200,12 @@ func initDb(t *testing.T, 
dbPath string) (kv.RwDB, kv.RwTx, kv.RwDB) { txRw, err := database.BeginRw(ctx) if err != nil { - t.Fatalf("Cannot craete db transaction") + t.Fatalf("Cannot create db transaction %e", err) } aclDB, err := OpenACLDB(ctx, dbPath) if err != nil { - t.Fatalf("Cannot craete acl db") + t.Fatalf("Cannot create acl db %e", err) } return database, txRw, aclDB diff --git a/zk/utils/utils.go b/zk/utils/utils.go index 96570599f1a..fd7b952c496 100644 --- a/zk/utils/utils.go +++ b/zk/utils/utils.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/ledgerwatch/erigon-lib/chain" - "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/state" @@ -81,7 +80,7 @@ type ForkConfigWriter interface { } type DbReader interface { - GetLocalExitRootForBatchNo(batchNo uint64) (common.Hash, error) + GetLocalExitRootForBatchNo(batchNo uint64) (libcommon.Hash, error) GetHighestBlockInBatch(batchNo uint64) (uint64, error) } diff --git a/zk/witness/witness.go b/zk/witness/witness.go index cf6d6395c7d..e9bb68bd505 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -19,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/core/systemcontracts" eritypes "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" db2 "github.com/ledgerwatch/erigon/smt/pkg/db" @@ -48,6 +49,7 @@ type Generator struct { agg *libstate.Aggregator blockReader services.FullBlockReader chainCfg *chain.Config + zkConfig *ethconfig.Zk engine consensus.EngineReader } @@ -57,6 +59,7 @@ func NewGenerator( agg *libstate.Aggregator, blockReader services.FullBlockReader, chainCfg *chain.Config, + zkConfig *ethconfig.Zk, engine consensus.EngineReader, ) *Generator { return &Generator{ @@ -65,11 +68,12 @@ func NewGenerator( agg: agg, blockReader: 
blockReader, chainCfg: chainCfg, + zkConfig: zkConfig, engine: engine, } } -func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) ([]byte, error) { +func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum uint64, debug, witnessFull bool) (witness []byte, err error) { t := zkUtils.StartTimer("witness", "getwitnessbybatch") defer t.LogTimer() @@ -78,10 +82,12 @@ func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum ui if err != nil { return nil, err } - var witness []byte if badBatch { // we need the header of the block prior to this batch to build up the blocks previousHeight, err := reader.GetHighestBlockInBatch(batchNum - 1) + if err != nil { + return nil, err + } previousHeader := rawdb.ReadHeaderByNumber(tx, previousHeight) if previousHeader == nil { return nil, fmt.Errorf("failed to get header for block %d", previousHeight) @@ -120,8 +126,7 @@ func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum ui blocks[i] = block } - witness, err = g.generateWitness(tx, ctx, blocks, debug, witnessFull) - + return g.generateWitness(tx, ctx, blocks, debug, witnessFull) } else { blockNumbers, err := reader.GetL2BlockNosByBatch(batchNum) if err != nil { @@ -140,10 +145,8 @@ func (g *Generator) GetWitnessByBatch(tx kv.Tx, ctx context.Context, batchNum ui blocks[idx] = block idx++ } - witness, err = g.generateWitness(tx, ctx, blocks, debug, witnessFull) + return g.generateWitness(tx, ctx, blocks, debug, witnessFull) } - - return witness, nil } func (g *Generator) GetWitnessByBlockRange(tx kv.Tx, ctx context.Context, startBlock, endBlock uint64, debug, witnessFull bool) ([]byte, error) { @@ -187,7 +190,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, blocks []*eri return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, endBlock) } - batch := membatchwithdb.NewMemoryBatch(tx, g.dirs.Tmp, log.New()) 
+ batch := membatchwithdb.NewMemoryBatchWithSize(tx, g.dirs.Tmp, g.zkConfig.WitnessMemdbSize) defer batch.Rollback() if err = populateDbTables(batch); err != nil { return nil, err @@ -208,14 +211,13 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, blocks []*eri hashStageCfg := stagedsync.StageHashStateCfg(nil, g.dirs, g.historyV3, g.agg) if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, log.New()); err != nil { - return nil, err + return nil, fmt.Errorf("unwind hash state: %w", err) } interHashStageCfg := zkStages.StageZkInterHashesCfg(nil, true, true, false, g.dirs.Tmp, g.blockReader, nil, g.historyV3, g.agg, nil) - err = zkStages.UnwindZkIntermediateHashesStage(unwindState, stageState, batch, interHashStageCfg, ctx) - if err != nil { - return nil, err + if err = zkStages.UnwindZkIntermediateHashesStage(unwindState, stageState, batch, interHashStageCfg, ctx); err != nil { + return nil, fmt.Errorf("unwind intermediate hashes: %w", err) } tx = batch @@ -333,7 +335,7 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, blocks []*eri witness, err := smt.BuildWitness(smtTrie, rl, ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("build witness: %v", err) } return getWitnessBytes(witness, debug) @@ -348,7 +350,7 @@ func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { return buf.Bytes(), nil } -func populateDbTables(batch *membatchwithdb.MemoryMutation) error { +func populateDbTables(batch kv.RwTx) error { tables := []string{ db2.TableSmt, db2.TableAccountValues,