From 23e12d687d9487beb82c0cd686b9219765d155b5 Mon Sep 17 00:00:00 2001 From: VM Date: Tue, 18 Jun 2024 20:46:34 +0800 Subject: [PATCH] pathdb: load diff layers from freezer db --- cmd/geth/chaincmd.go | 38 ++++++ cmd/geth/dbcmd.go | 23 ++++ cmd/geth/main.go | 1 + trie/database.go | 18 +++ trie/triedb/pathdb/database.go | 230 ++++++++++++++++++++++++++++++++ trie/triedb/pathdb/disklayer.go | 38 ++++++ trie/triedb/pathdb/history.go | 34 +++++ trie/trienode/node.go | 5 + trie/triestate/state.go | 166 +++++++++++++++++++++++ 9 files changed, 553 insertions(+) diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 0733a29392..21e884ca53 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -39,6 +39,9 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" + "github.com/olekukonko/tablewriter" "github.com/urfave/cli/v2" ) @@ -156,6 +159,18 @@ It's deprecated, please use "geth db import" instead. This command dumps out the state for a given block (or latest, if none provided). `, } + + dumpRootHashCommand = &cli.Command{ + Action: dumpAllRootHashInPath, + Name: "dump-roothash", + Usage: "Dump all available state root hash in path mode", + Flags: flags.Merge([]cli.Flag{}, utils.DatabaseFlags), + Description: ` +The dump-roothash command dump all available state root hash in path mode. +If you use "dump" command in path mode, please note that it only keeps at most 129 blocks which belongs to diffLayer or diskLayer. +Therefore, you must specify the blockNumber or blockHash that locates in diffLayer or diskLayer. +"geth" will print all available blockNumber and related block state root hash, and you can query block hash by block number. 
+`} ) // initGenesis will initialise the given JSON format genesis file and writes it as @@ -483,3 +498,26 @@ func hashish(x string) bool { _, err := strconv.Atoi(x) return err != nil } + +func dumpAllRootHashInPath(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.ReadOnly}) + defer triedb.Close() + + scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db) + if err != nil { + return err + } + if scheme == rawdb.HashScheme { + return errors.New("incorrect state scheme, you should use it in path mode") + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Block Number", "Block State Root Hash"}) + table.AppendBulk(triedb.GetAllRooHash()) + table.Render() + return nil +} diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 7835cc5048..0f872b6278 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -76,6 +76,7 @@ Remove blockchain and state databases`, dbPruneHashTrieCmd, dbTrieGetCmd, dbTrieDeleteCmd, + ancientToDiffLayerCmd, }, } dbInspectCmd = &cli.Command{ @@ -259,6 +260,13 @@ WARNING: This is a low-level operation which may cause database corruption!`, }, utils.NetworkFlags, utils.DatabaseFlags), Description: "Shows metadata about the chain status.", } + ancientToDiffLayerCmd = &cli.Command{ + Name: "ancient-to-dl", + Usage: "Convert the data in ancientDB into diffLayer", + Description: "A convenient test tool to for path db diffLayer converting", + Action: ancientToDiffLayer, + Flags: flags.Merge(utils.DatabaseFlags), + } ) func removeDB(ctx *cli.Context) error { @@ -1090,6 +1098,21 @@ func hbss2pbss(ctx *cli.Context) error { return nil } +func ancientToDiffLayer(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack, true) + defer db.Close() + triedb := 
utils.MakeTrieDatabase(ctx, stack, db, false, true, false) + // triedb := trie.NewDatabase(db, &trie.Config{PathDB: nil}) + defer triedb.Close() + + if err := triedb.DiffLayerConvertTool(); err != nil { + log.Error("Failed to get diff layer from ancient db", "error", err) + } + return nil +} + func pruneHashTrie(ctx *cli.Context) error { if ctx.NArg() != 0 { return fmt.Errorf("required none argument") diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 23761964c4..f4b960b1fa 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -238,6 +238,7 @@ func init() { removedbCommand, dumpCommand, dumpGenesisCommand, + dumpRootHashCommand, // See accountcmd.go: accountCommand, walletCommand, diff --git a/trie/database.go b/trie/database.go index a49049c61b..fdad14c386 100644 --- a/trie/database.go +++ b/trie/database.go @@ -363,3 +363,21 @@ func (db *Database) IsVerkle() bool { func (db *Database) Config() *Config { return db.config } + +// DiffLayerConvertTool +func (db *Database) DiffLayerConvertTool() error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.ConvertTool1(&trieLoader{db: db}) +} + +func (db *Database) GetAllRooHash() [][]string { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + log.Error("Not supported") + return nil + } + return pdb.GetAllRooHash() +} diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go index 4139dfc8b3..d3c7f8380e 100644 --- a/trie/triedb/pathdb/database.go +++ b/trie/triedb/pathdb/database.go @@ -21,6 +21,9 @@ import ( "fmt" "io" "os" + "sort" + "strconv" + "strings" "sync" "time" @@ -159,6 +162,7 @@ type Database struct { freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests lock sync.RWMutex // Lock to prevent mutations from happening at the same time capLock sync.Mutex + tree2 *layerTree } // New attempts to load an already existing layer from a persistent key-value @@ -580,3 +584,229 @@ func (db 
*Database) DeleteTrieJournal(writer ethdb.KeyValueWriter) error { } return nil } + +func (db *Database) ConvertTool1(loader triestate.TrieLoader) error { + dl := db.tree.bottom() + stateID := rawdb.ReadStateID(db.diskdb, dl.rootHash()) + persistentStateID := rawdb.ReadPersistentStateID(db.diskdb) + + ancient, err := db.diskdb.AncientDatadir() + if err != nil { + log.Error("Failed to get ancient datadir", "error", err) + return err + } + freezer, err := rawdb.NewStateFreezer(ancient, true) + if err != nil { + log.Error("Failed to new state freezer", "error", err) + return err + } + db.freezer = freezer + + freezerLength, err := db.freezer.Ancients() + if err != nil { + log.Error("Failed to get freezer ancients", "error", err) + return err + } + tail, err := db.freezer.Tail() + if err != nil { + log.Error("Failed to get freezer tail", "error", err) + return err + } + log.Info("Print ancient db meta", "state id", *stateID, "persistent state id", persistentStateID, + "freezer length", freezerLength, "freezer tail", tail, "config", db.config.StateHistory, + "bottom stateID", dl.stateID(), "bottom root", dl.rootHash().String()) + + db.tree2 = newLayerTree(dl) + waitingRecoverNum := freezerLength - tail + start := time.Now() + historySize := 0 + diffSize := uint64(0) + for i := uint64(0); i < waitingRecoverNum; i++ { + h, err := readHistory(db.freezer, *stateID-i) + if err != nil { + if checkError(err) { + log.Info("There are no more states in disk db", "state id", *stateID-i) + continue + } + log.Error("Failed to read history from freezer db", "error", err) + return err + } + historySize += h.Size() + log.Info("print history size", "size", common.StorageSize(h.Size()), "history root", h.meta.root.String(), + "history parent root", h.meta.parent.String(), "current state id", *stateID-i) + + incomplete := make(map[common.Address]struct{}) + for _, addr := range h.meta.incomplete { + incomplete[addr] = struct{}{} + } + states := triestate.New(h.accounts, h.storages, 
incomplete) + + size, err := db.addDiffLayer(h.meta.root, h.meta.parent, *stateID-i, h.meta.block, nil, states) + if err != nil { + log.Error("Failed to add diff layer", "error", err) + return err + } + diffSize += size + } + layerTreeSize := uint64(db.tree2.len() * 32) + log.Info("Succeed to add diff layer", "elapsed", common.PrettyDuration(time.Since(start)), + "waitingRecoverNum", waitingRecoverNum, "total history size", common.StorageSize(historySize), + "total diff size", common.StorageSize(diffSize), "layer tree size", common.StorageSize(layerTreeSize)) + + return nil +} + +func (db *Database) ConvertTool(loader triestate.TrieLoader) error { + dl := db.tree.bottom() + stateID := rawdb.ReadStateID(db.diskdb, dl.rootHash()) + persistentStateID := rawdb.ReadPersistentStateID(db.diskdb) + + ancient, err := db.diskdb.AncientDatadir() + if err != nil { + log.Error("Failed to get ancient datadir", "error", err) + return err + } + freezer, err := rawdb.NewStateFreezer(ancient, true) + if err != nil { + log.Error("Failed to new state freezer", "error", err) + return err + } + db.freezer = freezer + + freezerLength, err := db.freezer.Ancients() + if err != nil { + log.Error("Failed to get freezer ancients", "error", err) + return err + } + tail, err := db.freezer.Tail() + if err != nil { + log.Error("Failed to get freezer tail", "error", err) + return err + } + log.Info("Print ancient db meta", "state id", *stateID, "persistent state id", persistentStateID, + "freezer length", freezerLength, "freezer tail", tail, "config", db.config.StateHistory, + "bottom stateID", dl.stateID(), "bottom root", dl.rootHash().String()) + + db.tree2 = newLayerTree(dl) + waitingRecoverNum := freezerLength - persistentStateID + start := time.Now() + var ( + nodes *trienode.MergedNodeSet + count = uint64(2) + ) + for { + h, err := readHistory(db.freezer, *stateID+count) + if err != nil { + if checkError(err) { + log.Info("There are no more states in disk db", "count", count) + break + } + 
log.Error("Failed to read history from freezer db", "error", err) + return err + } + log.Info("print history size", "size", h.Size(), "history root", h.meta.root.String(), + "history parent root", h.meta.parent.String(), "current state id", *stateID+count) + + if count > 2 { + break + } + dl, nodes, err = dl.apply(dl.rootHash(), h, loader) + if err != nil { + log.Error("Failed to revert", "error", err) + return err + } + db.tree.reset(dl) + + incomplete := make(map[common.Address]struct{}) + for _, addr := range h.meta.incomplete { + incomplete[addr] = struct{}{} + } + states := triestate.New(h.accounts, h.storages, incomplete) + + if _, err = db.addDiffLayer(h.meta.root, h.meta.parent, *stateID+count, h.meta.block, nodes, states); err != nil { + log.Error("Failed to add diff layer", "error", err) + return err + } + count++ + } + // for i := uint64(0); i < 2; i++ { + // h, err := readHistory(db.freezer, *stateID-i) + // if err != nil { + // log.Error("Failed to read history from freezer db", "error", err) + // return err + // } + // log.Info("print history size", "size", h.Size(), "history root", h.meta.root.String(), + // "history parent root", h.meta.parent.String(), "current state id", *stateID-i) + // + // dl, nodes, err = dl.revert1(h, loader) + // if err != nil { + // log.Error("Failed to revert", "error", err) + // return err + // } + // db.tree.reset(dl) + // + // incomplete := make(map[common.Address]struct{}) + // for _, addr := range h.meta.incomplete { + // incomplete[addr] = struct{}{} + // } + // states := triestate.New(h.accounts, h.storages, incomplete) + // + // if err = db.addDiffLayer(h.meta.root, h.meta.parent, *stateID-i, h.meta.block, nodes, states); err != nil { + // log.Error("Failed to add diff layer", "error", err) + // return err + // } + // } + log.Info("Succeed to add diff layer", "elapsed", common.PrettyDuration(time.Since(start)), + "waitingRecoverNum", waitingRecoverNum) + + return nil +} + +func (db *Database) addDiffLayer(root 
common.Hash, parentRoot common.Hash, stateID uint64, block uint64, + nodes *trienode.MergedNodeSet, states *triestate.Set) (uint64, error) { + // Hold the lock to prevent concurrent mutations. + db.lock.Lock() + defer db.lock.Unlock() + + root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot) + if root == parentRoot { + return 0, errors.New("layer cycle") + } + // TODO: parent now is nil + l := newDiffLayer(nil, root, stateID, block, nil, states) + + // TODO: no need to use lock now + // db.tree2.lock.Lock() + db.tree2.layers[l.rootHash()] = l + // db.tree2.lock.Unlock() + + log.Info("done", "layer tree length", db.tree2.len(), "size", common.StorageSize(l.memory)) + return l.memory, nil +} + +func checkError(err error) bool { + if strings.Contains(err.Error(), "state history not found") { + return true + } + return false +} + +func (db *Database) GetAllRooHash() [][]string { + db.lock.Lock() + defer db.lock.Unlock() + + data := make([][]string, 0, len(db.tree.layers)) + for _, v := range db.tree.layers { + if dl, ok := v.(*diffLayer); ok { + data = append(data, []string{fmt.Sprintf("%d", dl.block), dl.rootHash().String()}) + } + } + sort.Slice(data, func(i, j int) bool { + block1, _ := strconv.Atoi(data[i][0]) + block2, _ := strconv.Atoi(data[j][0]) + return block1 > block2 + }) + + data = append(data, []string{"-1", db.tree.bottom().rootHash().String()}) + return data +} diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go index 60c891fd2c..d1bcfade6a 100644 --- a/trie/triedb/pathdb/disklayer.go +++ b/trie/triedb/pathdb/disklayer.go @@ -408,6 +408,44 @@ func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil } +func (dl *diskLayer) apply(prevRoot common.Hash, h *history, loader triestate.TrieLoader) (*diskLayer, *trienode.MergedNodeSet, error) { + // if h.meta.parent != dl.rootHash() { + // return nil, nil, 
errUnexpectedHistory + // } + // Reject if the provided state history is incomplete. It's due to + // a large construct SELF-DESTRUCT which can't be handled because + // of memory limitation. + if len(h.meta.incomplete) > 0 { + return nil, nil, errors.New("incomplete state history") + } + if dl.id == 0 { + return nil, nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable) + } + // Apply the reverse state changes upon the current state. This must + // be done before holding the lock in order to access state in "this" + // layer. + set, err := triestate.ApplyForDiff(prevRoot, h.meta.parent, h.accounts, h.storages, loader) + if err != nil { + log.Error("Failed to apply state diffs", "error", err) + return nil, nil, err + } + // Mark the diskLayer as stale before applying any mutations on top. + dl.lock.Lock() + defer dl.lock.Unlock() + + dl.stale = true + + // nodes := set.Flatten() + // batch := dl.db.diskdb.NewBatch() + // writeNodes(batch, nodes, dl.cleans) + // rawdb.WritePersistentStateID(batch, dl.id+1) + // if err = batch.Write(); err != nil { + // log.Crit("Failed to write states", "err", err) + // } + + return newDiskLayer(h.meta.parent, dl.id+1, dl.db, dl.cleans, dl.buffer), set, nil +} + // setBufferSize sets the node buffer size to the provided value. 
func (dl *diskLayer) setBufferSize(size int) error { dl.lock.RLock() diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go index 7d7cc71f48..be58de0811 100644 --- a/trie/triedb/pathdb/history.go +++ b/trie/triedb/pathdb/history.go @@ -489,6 +489,40 @@ func (h *history) decode(accountData, storageData, accountIndexes, storageIndexe return nil } +func (h *history) Size() int { + size := 0 + + // calculate size of meta + if h.meta != nil { + metaSize := 1 + metaSize += len(h.meta.parent) + len(h.meta.root) + metaSize += 8 + metaSize += len(h.meta.incomplete) * (common.AddressLength) + + size += metaSize + } + // calculate size of accounts + for address, data := range h.accounts { + size += len(address) + size += len(data) + } + // calculate size of accountList + size += len(h.accountList) * common.AddressLength + // calculate size of storages + for _, storage := range h.storages { + for slotHash, data := range storage { + size += len(slotHash) + size += len(data) + } + } + // calculate size of storageList + for _, slots := range h.storageList { + size += len(slots) * common.HashLength + } + + return size +} + // readHistory reads and decodes the state history object by the given id. 
func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) { blob := rawdb.ReadStateHistoryMeta(freezer, id) diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 95315c2e9a..3c58d40fc1 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -197,3 +197,8 @@ func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node { } return nodes } + +// // ReverseFlatten reverts flattened two-dimensional map to merge node set +// func ReverseFlatten(nodes map[common.Hash]map[string]*Node) MergedNodeSet { +// +// } diff --git a/trie/triestate/state.go b/trie/triestate/state.go index 4c47e9c397..7c73c8acaf 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" @@ -142,6 +143,47 @@ func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Addre return ctx.nodes.Flatten(), nil } +func ApplyForDiff(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, + loader TrieLoader) (*trienode.MergedNodeSet, error) { + tr, err := loader.OpenTrie(prevRoot) + if err != nil { + log.Error("Failed to open trie", "error", err) + return nil, err + } + ctx := &context{ + prevRoot: prevRoot, + postRoot: postRoot, + accounts: accounts, + storages: storages, + accountTrie: tr, + nodes: trienode.NewMergedNodeSet(), + } + for addr, account := range accounts { + var err error + if len(account) == 0 { + err = deleteAccountForRecovering(ctx, loader, addr) + } else { + err = updateAccountForRecovering(ctx, loader, addr) + } + // err = updateAccountForRecovering(ctx, loader, addr) + if err != nil { + return nil, fmt.Errorf("failed to apply state, err: 
%w", err) + } + } + root, result, err := tr.Commit(false) + + if err != nil { + return nil, err + } + if root != postRoot { + return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", postRoot, root) + } + if err := ctx.nodes.Merge(result); err != nil { + return nil, err + } + return ctx.nodes, nil +} + // updateAccount the account was present in prev-state, and may or may not // existent in post-state. Apply the reverse diff and verify if the storage // root matches the one in prev-state account. @@ -217,6 +259,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { addrHash := h.hash(addr.Bytes()) blob, err := ctx.accountTrie.Get(addrHash.Bytes()) if err != nil { + log.Error("9") return err } if len(blob) == 0 { @@ -240,6 +283,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { } root, result, err := st.Commit(false) if err != nil { + log.Error("13") return err } if root != types.EmptyRootHash { @@ -256,6 +300,128 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { return ctx.accountTrie.Delete(addrHash.Bytes()) } +// updateAccountForRecovering the account is present in post-state, and may or may not +// be existent in prev-state. Apply the diff and verify if the storage root matches the +// one in post-state account. +func updateAccountForRecovering(ctx *context, loader TrieLoader, addr common.Address) error { + // The account was present in post-state, decode it from the + // 'slim-rlp' format bytes. + h := newHasher() + defer h.release() + + addrHash := h.hash(addr.Bytes()) + post, err := types.FullAccount(ctx.accounts[addr]) + if err != nil { + log.Error("Failed to full account for updating", "error", err, "addr", addr.String()) + return err + } + // The account may or may not be existent in prev-state, try to + // load it and decode if it's found. 
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes()) + if err != nil { + log.Error("Failed to get for updating", "error", err) + return err + } + prev := types.NewEmptyStateAccount() + if len(blob) != 0 { + if err = rlp.DecodeBytes(blob, &prev); err != nil { + log.Error("Failed to decode bytes for updating", "error", err) + return err + } + } + // Apply all storage changes into the prev-state storage trie + st, err := loader.OpenStorageTrie(ctx.prevRoot, addrHash, prev.Root) + if err != nil { + log.Error("Failed to open storage trie for updating", "error", err) + return err + } + for k, v := range ctx.storages[addr] { + if len(v) == 0 { + err = st.Delete(k.Bytes()) + } else { + err = st.Update(k.Bytes(), v) + } + if err != nil { + log.Error("Failed to delete or update", "error", err) + return err + } + } + root, result, err := st.Commit(false) + if err != nil { + log.Error("Failed to commit for updating", "error", err) + return err + } + if root != post.Root { + return errors.New("failed to reset storage trie") + } + if result != nil { + if err = ctx.nodes.Merge(result); err != nil { + log.Error("Failed to merge for updating", "error", err) + return err + } + } + // Write the post-state account into the main trie + full, err := rlp.EncodeToBytes(post) + if err != nil { + log.Error("Failed to encode bytes", "error", err) + return err + } + return ctx.accountTrie.Update(addrHash.Bytes(), full) +} + +// deleteAccountForRecovering the account is not present in post-state, and was expected +// to be existent in prev-state. Apply the diff and verify if the account and storage +// is wiped out correctly. 
+func deleteAccountForRecovering(ctx *context, loader TrieLoader, addr common.Address) error {
+	// The account must be existent in prev-state, load the account
+	h := newHasher()
+	defer h.release()
+
+	addrHash := h.hash(addr.Bytes())
+	blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+	if err != nil {
+		return err
+	}
+	if len(blob) == 0 {
+		return fmt.Errorf("account is nonexistent %#x", addrHash)
+	}
+	var prev types.StateAccount
+	if err = rlp.DecodeBytes(blob, &prev); err != nil {
+		log.Error("Failed to decode bytes for deleting accounts", "error", err)
+		return err
+	}
+	st, err := loader.OpenStorageTrie(ctx.prevRoot, addrHash, prev.Root)
+	if err != nil {
+		log.Error("Failed to open storage trie for del", "error", err)
+		return err
+	}
+	for k, v := range ctx.storages[addr] {
+		if len(v) != 0 {
+			return errors.New("expect storage deletion")
+		}
+		if err = st.Delete(k.Bytes()); err != nil {
+			log.Error("Failed to delete for del", "error", err)
+			return err
+		}
+	}
+	root, result, err := st.Commit(false)
+	if err != nil {
+		log.Error("Failed to commit for del", "error", err)
+		return err
+	}
+	if root != types.EmptyRootHash {
+		return errors.New("failed to clear storage trie")
+	}
+	if result != nil {
+		if err = ctx.nodes.Merge(result); err != nil {
+			log.Error("Failed to merge for del", "error", err)
+			return err
+		}
+	}
+	// Delete the prev-state account from the main trie.
+	return ctx.accountTrie.Delete(addrHash.Bytes())
+}
+
 // hasher is used to compute the sha256 hash of the provided data.
 type hasher struct{ sha crypto.KeccakState }