
First Transaction After a CDK Deployment or Reset

Transaction Assembly

 // Add the first batch of the created rollup
    const newZKEVMContract = (await PolygonconsensusFactory.attach(newZKEVMAddress)) as PolygonZkEVMV2;
    const batchData = {
        transactions: await newZKEVMContract.generateInitializeTransaction(
            rollupID,
            gasTokenAddress,
            gasTokenNetwork,
            gasTokenMetadata as any
        ),
        globalExitRoot: globalExitRoot,
        timestamp: timestampReceipt,
        sequencer: trustedSequencer,
    };

    outputJson.firstBatchData = batchData;

Test Data

"firstBatchData": {
  "transactions": "0xf901e480808401c9c38094d66bd4a99f128cab5723ab98443bdda0a8ae65d280b901c4f811bff700000000000000000000000000000000000000000000000000000000000000010000000000000000000000003ec3d234625cde1e0f3267014e26e193610e50ac0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a40d5f56745a118d0906a34e69aec8c0db1cb8fa000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000084d4158546f6b656e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034d4158000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005ca1ab1e0000000000000000000000000000000000000000000000000000000005ca1ab1e1bff",
  "globalExitRoot": "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5",
  "timestamp": 1731804000,
  "sequencer": "0x85472577b11837bC157F45643047Cb86136985f2"
 }

The first batch consists of:

  1. a zkEVM contract initialization transaction (decoded below)
  2. globalExitRoot
  3. timestamp
  4. trustedSequencer
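
The transactions field in the test data above is the RLP-encoded injected transaction. Its calldata begins with the selector 0xf811bff7, and the arguments appear to decode to networkID = 1, the gas token address, gasTokenNetwork = 0, the L2 global exit root manager, and the gas token metadata — i.e. an initialize call on the L2 bridge (the repeated 0x5ca1ab1e words near the tail look like the protocol's fixed dummy signature for injected transactions). A minimal Go check of that selector, assuming the target really is PolygonZkEVMBridgeV2.initialize:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// firstBatchData.transactions carries calldata starting with 0xf811bff7;
	// if the injected tx targets PolygonZkEVMBridgeV2.initialize, the first
	// four bytes of this hash should match that selector.
	sig := "initialize(uint32,address,uint32,address,address,bytes)"
	fmt.Printf("%x\n", crypto.Keccak256([]byte(sig))[:4])
}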

error: latest Synced BlockNumber (7089137) is higher than the latest Proposed block (1008) in the network

Problem

After the CDK bridge service started, sending a cross-chain transfer reproduced the scrambled networkID data, and the service reported the following error after a restart:

2024-11-16T14:36:29.512Z        INFO    cmd/run.go:59   main network id: 0      {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.512Z        INFO    cmd/run.go:64   l2 network id: 0        {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.542Z        DEBUG   cmd/run.go:102  trusted sequencer URL http://172.18.39.103:8123 {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.542Z        INFO    server/server.go:93     gRPC Server is serving at 9090  {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.542Z        INFO    server/server.go:166    Restful Server is serving at 8080       {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.548Z        INFO    synchronizer/synchronizer.go:104        NetworkID: 0, Synchronization started   {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.549Z        DEBUG   synchronizer/synchronizer.go:119        NetworkID: 0, initial lastBlockSynced: &{ID:3 BlockNumber:7089137 BlockHash:0xbcb0638af2a8014b561ee72c2f568bda819039ca93f16c7de22896fc1037c1a3 ParentHash:0x919c0185dfa6dbc92fb9e13f535c09130d04d1f980749b952b057ffed68f77b6 NetworkID:0 GlobalExitRoots:[] Deposits:[] Claims:[] Tokens:[] VerifiedBatches:[] ActivateEtrog:[] ReceivedAt:2024-11-16 13:48:00 +0000 UTC}     {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.549Z        DEBUG   synchronizer/synchronizer.go:126        NetworkID: 0, syncing...        {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.550Z        INFO    synchronizer/synchronizer.go:543        NetworkID: 0, [checkReorg function] Checking Block 7089137 in L1        {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:36:29.553Z        ERROR   synchronizer/synchronizer.go:546        networkID: 0, error getting latest block synced from blockchain. Block: 7089137, error: Not found%!(EXTRA string=
/src/log/log.go:142 github.com/0xPolygonHermez/zkevm-bridge-service/log.appendStackTraceMaybeArgs()
/src/log/log.go:251 github.com/0xPolygonHermez/zkevm-bridge-service/log.Errorf()
/src/synchronizer/synchronizer.go:546 github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).checkReorg()
/src/synchronizer/synchronizer.go:239 github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).syncBlocks()
/src/synchronizer/synchronizer.go:128 github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).Sync()
/src/cmd/run.go:245 main.runSynchronizer()
)       {"pid": 1, "version": "v0.6.0-RC1"}
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).checkReorg
        /src/synchronizer/synchronizer.go:546
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).syncBlocks
        /src/synchronizer/synchronizer.go:239
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).Sync
        /src/synchronizer/synchronizer.go:128
main.runSynchronizer
        /src/cmd/run.go:245
2024-11-16T14:36:29.553Z        ERROR   synchronizer/synchronizer.go:241        networkID: 0, error checking reorgs. Retrying... Err: Not found {"pid": 1, "version": "v0.6.0-RC1"}
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).syncBlocks
        /src/synchronizer/synchronizer.go:241
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).Sync
        /src/synchronizer/synchronizer.go:128
main.runSynchronizer
        /src/cmd/run.go:245
2024-11-16T14:36:29.553Z        WARN    synchronizer/synchronizer.go:129        networkID: 0, error syncing blocks: networkID: 0, error checking reorgs {"pid": 1, "version": "v0.6.0-RC1"}
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).Sync
        /src/synchronizer/synchronizer.go:129
main.runSynchronizer
        /src/cmd/run.go:245
2024-11-16T14:36:29.554Z        FATAL   synchronizer/synchronizer.go:161        networkID: 0, error: latest Synced BlockNumber (7089137) is higher than the latest Proposed block (1008) in the network {"pid": 1, "version": "v0.6.0-RC1"}
github.com/0xPolygonHermez/zkevm-bridge-service/synchronizer.(*ClientSynchronizer).Sync
        /src/synchronizer/synchronizer.go:161
main.runSynchronizer
        /src/cmd/run.go:245
2024-11-16T14:37:29.896Z        INFO    cmd/run.go:26   Starting application    {"pid": 1, "version": "v0.6.0-RC1", "gitRevision": "002f436", "gitBranch": "HEAD", "goVersion": "go1.21.13", "built": "Thu, 26 Sep 2024 10:05:53 +0000", "os/arch": "linux/amd64"}
2024-11-16T14:37:29.915Z        INFO    pgstorage/utils.go:63   successfully ran 0 migrations   {"pid": 1, "version": "v0.6.0-RC1"}
2024-11-16T14:37:29.923Z        WARN    etherman/etherman.go:187        Claim compressor Address not configured {"pid": 1, "version": "v0.6.0-RC1"}
github.com/0xPolygonHermez/zkevm-bridge-service/etherman.NewL2Client
        /src/etherman/etherman.go:187
main.newEthermans
        /src/cmd/run.go:230
main.start
        /src/cmd/run.go:52
github.com/urfave/cli/v2.(*Command).Run
        /go/pkg/mod/github.com/urfave/cli/v2@v2.27.4/command.go:276
github.com/urfave/cli/v2.(*Command).Run
        /go/pkg/mod/github.com/urfave/cli/v2@v2.27.4/command.go:269
github.com/urfave/cli/v2.(*App).RunContext
        /go/pkg/mod/github.com/urfave/cli/v2@v2.27.4/app.go:333
github.com/urfave/cli/v2.(*App).Run
        /go/pkg/mod/github.com/urfave/cli/v2@v2.27.4/app.go:307
main.main
        /src/cmd/main.go:56
runtime.main
        /usr/local/go/src/runtime/proc.go:267
func (s *ClientSynchronizer) Sync() error {
    lastBlockSynced, err := s.storage.GetLastBlock(s.ctx, s.networkID, nil)
    ...
    if !s.synced {
                // Check latest Block
                header, err := s.etherMan.HeaderByNumber(s.ctx, nil)
                if err != nil {
                    log.Warnf("networkID: %d, error getting latest block from. Error: %s", s.networkID, err.Error())
                    continue
                }
                lastKnownBlock := header.Number.Uint64()
                if lastBlockSynced.BlockNumber == lastKnownBlock && !s.synced {
                    log.Infof("NetworkID %d Synced!", s.networkID)
                    waitDuration = s.cfg.SyncInterval.Duration
                    s.synced = true
                    s.chSynced <- s.networkID
                }
                if lastBlockSynced.BlockNumber > lastKnownBlock {
                    if s.networkID == 0 {
                        log.Fatalf("networkID: %d, error: latest Synced BlockNumber (%d) is higher than the latest Proposed block (%d) in the network", s.networkID, lastBlockSynced.BlockNumber, lastKnownBlock)
                    } 

lastBlockSynced = 7089137
lastKnownBlock = 1008

lastBlockSynced is L1 data while lastKnownBlock is L2 data, which means s.etherMan should be the L1 RPC client; it is currently the L2 RPC client, hence the error.
s.storage.GetLastBlock filters by networkID when querying, so the stored side of the comparison is correct.
s.networkID equals 0 while s.etherMan points at the L2 RPC — the mapping between networkID and etherMan has been scrambled.

func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint, allNetworkIDs []uint) {
    sy, err := synchronizer.NewSynchronizer(ctx, storage, brdigeCtrl, etherman, zkEVMClient, genBlockNumber, chExitRootEventL2, chsExitRootEvent, chSynced, cfg, allNetworkIDs)
    if err != nil {
        log.Fatal(err)
    }
    if err := sy.Sync(); err != nil {
        log.Fatal(err)
    }
}
for i, l2EthermanClient := range l2Ethermans {
        log.Debug("trusted sequencer URL ", c.Etherman.L2URLs[i])
        zkEVMClient := client.NewClient(c.Etherman.L2URLs[i])
        chExitRootEventL2 := make(chan *etherman.GlobalExitRoot)
        chSyncedL2 := make(chan uint)
        chsExitRootEvent = append(chsExitRootEvent, chExitRootEventL2)
        chsSyncedL2 = append(chsSyncedL2, chSyncedL2)
        go runSynchronizer(ctx.Context, 0, bridgeController, l2EthermanClient, c.Synchronizer, storage, zkEVMClient, chExitRootEventL2, nil, chSyncedL2, []uint{})
    }
    chSynced := make(chan uint)
    go runSynchronizer(ctx.Context, c.NetworkConfig.GenBlockNumber, bridgeController, l1Etherman, c.Synchronizer, storage, nil, nil, chsExitRootEvent, chSynced, networkIDs)
func newEthermans(c *config.Config) (*etherman.Client, []*etherman.Client, error) {
    l1Etherman, err := etherman.NewClient(c.Etherman,
        c.NetworkConfig.PolygonBridgeAddress,
        c.NetworkConfig.PolygonZkEVMGlobalExitRootAddress,
        c.NetworkConfig.PolygonRollupManagerAddress)
    if err != nil {
        log.Error("L1 etherman error: ", err)
        return nil, nil, err
    }
    if len(c.L2PolygonBridgeAddresses) != len(c.Etherman.L2URLs) {
        log.Fatal("environment configuration error. zkevm bridge addresses and zkevm node urls mismatch")
    }
    var l2Ethermans []*etherman.Client
    for i, addr := range c.L2PolygonBridgeAddresses {
        l2Etherman, err := etherman.NewL2Client(c.Etherman.L2URLs[i], addr, c.NetworkConfig.L2ClaimCompressorAddress)
        if err != nil {
            log.Error("L2 etherman ", i, c.Etherman.L2URLs[i], ", error: ", err)
            return l1Etherman, nil, err
        }
        l2Ethermans = append(l2Ethermans, l2Etherman)
    }
    return l1Etherman, l2Ethermans, nil
}

For l1Etherman, NetworkID is never assigned, so it remains at its zero value of 0:

func NewClient(cfg Config, polygonBridgeAddr, polygonZkEVMGlobalExitRootAddress, polygonRollupManagerAddress common.Address) (*Client, error) {
    logger := log.WithFields("networkID", 0)
    // Connect to ethereum node
    ethClient, err := ethclient.Dial(cfg.L1URL)

For l2Etherman, construction goes through NewL2Client:

func NewL2Client(url string, polygonBridgeAddr, claimCompressorAddress common.Address) (*Client, error) {
    // Connect to ethereum node
    ethClient, err := ethclient.Dial(url)
    if err != nil {
        log.Errorf("error connecting to %s: %+v", url, err)
        return nil, err
    }
    // Create smc clients
    bridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(polygonBridgeAddr, ethClient)
    if err != nil {
        return nil, err
    }
    oldpolygonBridge, err := oldpolygonzkevmbridge.NewOldpolygonzkevmbridge(polygonBridgeAddr, ethClient)
    if err != nil {
        return nil, err
    }
    var claimCompressor *claimcompressor.Claimcompressor
    if claimCompressorAddress == (common.Address{}) {
        log.Warn("Claim compressor Address not configured")
    } else {
        log.Infof("Grouping claims allowed, claimCompressor=%s", claimCompressorAddress.String())
        claimCompressor, err = claimcompressor.NewClaimcompressor(claimCompressorAddress, ethClient)
        if err != nil {
            log.Errorf("error creating claimCompressor: %+v", err)
            return nil, err
        }
    }
    networkID, err := bridge.NetworkID(&bind.CallOpts{Pending: false}) // read from the contract
    if err != nil {
        return nil, err
    }
    scAddresses := []common.Address{polygonBridgeAddr}
    logger := log.WithFields("networkID", networkID)

    return &Client{
        logger:           logger,
        EtherClient:      ethClient,
        PolygonBridge:    bridge,
        OldPolygonBridge: oldpolygonBridge,
        SCAddresses:      scAddresses,
        ClaimCompressor:  claimCompressor,
        NetworkID:        networkID,
    }, nil
}

For L2, the client uses the corresponding L2 RPC to read the NetworkID from the L2 bridge contract:

// Solidity: function networkID() view returns(uint32)
func (_Polygonzkevmbridge *PolygonzkevmbridgeCaller) NetworkID(opts *bind.CallOpts) (uint32, error) {
    var out []interface{}
    err := _Polygonzkevmbridge.contract.Call(opts, &out, "networkID")

    if err != nil {
        return *new(uint32), err
    }

    out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32)

    return out0, err

}

Querying the L2 networkID confirms that it is indeed 0.
So the current problem is that the L2 bridge contract reports the wrong networkID, which scrambles the synchronizer logic. The next questions are when the L2 networkID gets set and how it ended up wrong.
After verification, the PolygonZkEVMBridgeV2 contract was never initialized.
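
To double-check this state on a live deployment, the networkID can be read straight from the L2 bridge over the L2 RPC with the same generated binding. A minimal sketch — the RPC URL and bridge address are placeholders, and the import path assumes the zkevm-bridge-service repo layout:

package main

import (
	"fmt"
	"log"

	"github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/polygonzkevmbridge"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://172.18.39.103:8123") // L2 RPC (placeholder)
	if err != nil {
		log.Fatal(err)
	}
	// L2PolygonBridgeAddresses entry from the bridge service config (placeholder)
	bridgeAddr := common.HexToAddress("0x...")
	bridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, client)
	if err != nil {
		log.Fatal(err)
	}
	id, err := bridge.NetworkID(&bind.CallOpts{Pending: false})
	if err != nil {
		log.Fatal(err)
	}
	// A healthy rollup reports its own network (1 here); 0 means the bridge
	// contract was never initialized.
	fmt.Println("L2 bridge networkID:", id)
}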

Conclusion

After verification and a comparison test against a fresh reset, the error turned out to be a transaction lost to an unstable RPC during the original deployment. When deploying, prefer a stable RPC and a relatively idle period on the target chain, and after deployment check that the L2 networkID is 1.

Analysis of the Polygon CDK Bridge Transaction Processing Flow

Data Production

synchronizer/synchronizer.go

func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*etherman.Block, error) {
...
blocks, order, err := s.etherMan.GetRollupInfoByBlockRange(s.ctx, fromBlock, &toBlock)

etherMan.readEvents

func (etherMan *Client) GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]Block, map[common.Hash][]Order, error) {
    // Filter query
    query := ethereum.FilterQuery{
        FromBlock: new(big.Int).SetUint64(fromBlock),
        Addresses: etherMan.SCAddresses,
        Topics:    [][]common.Hash{{updateGlobalExitRootSignatureHash, updateL1InfoTreeSignatureHash, depositEventSignatureHash, claimEventSignatureHash, oldClaimEventSignatureHash, newWrappedTokenEventSignatureHash, verifyBatchesTrustedAggregatorSignatureHash, rollupManagerVerifyBatchesSignatureHash}},
    }
    if toBlock != nil {
        query.ToBlock = new(big.Int).SetUint64(*toBlock)
    }
    blocks, blocksOrder, err := etherMan.readEvents(ctx, query)
    if err != nil {
        return nil, nil, err
    }
    return blocks, blocksOrder, nil
}
func (etherMan *Client) readEvents(ctx context.Context, query ethereum.FilterQuery) ([]Block, map[common.Hash][]Order, error) {
    logs, err := etherMan.EtherClient.FilterLogs(ctx, query)
    if err != nil {
        return nil, nil, err
    }
    var blocks []Block
    blocksOrder := make(map[common.Hash][]Order)
    for _, vLog := range logs {
        err := etherMan.processEvent(ctx, vLog, &blocks, &blocksOrder)
func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
    switch vLog.Topics[0] {
    case updateGlobalExitRootSignatureHash:
        return etherMan.updateGlobalExitRootEvent(ctx, vLog, blocks, blocksOrder)
updateGlobalExitRootSignatureHash              = crypto.Keccak256Hash([]byte("UpdateGlobalExitRoot(bytes32,bytes32)"))
func (etherMan *Client) updateGlobalExitRootEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
    etherMan.logger.Debug("UpdateGlobalExitRoot event detected. Processing...")
    return etherMan.processUpdateGlobalExitRootEvent(ctx, vLog.Topics[1], vLog.Topics[2], vLog, blocks, blocksOrder)
}
func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, mainnetExitRoot, rollupExitRoot common.Hash, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
    var gExitRoot GlobalExitRoot
    gExitRoot.ExitRoots = make([]common.Hash, 0)
    gExitRoot.ExitRoots = append(gExitRoot.ExitRoots, mainnetExitRoot)
    gExitRoot.ExitRoots = append(gExitRoot.ExitRoots, rollupExitRoot)
    gExitRoot.GlobalExitRoot = hash(mainnetExitRoot, rollupExitRoot)
    gExitRoot.BlockNumber = vLog.BlockNumber

    if len(*blocks) == 0 || ((*blocks)[len(*blocks)-1].BlockHash != vLog.BlockHash || (*blocks)[len(*blocks)-1].BlockNumber != vLog.BlockNumber) {
        fullBlock, err := etherMan.EtherClient.HeaderByHash(ctx, vLog.BlockHash)
        if err != nil {
            return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err)
        }
        t := time.Unix(int64(fullBlock.Time), 0)
        block := prepareBlock(vLog, t, fullBlock)
        block.GlobalExitRoots = append(block.GlobalExitRoots, gExitRoot)
        *blocks = append(*blocks, block)
    } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber {
        (*blocks)[len(*blocks)-1].GlobalExitRoots = append((*blocks)[len(*blocks)-1].GlobalExitRoots, gExitRoot)
    } else {
        etherMan.logger.Error("Error processing UpdateGlobalExitRoot event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber)
        return fmt.Errorf("error processing UpdateGlobalExitRoot event")
    }
    or := Order{
        Name: GlobalExitRootsOrder,
        Pos:  len((*blocks)[len(*blocks)-1].GlobalExitRoots) - 1,
    }
    (*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash] = append((*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash], or)
    return nil
}
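
For reference, the hash(mainnetExitRoot, rollupExitRoot) call above is keccak256 over the two concatenated 32-byte roots. A minimal stand-alone sketch; with both roots still zero (a fresh deployment) it reproduces the globalExitRoot value from firstBatchData in the first section:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// globalExitRoot mirrors hash() above: keccak256(mainnetExitRoot ++ rollupExitRoot).
func globalExitRoot(mainnetExitRoot, rollupExitRoot common.Hash) common.Hash {
	return common.BytesToHash(crypto.Keccak256(mainnetExitRoot.Bytes(), rollupExitRoot.Bytes()))
}

func main() {
	// Prints 0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5,
	// matching firstBatchData.globalExitRoot above.
	fmt.Println(globalExitRoot(common.Hash{}, common.Hash{}))
}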
GlobalExitRootsOrder EventOrder = "GlobalExitRoot"
func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*etherman.Block, error) {
...
err = s.processBlockRange(blocks, order)
func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error {
    ...
    for _, element := range order[blocks[i].BlockHash] {
            switch element.Name {
            case etherman.GlobalExitRootsOrder:
                isNewGer = true
                err = s.processGlobalExitRoot(blocks[i].GlobalExitRoots[element.Pos], blockID, dbTx)
func (s *ClientSynchronizer) processGlobalExitRoot(globalExitRoot etherman.GlobalExitRoot, blockID uint64, dbTx pgx.Tx) error {
    // Store GlobalExitRoot
    globalExitRoot.BlockID = blockID
    err := s.storage.AddGlobalExitRoot(s.ctx, &globalExitRoot, dbTx)
func (p *PostgresStorage) AddGlobalExitRoot(ctx context.Context, exitRoot *etherman.GlobalExitRoot, dbTx pgx.Tx) error {
    const addExitRootSQL = "INSERT INTO sync.exit_root (block_id, global_exit_root, exit_roots) VALUES ($1, $2, $3)"
    e := p.getExecQuerier(dbTx)
    _, err := e.Exec(ctx, addExitRootSQL, exitRoot.BlockID, exitRoot.GlobalExitRoot, pq.Array([][]byte{exitRoot.ExitRoots[0][:], exitRoot.ExitRoots[1][:]}))
    return err
}

Data Consumption

var chsExitRootEvent []chan *etherman.GlobalExitRoot
    var chsSyncedL2 []chan uint
    for i, l2EthermanClient := range l2Ethermans {
        log.Debug("trusted sequencer URL ", c.Etherman.L2URLs[i])
        zkEVMClient := client.NewClient(c.Etherman.L2URLs[i])
        chExitRootEventL2 := make(chan *etherman.GlobalExitRoot)
        chSyncedL2 := make(chan uint)
        chsExitRootEvent = append(chsExitRootEvent, chExitRootEventL2)
        chsSyncedL2 = append(chsSyncedL2, chSyncedL2)
        go runSynchronizer(ctx.Context, 0, bridgeController, l2EthermanClient, c.Synchronizer, storage, zkEVMClient, chExitRootEventL2, nil, chSyncedL2, []uint{})
    }
func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint, allNetworkIDs []uint) {
    sy, err := synchronizer.NewSynchronizer(ctx, storage, brdigeCtrl, etherman, zkEVMClient, genBlockNumber, chExitRootEventL2, chsExitRootEvent, chSynced, cfg, allNetworkIDs)
func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error {
    var isNewGer bool
    for i := range blocks {
    ...
    for _, element := range order[blocks[i].BlockHash] {
            switch element.Name {
            case etherman.GlobalExitRootsOrder:
                isNewGer = true
                err = s.processGlobalExitRoot(blocks[i].GlobalExitRoots[element.Pos], blockID, dbTx)
    ...
    if isNewGer {
        // Send latest GER stored to claimTxManager
        ger, err := s.storage.GetLatestL1SyncedExitRoot(s.ctx, nil)
        if err != nil {
            log.Errorf("networkID: %d, error getting latest GER stored on database. Error: %v", s.networkID, err)
            return err
        }
        if s.l1RollupExitRoot != ger.ExitRoots[1] {
            log.Debugf("Updating ger: %+v", ger)
            s.l1RollupExitRoot = ger.ExitRoots[1]
            for _, ch := range s.chsExitRootEvent {
                ch <- ger
            }
        }
    }
func (s *ClientSynchronizer) processGlobalExitRoot(globalExitRoot etherman.GlobalExitRoot, blockID uint64, dbTx pgx.Tx) error {
    // Store GlobalExitRoot
    globalExitRoot.BlockID = blockID
    err := s.storage.AddGlobalExitRoot(s.ctx, &globalExitRoot, dbTx)
func (p *PostgresStorage) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) {
    var (
        ger       etherman.GlobalExitRoot
        exitRoots [][]byte
    )
    const getLatestL1SyncedExitRootSQL = "SELECT block_id, global_exit_root, exit_roots FROM sync.exit_root WHERE block_id > 0 AND network_id = 0 ORDER BY id DESC LIMIT 1"
    err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestL1SyncedExitRootSQL).Scan(&ger.BlockID, &ger.GlobalExitRoot, pq.Array(&exitRoots))
    if err != nil {
        if errors.Is(err, pgx.ErrNoRows) {
            return &ger, gerror.ErrStorageNotFound
        }
        return nil, err
    }
    ger.ExitRoots = []common.Hash{common.BytesToHash(exitRoots[0]), common.BytesToHash(exitRoots[1])}
    return &ger, nil
}
func start(ctx *cli.Context) error {
    if c.ClaimTxManager.Enabled {
        for i := 0; i < len(c.Etherman.L2URLs); i++ {
            ...
            claimTxManager, err := claimtxman.NewClaimTxManager(ctx, c.ClaimTxManager, chsExitRootEvent[i], chsSyncedL2[i],
                c.Etherman.L2URLs[i], networkIDs[i+1], c.NetworkConfig.L2PolygonBridgeAddresses[i], bridgeService, storage, rollupID, l2Ethermans[i], nonceCache, auth)
func (tm *ClaimTxManager) Start() {
        ...
        case ger = <-tm.chExitRootEvent:
            if tm.synced {
                log.Debugf("RollupID: %d UpdateDepositsStatus for ger: %s", tm.rollupID, ger.GlobalExitRoot.String())
                if tm.cfg.GroupingClaims.Enabled {
                    log.Debugf("rollupID: %d, Ger value updated and ready to be processed...", tm.rollupID)
                    continue
                }
                go func() {
                    err := tm.updateDepositsStatus(ger)
                    if err != nil {
                        log.Errorf("rollupID: %d, failed to update deposits status: %v", tm.rollupID, err)
                    }
                }()
func (tm *ClaimTxManager) updateDepositsStatus(ger *etherman.GlobalExitRoot) error {
    dbTx, err := tm.storage.BeginDBTransaction(tm.ctx)
    if err != nil {
        return err
    }
    err = tm.processDepositStatus(ger, dbTx)
func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbTx pgx.Tx) error {
    if ger.BlockID != 0 { // L2 exit root is updated
...
    } else { // L1 exit root is updated in the trusted state
        log.Infof("RollupID: %d, Mainnet exitroot %v is updated", tm.rollupID, ger.ExitRoots[0])
        deposits, err := tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], tm.l2NetworkID, dbTx)
func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) {
    const updateDepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true 
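
Taking the consumption path as a whole: the L1 synchronizer publishes every new GER to one channel per L2 network (chsExitRootEvent), and each ClaimTxManager drains its own channel and updates deposit statuses. A self-contained sketch of that fan-out, with the types stripped down:

package main

import (
	"fmt"
	"sync"
)

type globalExitRoot struct{ root string }

func main() {
	// One channel per L2 network, mirroring chsExitRootEvent.
	chs := []chan *globalExitRoot{make(chan *globalExitRoot), make(chan *globalExitRoot)}
	var wg sync.WaitGroup
	for i, ch := range chs {
		wg.Add(1)
		go func(rollupID int, ch <-chan *globalExitRoot) { // stands in for one ClaimTxManager
			defer wg.Done()
			for ger := range ch {
				fmt.Printf("rollupID %d: updating deposit status for GER %s\n", rollupID, ger.root)
			}
		}(i+1, ch)
	}
	// The synchronizer's isNewGer branch in miniature: send the latest GER to
	// every per-network channel.
	ger := &globalExitRoot{root: "0xad32...5fb5"}
	for _, ch := range chs {
		ch <- ger
	}
	for _, ch := range chs {
		close(ch)
	}
	wg.Wait()
}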

Contract Analysis

PolygonZkEVMGlobalExitRoot.sol

function updateExitRoot(bytes32 newRoot) external {
        ...
        // If it already exists, do not modify the timestamp
        if (globalExitRootMap[newGlobalExitRoot] == 0) {
            globalExitRootMap[newGlobalExitRoot] = block.timestamp;
            emit UpdateGlobalExitRoot(
                cacheLastMainnetExitRoot,
                cacheLastRollupExitRoot
            );
        }
    }

PolygonZkEVMGlobalExitRootV2.sol

function updateExitRoot(bytes32 newRoot) external {
        ...
            emit UpdateL1InfoTree(
                cacheLastMainnetExitRoot,
                cacheLastRollupExitRoot
            );

            emit UpdateL1InfoTreeV2(
                currentL1InfoRoot,
                uint32(depositCount),
                lastBlockHash,
                currentTimestmap
            );
        }
    }
func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
    switch vLog.Topics[0] {
...
    case updateL1InfoTreeSignatureHash:
        return etherMan.updateL1InfoTreeEvent(ctx, vLog, blocks, blocksOrder)
func (etherMan *Client) updateL1InfoTreeEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
    etherMan.logger.Debug("UpdateL1InfoTree event detected")
    globalExitRoot, err := etherMan.PolygonZkEVMGlobalExitRoot.ParseUpdateL1InfoTree(vLog)
    if err != nil {
        return err
    }
    return etherMan.processUpdateGlobalExitRootEvent(ctx, globalExitRoot.MainnetExitRoot, globalExitRoot.RollupExitRoot, vLog, blocks, blocksOrder)
}
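
Both topics are plain keccak256 hashes of the event signatures, and the Solidity excerpts above show that UpdateL1InfoTree kept the same two bytes32 parameters as UpdateGlobalExitRoot, which is why updateL1InfoTreeEvent can funnel into the existing processUpdateGlobalExitRootEvent. A quick check of the two topic values:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The two topics the synchronizer filters on (cf. the FilterQuery above).
	fmt.Println(crypto.Keccak256Hash([]byte("UpdateGlobalExitRoot(bytes32,bytes32)")))
	fmt.Println(crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")))
}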
if isNewGer {
        // Send latest GER stored to claimTxManager
        ger, err := s.storage.GetLatestL1SyncedExitRoot(s.ctx, nil)
        if err != nil {
            log.Errorf("networkID: %d, error getting latest GER stored on database. Error: %v", s.networkID, err)
            return err
        }
        if s.l1RollupExitRoot != ger.ExitRoots[1] {
            log.Debugf("Updating ger: %+v", ger)
            s.l1RollupExitRoot = ger.ExitRoots[1]
            for _, ch := range s.chsExitRootEvent {
                ch <- ger
            }
        }
    }
func (p *PostgresStorage) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) {
    var (
        ger       etherman.GlobalExitRoot
        exitRoots [][]byte
    )
    const getLatestL1SyncedExitRootSQL = "SELECT block_id, global_exit_root, exit_roots FROM sync.exit_root WHERE block_id > 0 AND network_id = 0 ORDER BY id DESC LIMIT 1"
    err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestL1SyncedExitRootSQL).Scan(&ger.BlockID, &ger.GlobalExitRoot, pq.Array(&exitRoots))
    if err != nil {
        if errors.Is(err, pgx.ErrNoRows) {
            return &ger, gerror.ErrStorageNotFound
        }
        return nil, err
    }
    ger.ExitRoots = []common.Hash{common.BytesToHash(exitRoots[0]), common.BytesToHash(exitRoots[1])}
    return &ger, nil
}

// TODO: continue the analysis

Overall Analysis

  1. After the PolygonZkEVMGlobalExitRoot contract was upgraded to V2, the UpdateGlobalExitRoot event was replaced by UpdateL1InfoTree, but the bridge service processes both through the same path, so there is no compatibility problem
  2. Check the corresponding data in the database:
    SELECT block_id, global_exit_root, exit_roots FROM sync.exit_root;

    block_id | global_exit_root | exit_roots

    1 | \x304fb89608cfb05e5eac3b9e7c70df5821e03b6f8166baf4192f4d921c44f651 | {"\\x76ab2578418da24f3754ecfdef1de5ff8631ca577f998799287eee91b4aace2c","\\x0000000000000000000000000000000000000000000000000000000000000000"}
    2 | \x8c9a2fea84f58dc994dac422ab7ee94238643f976de700a25ec3c5701b5acf32 | {"\\xcc7402af30b35a4ee3d5b793cfed1678ae8529e7ffa79e75ae2e4baaa890df21","\\x0000000000000000000000000000000000000000000000000000000000000000"}
    0 | \x8c9a2fea84f58dc994dac422ab7ee94238643f976de700a25ec3c5701b5acf32 | {"\\xcc7402af30b35a4ee3d5b793cfed1678ae8529e7ffa79e75ae2e4baaa890df21","\\x0000000000000000000000000000000000000000000000000000000000000000"}

    Note the block_id 0 row: it records a GER learned outside of L1 block sync (compare the ger.BlockID checks in processDepositStatus above), and GetLatestL1SyncedExitRoot deliberately skips it via its block_id > 0 filter.

sequence-sender Exits Abnormally Due to RPC Rate Limiting

Problem

RPC rate limiting caused the sequence-sender to exit abnormally:

2024-11-15T11:49:59.657Z        ERROR   sync/evmdownloader.go:248       error calling FilterLogs to eth client: 429 Too Many Requests: {"id":9,"jsonrpc":"2.0","error":{"code":-32005,"message":"rate limit exceeded"}}{"pid": 40, "version": "v0.4.0-beta5", "syncer": "l1infotreesync"}
github.com/0xPolygon/cdk/sync.(*EVMDownloaderImplementation).GetLogs
        /go/src/github.com/0xPolygon/cdk/sync/evmdownloader.go:248
github.com/0xPolygon/cdk/sync.(*EVMDownloaderImplementation).GetEventsByBlockRange
        /go/src/github.com/0xPolygon/cdk/sync/evmdownloader.go:184
github.com/0xPolygon/cdk/sync.(*EVMDownloader).Download
        /go/src/github.com/0xPolygon/cdk/sync/evmdownloader.go:97
2024/11/15 11:49:59 getLogs failed too many times (0)

Relevant Code

type RetryHandler struct {
    RetryAfterErrorPeriod      time.Duration
    MaxRetryAttemptsAfterError int
}

func (h *RetryHandler) Handle(funcName string, attempts int) {
    if h.MaxRetryAttemptsAfterError > -1 && attempts >= h.MaxRetryAttemptsAfterError {
        log.Fatalf(
            "%s failed too many times (%d)",
            funcName, h.MaxRetryAttemptsAfterError,
        )
    }
    time.Sleep(h.RetryAfterErrorPeriod)
}
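
For context, this is how the handler behaves inside a download loop such as GetEventsByBlockRange: each failed attempt sleeps RetryAfterErrorPeriod, and once attempts reaches MaxRetryAttemptsAfterError the whole process exits via log.Fatalf. Note that the fatal message prints the configured limit, not the attempt count — which is why the log above ends with "(0)": the limit itself was 0, so the very first error was fatal. A self-contained sketch with a simulated rate-limited call:

package main

import (
	"errors"
	"fmt"
	"log"
	"time"
)

type RetryHandler struct {
	RetryAfterErrorPeriod      time.Duration
	MaxRetryAttemptsAfterError int
}

func (h *RetryHandler) Handle(funcName string, attempts int) {
	if h.MaxRetryAttemptsAfterError > -1 && attempts >= h.MaxRetryAttemptsAfterError {
		log.Fatalf("%s failed too many times (%d)", funcName, h.MaxRetryAttemptsAfterError)
	}
	time.Sleep(h.RetryAfterErrorPeriod)
}

func flakyGetLogs() error { return errors.New("429 Too Many Requests") } // stand-in for FilterLogs

func main() {
	rh := RetryHandler{RetryAfterErrorPeriod: 100 * time.Millisecond, MaxRetryAttemptsAfterError: 3}
	attempts := 0
	for {
		if err := flakyGetLogs(); err == nil {
			break
		}
		attempts++
		fmt.Println("attempt", attempts, "failed")
		rh.Handle("getLogs", attempts) // exits on the 3rd failure: "getLogs failed too many times (3)"
	}
}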

Solution

Add two settings to the sequence-sender configuration under L1InfoTreeSync (see the sketch after this list):

  • MaxRetryAttemptsAfterError: how many retries are allowed before the process exits abnormally; set this to a suitably large value
  • RetryAfterErrorPeriod: the interval between consecutive retries
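
A sketch of the resulting section, assuming the standard cdk TOML config layout; the key names mirror the RetryHandler fields above, and the values are examples only:

[L1InfoTreeSync]
RetryAfterErrorPeriod = "5s"        # wait between retries
MaxRetryAttemptsAfterError = 100    # generous limit instead of exiting on the first error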

error getting sequences: error no leaves on L1InfoTree yet and GetInitL1InfoRootMap

Error

Starting the sequence-sender produced the following error:

2024-11-15T11:11:46.117Z        ERROR   sequencesender/sequencesender.go:312    error getting sequences: error no leaves on L1InfoTree yet and GetInitL1InfoRootMap fails: %!w(<nil>)   {"pid": 39, "version": "v0.4.0-beta5", "module": "sequence-sender"}
github.com/0xPolygon/cdk/sequencesender.(*SequenceSender).tryToSendSequence
        /go/src/github.com/0xPolygon/cdk/sequencesender/sequencesender.go:312
github.com/0xPolygon/cdk/sequencesender.(*SequenceSender).sequenceSending
        /go/src/github.com/0xPolygon/cdk/sequencesender/sequencesender.go:243

Relevant Code

// Returns CounterL1InfoRoot to use for this batch
func (t *TxBuilderBananaBase) GetCounterL1InfoRoot(ctx context.Context, highestL1IndexInBatch uint32) (uint32, error) {
    header, err := t.ethClient.HeaderByNumber(ctx, t.blockFinality)
    if err != nil {
        return 0, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err)
    }
    var resL1InfoCounter uint32

    info, err := t.l1InfoTree.GetLatestInfoUntilBlock(ctx, header.Number.Uint64())
    if err == nil {
        resL1InfoCounter = info.L1InfoTreeIndex + 1
    }
    if errors.Is(err, l1infotreesync.ErrNotFound) {
        // There are no L1 Info tree leaves yet, so we can try to use L1InfoRootMap event
        l1infotreeInitial, err := t.l1InfoTree.GetInitL1InfoRootMap(ctx)
        if l1infotreeInitial == nil || err != nil {
            return 0, fmt.Errorf("error no leaves on L1InfoTree yet and GetInitL1InfoRootMap fails: %w", err)
        }
        // We use this leaf as first one
        resL1InfoCounter = l1infotreeInitial.LeafCount
    } else if err != nil {
        return 0, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err)
    }

Analysis

l1InfoTree.GetInitL1InfoRootMap returned no data.
The data behind GetInitL1InfoRootMap is written when the InitL1InfoRootMap event is processed:

if event.InitL1InfoRootMap != nil {
            log.Debugf("handle InitL1InfoRootMap event %s", event.InitL1InfoRootMap.String())
            err = processEventInitL1InfoRootMap(tx, block.Num, event.InitL1InfoRootMap)
            if err != nil {
                err = fmt.Errorf("initL1InfoRootMap. Err: %w", err)
                log.Errorf("error processing InitL1InfoRootMap: %v", err)
                return err
            }
        }

The corresponding contract is PolygonZkEVMGlobalExitRootV2.sol->emit InitL1InfoRootMap:

 function initialize() external virtual initializer {
        // Get the current historic root
        bytes32 currentL1InfoRoot = getRoot();

        // Store L1InfoRoot
        l1InfoRootMap[uint32(depositCount)] = currentL1InfoRoot;

        emit InitL1InfoRootMap(uint32(depositCount), currentL1InfoRoot);
    }
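
Since initialize() is the only place this event is emitted, one way to locate the exact block (which the InitialBlock fix below depends on) is to filter L1 logs for the InitL1InfoRootMap topic on polygonZkEVMGlobalExitRootAddress. A minimal sketch — the RPC URL, contract address, and block window are placeholders, and the topic assumes the signature InitL1InfoRootMap(uint32,bytes32) from the excerpt above:

package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("https://rpc.sepolia.org") // L1 RPC (placeholder)
	if err != nil {
		log.Fatal(err)
	}
	logs, err := client.FilterLogs(context.Background(), ethereum.FilterQuery{
		FromBlock: big.NewInt(7085000), // search window around the deployment (placeholder)
		ToBlock:   big.NewInt(7086000),
		// deploy_output.json -> polygonZkEVMGlobalExitRootAddress (placeholder)
		Addresses: []common.Address{common.HexToAddress("0x...")},
		Topics:    [][]common.Hash{{crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)"))}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range logs {
		fmt.Println("InitL1InfoRootMap emitted in block", l.BlockNumber) // 7085224 in this deployment
	}
}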

Continuing the investigation: check whether the sync code ever executed the corresponding logic for this chain.
l1infotreesync/processor.go

func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
    ...
        if event.InitL1InfoRootMap != nil {
            log.Debugf("handle InitL1InfoRootMap event %s", event.InitL1InfoRootMap.String())
            err = processEventInitL1InfoRootMap(tx, block.Num, event.InitL1InfoRootMap)
            if err != nil {
                err = fmt.Errorf("initL1InfoRootMap. Err: %w", err)
                log.Errorf("error processing InitL1InfoRootMap: %v", err)
                return err
            }
        }
func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) {
    ...
    err := d.processor.ProcessBlock(ctx, blockToProcess)
2024-11-16T01:17:02.244Z        DEBUG   sync/evmdriver.go:100   handleNewBlock blockNum: 7085616 blockHash: 0x72c30608c656bfd164e1e5836db12d6b36ad2f7ddbd5e26557ea18fcdce98d94  {"pid": 40, "version": "v0.4.0-beta5", "syncer": "l1infotreesync"}
2024-11-16T01:17:02.244Z        INFO    l1infotreesync/processor.go:343 block 7085616 processed with 0 events   {"pid": 40, "version": "v0.4.0-beta5"}

The processing logic does already handle event.InitL1InfoRootMap.

Continuing the Investigation

Two angles to check:

  1. The contract never emitted the event.
  2. The configured block height is wrong.
    Since the event is emitted by the contract PolygonZkEVMGlobalExitRootV2.sol, the contract address is deploy_output.json->polygonZkEVMGlobalExitRootAddress.
    Inspect that address:
    https://sepolia.etherscan.io/tx/0xc3b16bdaa7054689230e976e20badcb65c92ad2a623ea89b1b33147a1fc0f227#eventlog

    Height: 7085224
    The contract did emit event.InitL1InfoRootMap, which rules out angle 1.

Next, check the configured height, deploy_output.json->deploymentRollupManagerBlockNumber:
https://sepolia.etherscan.io/tx/0x03f41442db2e20f2551436df6ec607be307c89e7b05fe32fc462fceb225ce24a
Height: 7085226

Root Cause

The configured starting height (7085226, deploymentRollupManagerBlockNumber) lies above the block in which PolygonZkEVMGlobalExitRootV2->initialize was executed (7085224), so the scan never saw the event.InitL1InfoRootMap event. GetInitL1InfoRootMap therefore had nothing stored locally, which ultimately produced the sequence-sender error.

Solution

Look up the height of the transaction on the deploy_output.json->polygonZkEVMGlobalExitRootAddress contract that emitted InitL1InfoRootMap, and set it in the sequence-sender configuration under
L1InfoTreeSync->InitialBlock (see the sketch below).

https://sepolia.etherscan.io/address/0xEEc7988853B40B65FaBB3E9A3393E093D5710515
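
A sketch of the fix, again assuming the standard cdk TOML config layout, using the heights found above — the scan must start at or before the initialize block, not at deploymentRollupManagerBlockNumber:

[L1InfoTreeSync]
InitialBlock = 7085224   # block of PolygonZkEVMGlobalExitRootV2.initialize (7085226 skipped the event)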