您正在查看: Layer2 分类下的文章

op-stack + zk + DA 方案测试 TODO

空余时间看了SP1,顺便想了下,目前主流的几个L2,都有些不足:

  • zk类的 prover 运行成本高,或者 EVM兼容性,RPC 性能低
  • op 类的,有个7天的挑战期(提款需等待挑战期结束才能最终确认)
  • 缺少必要的DA支持来降低数据提交成本

zkSync,Polygon cdk,arbitrum, op-stack 等主流的项目代码大部分都看过了
从代码实现,Evm兼容性,代码架构,稳定性,功能定制复杂度,op-stack 是最合适的?

目前各个方案综合来看,最优的组合是 op-stack + zk + DA ?

方案组成调研

TODO

  1. op-succinct的部署,prover 实际机器成本,产出性能
  2. EigenDA私有化部署

op-succinct 代码分析- proposer succinct server

代码分析

proposer/succinct/bin/server.rs

// Axum router for the op-succinct proposer's succinct server: proof-request
// endpoints plus a status/config-validation route, bound on 0.0.0.0.
let app = Router::new()
    .route("/request_span_proof", post(request_span_proof)) // Request a proof for a range of blocks.
    .route("/request_agg_proof", post(request_agg_proof)) // Request an aggregation proof over a set of subproofs.
    .route("/request_mock_span_proof", post(request_mock_span_proof)) // Request a mock proof for a range of blocks.
    .route("/request_mock_agg_proof", post(request_mock_agg_proof))// Request a mock aggregation proof over a set of subproofs.
    .route("/status/:proof_id", get(get_proof_status)) // Get the status of a proof.
    .route("/validate_config", post(validate_config)) // Validate the L2 output oracle configuration.
    .layer(DefaultBodyLimit::disable())
    .layer(RequestBodyLimitLayer::new(102400 * 1024 * 1024)) // ~100 GiB request-body cap, applied after disabling the default limit.
    .with_state(global_hashes);

// Port comes from the PORT env var, defaulting to 3000.
let port = env::var("PORT").unwrap_or_else(|_| "3000".to_string());
let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port))
.await
.unwrap();

info!("Server listening on {}", listener.local_addr().unwrap());

request_span_proof

// HTTP handler: request a span (range) proof covering L2 blocks
// [payload.start, payload.end]. Generates a witness locally, registers the
// range ELF, then submits a Compressed proof request to the SP1 prover
// network and returns the network's request ID.
async fn request_span_proof(
    State(state): State<ContractConfig>,
    Json(payload): Json<SpanProofRequest>,
) -> Result<(StatusCode, Json<ProofResponse>), AppError> {
    info!("Received span proof request: {:?}", payload);
    let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await { // get_rpcs() reads L1_RPC / L1_BEACON_RPC / L2_RPC / L2_NODE_RPC from env vars; fetch_and_save_rollup_config fetches the L2 rollup_config.
        Ok(f) => f,
        Err(e) => {
            error!("Failed to create data fetcher: {}", e);
            return Err(AppError(e));
        }
    };

    let host_cli = match fetcher
        .get_host_cli_args( // Fetches the L2 output data for the given block numbers and saves the boot info to a file (named with block_number) in the data directory. Returns the args to pass to the native host for datagen.
            payload.start,
            payload.end,
            ProgramType::Multi,
            CacheMode::DeleteCache,
        )
        .await
    {
        Ok(cli) => cli,
        Err(e) => {
            error!("Failed to get host CLI args: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to get host CLI args: {}",
                e
            )));
        }
    };

    // Start the server and native client with a timeout.
    // Note: ideally the server should invoke a separate process that runs the
    // native host, and return an ID the client can poll to check whether the
    // proof has been submitted.
    let mut witnessgen_executor = WitnessGenExecutor::new(WITNESSGEN_TIMEOUT, RunContext::Docker);
    if let Err(e) = witnessgen_executor.spawn_witnessgen(&host_cli).await { // Spawns a witness-generation process for the given host CLI and adds it to the list of in-flight processes.
        error!("Failed to spawn witness generation: {}", e);
        return Err(AppError(anyhow::anyhow!(
            "Failed to spawn witness generation: {}",
            e
        )));
    }
    // Log any errors that occurred while running the witness-generation process.
    if let Err(e) = witnessgen_executor.flush().await {
        error!("Failed to generate witness: {}", e);
        return Err(AppError(anyhow::anyhow!(
            "Failed to generate witness: {}",
            e
        )));
    }

    let sp1_stdin = match get_proof_stdin(&host_cli) { // Builds the stdin used to generate a proof for the given L2 claim.
        Ok(stdin) => stdin,
        Err(e) => {
            error!("Failed to get proof stdin: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to get proof stdin: {}",
                e
            )));
        }
    };

    let private_key = match env::var("SP1_PRIVATE_KEY") {
        Ok(private_key) => private_key,
        Err(e) => {
            error!("Failed to get SP1 private key: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to get SP1 private key: {}",
                e
            )));
        }
    };
    let rpc_url = match env::var("PROVER_NETWORK_RPC") {
        Ok(rpc_url) => rpc_url,
        Err(e) => {
            error!("Failed to get PROVER_NETWORK_RPC: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to get PROVER_NETWORK_RPC: {}",
                e
            )));
        }
    };
    let mut prover = NetworkProverV2::new(&private_key, Some(rpc_url.to_string()), false); // Build the prover from the private key and RPC URL.
    // Use the reserved fulfillment strategy to route to a specific cluster.
    prover.with_strategy(FulfillmentStrategy::Reserved);

    // Range proofs are large, so skip simulation (SKIP_SIMULATION=true).
    // NOTE(review): the original comment said "set simulation to false", which
    // contradicts the code — the env var set here is the *skip* flag.
    env::set_var("SKIP_SIMULATION", "true");
    let vk_hash = match prover.register_program(&state.range_vk, RANGE_ELF).await { // Registers elf/range-elf.
        Ok(vk_hash) => vk_hash,
        Err(e) => {
            error!("Failed to register program: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to register program: {}",
                e
            )));
        }
    };
    let proof_id = match prover
        .request_proof( // Requests a proof from the prover network and returns the request ID.
            &vk_hash,
            &sp1_stdin,
            ProofMode::Compressed, // Compressed proof mode; Core, Plonk and Groth16 are also supported.
            1_000_000_000_000,
            None,
        )
        .await
    {
        Ok(proof_id) => proof_id,
        Err(e) => {
            error!("Failed to request proof: {}", e);
            return Err(AppError(anyhow::anyhow!("Failed to request proof: {}", e)));
        }
    };
    // NOTE(review): SKIP_SIMULATION is only restored on the success path; the
    // early returns above leave it set to "true" for the whole process.
    env::set_var("SKIP_SIMULATION", "false");

    Ok((StatusCode::OK, Json(ProofResponse { proof_id })))
}
// NetworkProverV2 method: requests a proof from the prover network with
// retries and returns the raw request-ID bytes.
pub async fn request_proof(
        &self,
        vk_hash: &[u8],
        stdin: &SP1Stdin,
        mode: ProofMode,
        cycle_limit: u64,
        timeout: Option<Duration>,
    ) -> Result<Vec<u8>> {
        // Get the timeout, falling back to the default TIMEOUT_SECS.
        let timeout_secs = timeout.map(|dur| dur.as_secs()).unwrap_or(TIMEOUT_SECS);

        log::info!("Requesting proof with cycle limit: {}", cycle_limit);

        // Request the proof with retries.
        let response = with_retry(
            || async {
                self.client
                    .request_proof(
                        vk_hash,
                        stdin,
                        mode,
                        SP1_CIRCUIT_VERSION,
                        self.strategy,
                        timeout_secs,
                        cycle_limit,
                    )
                    .await
            },
            timeout,
            "requesting proof",
        )
        .await?;

        // Log the request ID and transaction hash.
        let tx_hash_hex = "0x".to_string() + &hex::encode(response.tx_hash);
        // NOTE(review): body.unwrap() panics if the server returns no body —
        // confirm the RPC contract guarantees a body on success.
        let request_id = response.body.unwrap().request_id;
        let request_id_hex = "0x".to_string() + &hex::encode(request_id.clone());
        log::info!("Created request {} in transaction {}", request_id_hex, tx_hash_hex);

        if self.client.rpc_url() == DEFAULT_PROVER_NETWORK_RPC { // "https://rpc.production.succinct.tools/"
            log::info!("View in explorer: https://network.succinct.xyz/request/{}", request_id_hex);
        }

        Ok(request_id)
    }
// Client method: creates a proof request with the given verifying-key hash and stdin.
pub async fn request_proof(
        &self,
        vk_hash: &[u8],
        stdin: &SP1Stdin,
        mode: ProofMode,
        version: &str,
        strategy: FulfillmentStrategy,
        timeout_secs: u64,
        cycle_limit: u64,
    ) -> Result<RequestProofResponse> {
        // Compute the deadline from the current UNIX time plus the timeout.
        let start = SystemTime::now();
        let since_the_epoch = start.duration_since(UNIX_EPOCH).expect("Invalid start time");
        let deadline = since_the_epoch.as_secs() + timeout_secs;

        // Create the stdin artifact.
        let mut store = self.get_store().await?;
        let stdin_uri = self.create_artifact_with_content(&mut store, &stdin).await?;

        // Send the request.
        let mut rpc = self.get_rpc().await?; // Get the ProverNetwork RPC client.
        let nonce = self.get_nonce().await?; // Fetch the nonce from the ProverNetwork.
        let request_body = RequestProofRequestBody { // Assemble the request body.
            nonce,
            version: format!("sp1-{}", version),
            vk_hash: vk_hash.to_vec(),
            mode: mode.into(),
            strategy: strategy.into(),
            stdin_uri,
            deadline,
            cycle_limit,
        };
        let request_response = rpc
            .request_proof(RequestProofRequest { // Calls the SP1 service endpoint /network.ProverNetwork/RequestProof.
                format: MessageFormat::Binary.into(),
                signature: request_body.sign(&self.signer).into(), // Sign the body with the private key.
                body: Some(request_body),
            })
            .await?
            .into_inner();

        Ok(request_response)
    }

https://github.com/succinctlabs/sp1/blob/bfb0c6d8e045b5f40422b9c06cb0e9ee21b3c19c/crates/sdk/src/network/proto/network.rs#L3

// This file is @generated by prost-build.
#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, ::prost::Message)]
/// Signed envelope for a proof request sent to /network.ProverNetwork/RequestProof.
pub struct RequestProofRequest {
    /// The message format of the body.
    #[prost(enumeration = "MessageFormat", tag = "1")]
    pub format: i32,
    /// The signature of the sender.
    #[prost(bytes = "vec", tag = "2")]
    pub signature: ::prost::alloc::vec::Vec<u8>,
    /// The body of the request.
    #[prost(message, optional, tag = "3")]
    pub body: ::core::option::Option<RequestProofRequestBody>,
}

具体SP1服务相关逻辑,后续文章单独分析

request_agg_proof

// HTTP handler: request an aggregation proof over a set of subproofs.
// Deserializes the subproofs from the payload, gathers the L1 header
// preimages they depend on, then submits a Groth16 aggregation request to
// the SP1 prover network.

async fn request_agg_proof(
    State(state): State<ContractConfig>,
    Json(payload): Json<AggProofRequest>,
) -> Result<(StatusCode, Json<ProofResponse>), AppError> {
    info!("Received agg proof request");
    // NOTE(review): deserialize(..).unwrap() panics the handler task on a
    // malformed subproof — confirm payloads are trusted.
    let mut proofs_with_pv: Vec<SP1ProofWithPublicValues> = payload
        .subproofs
        .iter()
        .map(|sp| bincode::deserialize(sp).unwrap())
        .collect();

    // Read the BootInfo committed in each subproof's public values.
    let boot_infos: Vec<BootInfoStruct> = proofs_with_pv
        .iter_mut()
        .map(|proof| proof.public_values.read())
        .collect();

    let proofs: Vec<SP1Proof> = proofs_with_pv // Extract the individual proofs from the payload.
        .iter_mut()
        .map(|proof| proof.proof.clone())
        .collect();

    let l1_head_bytes = hex::decode(
        payload
            .head
            .strip_prefix("0x")
            .expect("Invalid L1 head, no 0x prefix."),
    )?;
    let l1_head: [u8; 32] = l1_head_bytes.try_into().unwrap();

    let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await {
        Ok(f) => f,
        Err(e) => return Err(AppError(anyhow::anyhow!("Failed to create fetcher: {}", e))),
    };

    let headers = match fetcher
        .get_header_preimages(&boot_infos, l1_head.into()) // Fetch the header preimages corresponding to the boot infos and the latest L1 head. Specifically: get_earliest_l1_head_in_batch picks the earliest L1 head among the boot infos as the start, get_l1_header fetches the full header of the latest (on-chain-verified) L1 head, and fetch_headers_in_range retrieves all headers from start to end.
        .await
    {
        Ok(h) => h,
        Err(e) => {
            error!("Failed to get header preimages: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to get header preimages: {}",
                e
            )));
        }
    };

    let private_key = env::var("SP1_PRIVATE_KEY")?;
    let rpc_url = env::var("PROVER_NETWORK_RPC")?;
    let mut prover = NetworkProverV2::new(&private_key, Some(rpc_url.to_string()), false);
    // Use the reserved fulfillment strategy to route to a specific cluster.
    prover.with_strategy(FulfillmentStrategy::Reserved);

    let stdin =
        match get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()) { // Build the stdin for the aggregation proof.
            Ok(s) => s,
            Err(e) => {
                error!("Failed to get agg proof stdin: {}", e);
                return Err(AppError(anyhow::anyhow!(
                    "Failed to get agg proof stdin: {}",
                    e
                )));
            }
        };

    let vk_hash = match prover.register_program(&state.agg_vk, AGG_ELF).await { // Registers elf/aggregation-elf.
        Ok(vk_hash) => vk_hash,
        Err(e) => {
            error!("Failed to register program: {}", e);
            return Err(AppError(anyhow::anyhow!(
                "Failed to register program: {}",
                e
            )));
        }
    };
    let proof_id = match prover
        .request_proof( // Create the proof request with the given vk hash and stdin; see request_span_proof -> request_proof above for details.
            &vk_hash,
            &stdin,
            ProofMode::Groth16, // Unlike request_span_proof (which uses ProofMode::Compressed), aggregation uses Groth16.
            1_000_000_000_000,
            None,
        )
        .await
    {
        Ok(id) => id,
        Err(e) => {
            error!("Failed to request proof: {}", e);
            return Err(AppError(anyhow::anyhow!("Failed to request proof: {}", e)));
        }
    };

    Ok((StatusCode::OK, Json(ProofResponse { proof_id })))
}
// Builds the SP1 stdin for the aggregation proof: writes each compressed
// subproof (verified against the multi-block vkey), the aggregation inputs,
// and the CBOR-encoded L1 headers.
pub fn get_agg_proof_stdin(
    proofs: Vec<SP1Proof>,
    boot_infos: Vec<BootInfoStruct>,
    headers: Vec<Header>,
    multi_block_vkey: &sp1_sdk::SP1VerifyingKey,
    latest_checkpoint_head: B256,
) -> Result<SP1Stdin> {
    let mut stdin = SP1Stdin::new();
    for proof in proofs {
        // Only Compressed subproofs can be aggregated; anything else is a bug.
        let SP1Proof::Compressed(compressed_proof) = proof else {
            panic!();
        };
        stdin.write_proof(*compressed_proof, multi_block_vkey.vk.clone());
    }

    // Write the aggregation inputs to stdin.
    stdin.write(&AggregationInputs {
        boot_infos,
        latest_l1_checkpoint_head: latest_checkpoint_head,
        multi_block_vkey: multi_block_vkey.hash_u32(),
    });
    // Header has serialization issues with bincode, so serde_cbor is used
    // instead. (The upstream comment mentioning serde_json is stale — the code
    // clearly encodes with CBOR.)
    let headers_bytes = serde_cbor::to_vec(&headers).unwrap();
    stdin.write_vec(headers_bytes);

    Ok(stdin)
}

总结

  • request_span_proof:通过get_proof_stdin获取证明所需参数,使用ProofMode::Compressed压缩证明模式,通过request_proof向sp1服务申请生成proof
  • request_agg_proof: 将获取到的多个proof,使用ProofMode::Groth16证明模式,通过request_proof向sp1服务申请生成聚合proof

op-succinct 代码分析- proposer

代码分析

1. 启动

proposer/op/proposer/service.go

// Start launches the proposer and begins the L2 output submission loop.
func (ps *ProposerService) Start(_ context.Context) error {
    ps.Log.Info("Starting Proposer")
    return ps.driver.StartL2OutputSubmitting()
}

proposer/op/proposer/driver.go

// StartL2OutputSubmitting recovers stale witness-generation requests,
// validates the on-chain configuration, and kicks off the proposal loop.
// (Excerpt — elided sections marked with "..." in the original article.)
func (l *L2OutputSubmitter) StartL2OutputSubmitting() error {
    ...
    // When the proposer restarts with a cached database, mark every proof
    // stuck in the witness-generation state as failed, then retry it.
    witnessGenReqs, err := l.db.GetAllProofsWithStatus(proofrequest.StatusWITNESSGEN)
    if err != nil {
        return fmt.Errorf("failed to get witness generation pending proofs: %w", err)
    }
    for _, req := range witnessGenReqs {
        err = l.RetryRequest(req, ProofStatusResponse{})
        if err != nil {
            return fmt.Errorf("failed to retry request: %w", err)
        }
    }

    // Validate the contract's aggregation and range verification keys plus
    // the rollup config hash.
    err = l.ValidateConfig(l.Cfg.L2OutputOracleAddr.Hex()) // Calls the succinct server (proposer/succinct/bin/server.rs) at l.Cfg.OPSuccinctServerUrl+"/validate_config".
    ...
    go l.loop() // loop is responsible for creating and submitting the next output.

2. 循环检查和提交

proposer/op/proposer/driver.go

// loopL2OO periodically polls the L2OO for the next block to propose; if the
// current finalized (or safe) block has passed the next block, it proposes it.
// (Excerpt — the original article truncates the function before its end.)
func (l *L2OutputSubmitter) loopL2OO(ctx context.Context) {
    ticker := time.NewTicker(l.Cfg.PollInterval) // Poll interval.
    for {
        select {
        case <-ticker.C:
            // Fetch the proposer's current metrics.
            metrics, err := l.GetProposerMetrics(ctx)
            if err != nil {
                l.Log.Error("failed to get metrics", "err", err)
                continue
            }
            l.Log.Info("Proposer status", "metrics", metrics)

            // 1) Queue range proofs that are ready to be proven, determined
            // from the latest finalized L2 block and the current unsafe L2 head.
            l.Log.Info("Stage 1: Getting Range Proof Boundaries...")
            err = l.GetRangeProofBoundaries(ctx)
            if err != nil {
                l.Log.Error("failed to get range proof boundaries", "err", err)
                continue
            }

            // 2) Check the status of PROVING requests. On success, verify the
            // proof was saved to disk and set the status to COMPLETE. On
            // failure or timeout set it to FAILED (and, for span proofs,
            // split the request in half to retry).
            l.Log.Info("Stage 2: Processing PROVING requests...")
            err = l.ProcessProvingRequests()
            if err != nil {
                l.Log.Error("failed to update PROVING requests", "err", err)
                continue
            }

            // 3) Check the status of WITNESSGEN requests. If one has been in
            // the WITNESSGEN state longer than the timeout, set it to FAILED
            // and retry.
            l.Log.Info("Stage 3: Processing WITNESSGEN requests...")
            err = l.ProcessWitnessgenRequests()
            if err != nil {
                l.Log.Error("failed to update WITNESSGEN requests", "err", err)
                continue
            }

            // 4) Determine whether there is a contiguous chain of span proofs
            // starting from the latest block on the L2OO contract; if so,
            // queue one aggregation proof covering all of them.
            // NOTE(review): the upstream log label below repeats "Stage 3";
            // the labels are off by one from here on.
            l.Log.Info("Stage 3: Deriving Agg Proofs...")
            err = l.DeriveAggProofs(ctx)
            if err != nil {
                l.Log.Error("failed to generate pending agg proofs", "err", err)
                continue
            }

            // 5) Request all unrequested proofs from the prover network. Any
            // DB entry with status "UNREQ" is queued and ready; request all of
            // them (span and agg). For agg proofs, checkpoint the block hash
            // ahead of time.
            l.Log.Info("Stage 4: Requesting Queued Proofs...")
            err = l.RequestQueuedProofs(ctx)
            if err != nil {
                l.Log.Error("failed to request unrequested proofs", "err", err)
                continue
            }

            // 6) Submit aggregation proofs on chain. If a complete agg proof
            // is waiting in the database, submit it.
            l.Log.Info("Stage 5: Submitting Agg Proofs...")
            err = l.SubmitAggProofs(ctx)
            if err != nil {
                l.Log.Error("failed to submit agg proofs", "err", err)
            }

2.1 GetRangeProofBoundaries

proposer/op/proposer/range.go

// GetRangeProofBoundaries queues range proofs that are ready to be proven,
// determined from the latest finalized L2 block and the current unsafe L2 head.
func (l *L2OutputSubmitter) GetRangeProofBoundaries(ctx context.Context) error {
    // nextBlock equals the highest value in the DB's `EndBlock` column plus 1.
    latestL2EndBlock, err := l.db.GetLatestEndBlock()
    if err != nil {
        if ent.IsNotFound(err) {
            latestEndBlockU256, err := l.l2ooContract.LatestBlockNumber(&bind.CallOpts{Context: ctx}) // If there is no local record, read it from the contract.
            if err != nil {
                return fmt.Errorf("failed to get latest output index: %w", err)
            } else {
                latestL2EndBlock = latestEndBlockU256.Uint64()
            }
        } else {
            l.Log.Error("failed to get latest end requested", "err", err)
            return err
        }
    }
    newL2StartBlock := latestL2EndBlock

    rollupClient, err := dial.DialRollupClientWithTimeout(ctx, dial.DefaultDialTimeout, l.Log, l.Cfg.RollupRpc) // Connect to the L2 rollup node.
    if err != nil {
        return err
    }

    // Get the latest finalized L2 block.
    status, err := rollupClient.SyncStatus(ctx) // optimism_syncStatus
    if err != nil {
        l.Log.Error("proposer unable to get sync status", "err", err)
        return err
    }
    // Note: this originally used the finalized L1 block; to satisfy the new
    // API it now uses the finalized L2 block.
    newL2EndBlock := status.FinalizedL2.Number

    spans := l.SplitRangeBasic(newL2StartBlock, newL2EndBlock)

    // Add each span to the database. If there are no spans, no proofs are created.
    for _, span := range spans {
        err := l.db.NewEntry(proofrequest.TypeSPAN, span.Start, span.End)
        l.Log.Info("New range proof request.", "start", span.Start, "end", span.End)
        if err != nil {
            l.Log.Error("failed to add span to db", "err", err)
            return err
        }
    }

    return nil
}
// SplitRangeBasic creates a list of spans of size MaxBlockRangePerSpanProof
// from start to end. Note: the end of span i equals the start of span i+1.
// (The upstream doc comment calls this "CreateSpans" — a stale name.)
func (l *L2OutputSubmitter) SplitRangeBasic(start, end uint64) []Span {
    spans := []Span{}
    // Create spans of size MaxBlockRangePerSpanProof from start to end, each
    // beginning where the previous one ended; stop once another full span no
    // longer fits before end (any remainder is not covered).
    for i := start; i+l.Cfg.MaxBlockRangePerSpanProof <= end; i += l.Cfg.MaxBlockRangePerSpanProof {
        spans = append(spans, Span{Start: i, End: i + l.Cfg.MaxBlockRangePerSpanProof})
    }
    return spans
}
总结

GetRangeProofBoundaries 根据初始高度(本地记录优先,否则查找合约记录)和当前L2最终区块高度,根据设置参数MaxBlockRangePerSpanProof拆分跨度区块,创建对应的Proof任务

2.2 ProcessProvingRequests

proposer/op/proposer/prove.go

// ProcessProvingRequests checks the status of PROVING requests. On success it
// verifies the proof was saved and sets the status to COMPLETE; on failure it
// retries (splitting span requests in half when applicable).
func (l *L2OutputSubmitter) ProcessProvingRequests() error {
    reqs, err := l.db.GetAllProofsWithStatus(proofrequest.StatusPROVING) // Fetch all proof requests currently in the PROVING state.
    if err != nil {
        return err
    }
    for _, req := range reqs {
        proofStatus, err := l.GetProofStatus(req.ProverRequestID)
        if err != nil {
            l.Log.Error("failed to get proof status for ID", "id", req.ProverRequestID, "err", err)
            l.Metr.RecordError("get_proof_status", 1) // Record errors from the get-proof-status call.
            return err
        }
        if proofStatus.FulfillmentStatus == SP1FulfillmentStatusFulfilled {
            l.Log.Info("Fulfilled Proof", "id", req.ProverRequestID)
            err = l.db.AddFulfilledProof(req.ID, proofStatus.Proof) // Store the proof in the DB and mark its status complete.
            if err != nil {
                l.Log.Error("failed to update completed proof status", "err", err)
                return err
            }
            continue
        }

        if proofStatus.FulfillmentStatus == SP1FulfillmentStatusUnfulfillable {
            // Record the failure reason.
            l.Log.Info("Proof is unfulfillable", "id", req.ProverRequestID)
            l.Metr.RecordProveFailure("unfulfillable")

            err = l.RetryRequest(req, proofStatus) // For span proofs this may split the range in two.
            if err != nil {
                return fmt.Errorf("failed to retry request: %w", err)
            }
        }
    }

    return nil
}
// RetryRequest marks the given proof request FAILED and re-queues it; for
// unexecutable SPAN requests spanning more than one block, it splits the
// range into two new requests instead.
func (l *L2OutputSubmitter) RetryRequest(req *ent.ProofRequest, status ProofStatusResponse) error {
    err := l.db.UpdateProofStatus(req.ID, proofrequest.StatusFAILED)
    if err != nil {
        l.Log.Error("failed to update proof status", "err", err)
        return err
    }

    // If there was an execution error, the request is a SPAN proof, and the
    // block range is > 1, split the request into two. This is likely an SP1
    // OOM caused by a large block range with many transactions.
    // TODO: this workaround can be removed once the embedded allocator is
    // used, since the program should then never OOM.
    if req.Type == proofrequest.TypeSPAN && status.ExecutionStatus == SP1ExecutionStatusUnexecutable && req.EndBlock-req.StartBlock > 1 {
        // Split the request into two halves.
        midBlock := (req.StartBlock + req.EndBlock) / 2
        err = l.db.NewEntry(req.Type, req.StartBlock, midBlock)
        if err != nil {
            l.Log.Error("failed to retry first half of proof request", "err", err)
            return err
        }
        err = l.db.NewEntry(req.Type, midBlock+1, req.EndBlock)
        if err != nil {
            l.Log.Error("failed to retry second half of proof request", "err", err)
            return err
        }
    } else {
        // Retry the same request unchanged.
        err = l.db.NewEntry(req.Type, req.StartBlock, req.EndBlock)
        if err != nil {
            l.Log.Error("failed to retry proof request", "err", err)
            return err
        }
    }

    return nil
}
总结

ProcessProvingRequests 根据本地db记录,依次向服务端l.Cfg.OPSuccinctServerUrl+"/status/"+proofId获取所有当前处于PROVING的任务最新状态,如果SP1已生成完成,则更新本地数据为完成,如果失败则进行重试(如果为区间,则拆分成2个)

2.3 ProcessWitnessgenRequests

proposer/op/proposer/prove.go

// ProcessWitnessgenRequests fails over witness-generation requests that have
// exceeded the timeout, re-queueing them for another attempt.
func (l *L2OutputSubmitter) ProcessWitnessgenRequests() error {
    // Fetch all proof requests currently in the WITNESSGEN state.
    reqs, err := l.db.GetAllProofsWithStatus(proofrequest.StatusWITNESSGEN)
    if err != nil {
        return err
    }
    for _, req := range reqs {
        // If the request has been in the WITNESSGEN state longer than the
        // timeout (WITNESSGEN_TIMEOUT, 20 minutes), mark it FAILED and retry.
        if req.LastUpdatedTime+uint64(WITNESSGEN_TIMEOUT.Seconds()) < uint64(time.Now().Unix()) {
            l.RetryRequest(req, ProofStatusResponse{}) // NOTE(review): RetryRequest's error return is ignored here — confirm intentional.
        }
    }

    return nil
}

2.4 DeriveAggProofs

proposer/op/proposer/prove.go

// DeriveAggProofs uses the L2OO contract to find the block range the next
// proof must cover, then checks the DB for enough span proofs to request an
// aggregation proof covering that range. If so, the agg proof is queued in
// the DB for a later request.
func (l *L2OutputSubmitter) DeriveAggProofs(ctx context.Context) error {
    latest, err := l.l2ooContract.LatestBlockNumber(&bind.CallOpts{Context: ctx})
    if err != nil {
        return fmt.Errorf("failed to get latest L2OO output: %w", err)
    }

    // This fetches the next block number, i.e. currentBlock + submissionInterval.
    minTo, err := l.l2ooContract.NextBlockNumber(&bind.CallOpts{Context: ctx})
    if err != nil {
        return fmt.Errorf("failed to get next L2OO output: %w", err)
    }

    created, end, err := l.db.TryCreateAggProofFromSpanProofs(latest.Uint64(), minTo.Uint64()) // Try to create an AGG proof from span proofs covering [from, minTo); returns true if a new AGG proof was created.
    if err != nil {
        return fmt.Errorf("failed to create agg proof from span proofs: %w", err)
    }
    if created {
        l.Log.Info("created new AGG proof", "from", latest.Uint64(), "to", end)
    }

    return nil
}

2.5 RequestQueuedProofs

// proposer/op/proposer/driver.go

// RequestQueuedProofs requests all unrequested proofs from the prover network.
// Any DB entry with status "UNREQ" is queued and ready; both span and agg
// proofs are requested. For agg proofs, the L1 block hash is checkpointed
// ahead of time.
func (l *L2OutputSubmitter) RequestQueuedProofs(ctx context.Context) error {
    nextProofToRequest, err := l.db.GetNextUnrequestedProof()
    if err != nil {
        return fmt.Errorf("failed to get unrequested proofs: %w", err)
    }
    if nextProofToRequest == nil {
        return nil
    }

    if nextProofToRequest.Type == proofrequest.TypeAGG {
        if nextProofToRequest.L1BlockHash == "" {
            blockNumber, blockHash, err := l.checkpointBlockHash(ctx) // Fetch the header of (latest L1 block - 1) and send a transaction via sendCheckpointTransaction to write the block height into the l.Cfg.L2OutputOracleAddr contract's checkpointBlockHash.
            if err != nil {
                l.Log.Error("failed to checkpoint block hash", "err", err)
                return err
            }
            nextProofToRequest, err = l.db.AddL1BlockInfoToAggRequest(nextProofToRequest.StartBlock, nextProofToRequest.EndBlock, blockNumber, blockHash.Hex()) // Create the new proof task with the checkpointed L1 block info.
            if err != nil {
                l.Log.Error("failed to add L1 block info to AGG request", "err", err)
            }

            // Wait for the next loop so we pick up the version with the block info added.
            return nil
        } else {
            l.Log.Info("found agg proof with already checkpointed l1 block info")
        }
    } else {
        witnessGenProofs, err := l.db.GetNumberOfRequestsWithStatuses(proofrequest.StatusWITNESSGEN)
        if err != nil {
            return fmt.Errorf("failed to count witnessgen proofs: %w", err)
        }
        provingProofs, err := l.db.GetNumberOfRequestsWithStatuses(proofrequest.StatusPROVING)
        if err != nil {
            return fmt.Errorf("failed to count proving proofs: %w", err)
        }

        // Cap witness-generation requests at MAX_CONCURRENT_WITNESS_GEN to
        // keep the witness-gen server's spawned processes from overloading
        // the machine. This check may be removable once
        // https://github.com/anton-rs/kona/issues/553 is fixed.
        if witnessGenProofs >= MAX_CONCURRENT_WITNESS_GEN {
            l.Log.Info("max witness generation reached, waiting for next cycle")
            return nil
        }

        // Cap the total number of concurrent proofs at MAX_CONCURRENT_PROOF_REQUESTS.
        if (witnessGenProofs + provingProofs) >= int(l.Cfg.MaxConcurrentProofRequests) {
            l.Log.Info("max concurrent proof requests reached, waiting for next cycle")
            return nil
        }
    }
    // NOTE(review): the goroutine below assigns the outer `err` variable
    // instead of declaring a local one — possible data race with the caller;
    // confirm against upstream.
    go func(p ent.ProofRequest) {
        l.Log.Info("requesting proof from server", "type", p.Type, "start", p.StartBlock, "end", p.EndBlock, "id", p.ID)
        // Set the proof status to WITNESSGEN.
        err = l.db.UpdateProofStatus(p.ID, proofrequest.StatusWITNESSGEN)
        if err != nil {
            l.Log.Error("failed to update proof status", "err", err)
            return
        }

        // Request the proof type according to the mock configuration.
        err = l.RequestProof(p, l.Cfg.Mock)
        if err != nil {
            // If the proof request fails, add it back to the queue to retry.
            err = l.RetryRequest(nextProofToRequest, ProofStatusResponse{})
            if err != nil {
                l.Log.Error("failed to retry request", "err", err)
            }

        }
    }(*nextProofToRequest)

    return nil
}
// RequestProof handles both mock and real proof requests.
func (l *L2OutputSubmitter) RequestProof(p ent.ProofRequest, isMock bool) error {
    jsonBody, err := l.prepareProofRequest(p)
    if err != nil {
        return err
    }

    if isMock { // Mock mode enabled.
        proofData, err := l.requestMockProof(p.Type, jsonBody)
        if err != nil {
            return fmt.Errorf("mock proof request failed: %w", err)
        }

        // For mock proofs, set the status to PROVING once the "mock proof"
        // has been generated; AddFulfilledProof expects the proof to be in
        // the PROVING state.
        err = l.db.UpdateProofStatus(p.ID, proofrequest.StatusPROVING)
        if err != nil {
            return fmt.Errorf("failed to set proof status to proving: %w", err)
        }
        return l.db.AddFulfilledProof(p.ID, proofData)
    }

    // Request a real proof from the witness-generation server; returns the
    // proof ID from the network.
    proofID, err := l.requestRealProof(p.Type, jsonBody)
    if err != nil {
        return fmt.Errorf("real proof request failed: %w", err)
    }

    // After the prover ID is retrieved, set the proof status to PROVING.
    // Only proofs with status PROVING, SUCCESS or FAILED have a prover
    // request ID.
    err = l.db.UpdateProofStatus(p.ID, proofrequest.StatusPROVING)
    if err != nil {
        return fmt.Errorf("failed to set proof status to proving: %w", err)
    }

    return l.db.SetProverRequestID(p.ID, proofID)
}

2.6 SubmitAggProofs

// SubmitAggProofs submits aggregation proofs on chain. If a complete agg
// proof is waiting in the database, it is submitted.
func (l *L2OutputSubmitter) SubmitAggProofs(ctx context.Context) error {
    // Get the latest output index from the L2OutputOracle contract.
    latestBlockNumber, err := l.l2ooContract.LatestBlockNumber(&bind.CallOpts{Context: ctx})
    if err != nil {
        return fmt.Errorf("failed to get latest output index: %w", err)
    }

    // Check for completed AGG proofs starting from the next index.
    completedAggProofs, err := l.db.GetAllCompletedAggProofs(latestBlockNumber.Uint64())
    if err != nil {
        return fmt.Errorf("failed to query for completed AGG proof: %w", err)
    }

    if len(completedAggProofs) == 0 {
        return nil
    }

    // Pick the aggregation proof with the highest L2 block number.
    sort.Slice(completedAggProofs, func(i, j int) bool {
        return completedAggProofs[i].EndBlock > completedAggProofs[j].EndBlock
    })

    // Submit the aggregation proof with the highest L2 block number.
    aggProof := completedAggProofs[0]
    output, err := l.FetchOutput(ctx, aggProof.EndBlock) // Sanity check via optimism_outputAtBlock, e.g. against bad RPC caches.
    if err != nil {
        return fmt.Errorf("failed to fetch output at block %d: %w", aggProof.EndBlock, err)
    }
    err = l.proposeOutput(ctx, output, aggProof.Proof, aggProof.L1BlockNumber)// Submit the proof via proposeOutput.
    if err != nil {
        return fmt.Errorf("failed to propose output: %w", err)
    }

    return nil
}

op-succinct - 使用SP1 将OP stack转换成type-1 zkEVM Rollup

简介

OP Succinct 使用 SP1,可在约 1 小时内将任何 OP Stack Rollup 转换为完整的 type-1 zkEVM Rollup。

文章介绍:https://blog.succinct.xyz/op-succinct/
github: https://github.com/succinctlabs/op-succinct
docs: https://succinctlabs.github.io/op-succinct/introduction.html
SP1介绍:https://blog.succinct.xyz/introducing-sp1/
客户用例:https://blog.conduit.xyz/op-succinct-zk-rollups/

跨链桥跨链过程中数据变化

发起的两笔跨链交易 (L1->L2)
https://sepolia.etherscan.io/tx/0xd2db346f588d3eb9c90b0a9da2e8382d5c3275916288131f3701ae62bae56c44
https://sepolia.etherscan.io/tx/0x13a2ac45a973524d704463fe16d4d8e3410efc09cf6d3e53b6052c8a17ece4a4

bridge_db=# select * FROM sync.block;
 id | block_num |                             block_hash                             |                            parent_hash                             | network_id |      received_at       
----+-----------+--------------------------------------------------------------------+--------------------------------------------------------------------+------------+------------------------
  0 |           | \x5c7830                                                           |                                                                    |            | 1970-01-01 00:00:00+00
  1 |   7092388 | \xe984c5267a0e675df842c9ad91a1d0825080cfc581791250955a7a7dd41b71ab | \x5fcd8aa7d9b8650488fcfcf93db2c2dfb03100d02e2e0f927b0ee8d758dfb2b4 |          0 | 2024-11-17 01:21:24+00
  2 |   7092528 | \xcd588c08198efdbe40a4bfc731f906594dd0e80f78acff528a84bf36c8341f03 | \xec136b1e2d7fe0fd4fdb4c4c11cb1b5cc578c97e26d6073652a1d31702b12999 |          0 | 2024-11-17 01:51:36+00
bridge_db=# SELECT * FROM sync.deposit;

leaf_type | network_id | orig_net |                 orig_addr                  |         amount         | dest_net |                 dest_addr                  | block_id | deposit_cnt |                              tx_hash                               |                                                                                                                                                                                                                              metadata                                                                                                                                                                                                                              | id | ready_for_claim 
-----------+------------+----------+--------------------------------------------+------------------------+----------+--------------------------------------------+----------+-------------+--------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----+-----------------
         0 |          0 |        0 | \x3ec3d234625cde1e0f3267014e26e193610e50ac | 1000000000000000000000 |        1 | \xc2df13b6ad0753e0547a318f65f99ac62aec6e2b |        1 |           0 | \xd2db346f588d3eb9c90b0a9da2e8382d5c3275916288131f3701ae62bae56c44 | \x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000084d4158546f6b656e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034d41580000000000000000000000000000000000000000000000000000000000 |  1 | f
         0 |          0 |        0 | \x3ec3d234625cde1e0f3267014e26e193610e50ac | 2000000000000000000000 |        1 | \xc2df13b6ad0753e0547a318f65f99ac62aec6e2b |        2 |           1 | \x13a2ac45a973524d704463fe16d4d8e3410efc09cf6d3e53b6052c8a17ece4a4 | \x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000084d4158546f6b656e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034d41580000000000000000000000000000000000000000000000000000000000 |  2 | f

等待一段时间后,ready_for_claim 字段变为 true:

 leaf_type | network_id | orig_net |                 orig_addr                  |         amount         | dest_net |                 dest_addr                  | block_id | deposit_cnt |                              tx_hash                               |                                                                                                                                                                                                                              metadata                                                                                                                                                                                                                              | id | ready_for_claim 
-----------+------------+----------+--------------------------------------------+------------------------+----------+--------------------------------------------+----------+-------------+--------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----+-----------------
         0 |          0 |        0 | \x3ec3d234625cde1e0f3267014e26e193610e50ac | 1000000000000000000000 |        1 | \xc2df13b6ad0753e0547a318f65f99ac62aec6e2b |        1 |           0 | \xd2db346f588d3eb9c90b0a9da2e8382d5c3275916288131f3701ae62bae56c44 | \x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000084d4158546f6b656e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034d41580000000000000000000000000000000000000000000000000000000000 |  1 | t
         0 |          0 |        0 | \x3ec3d234625cde1e0f3267014e26e193610e50ac | 2000000000000000000000 |        1 | \xc2df13b6ad0753e0547a318f65f99ac62aec6e2b |        2 |           1 | \x13a2ac45a973524d704463fe16d4d8e3410efc09cf6d3e53b6052c8a17ece4a4 | \x000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000084d4158546f6b656e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000034d41580000000000000000000000000000000000000000000000000000000000 |  2 | t

随后该交易会由 claimtxman 模块处理,调用 L2 桥合约完成相应资产的转出
注:首次部署时,跨链桥claim地址需要在genesis allocs中初始化一定代币,用于首次跨链的手续费