BCSkill (Block chain skill )
区块链中文技术社区

只讨论区块链底层技术
遵守一切相关法律政策!

Arbitrum Sequencer 交易接收->区块打包逻辑

代码跟进

go-ethereum/internal/ethapi/api.go

func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
// SubmitTransaction is a helper function that submits tx to txPool and logs a message.
// SubmitTransaction validates a signed transaction against the node's fee cap
// and replay-protection policy, hands it to the backend's transaction pool,
// and logs the submission. It returns the transaction hash on success.
func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) {
    // Reject transactions whose total fee exceeds the configured RPC fee cap.
    if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil {
        return common.Hash{}, err
    }
    // Unless the backend explicitly allows unprotected txs, require EIP-155
    // replay protection.
    if !b.UnprotectedAllowed() && !tx.Protected() {
        return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC")
    }
    if err := b.SendTx(ctx, tx); err != nil {
        return common.Hash{}, err
    }
    // Recover the sender so the submission can be logged with full details
    // for manual investigations and interventions.
    current := b.CurrentBlock()
    from, err := types.Sender(types.MakeSigner(b.ChainConfig(), current.Number, current.Time), tx)
    if err != nil {
        return common.Hash{}, err
    }
    if tx.To() != nil {
        log.Info("Submitted transaction", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "recipient", tx.To(), "value", tx.Value())
    } else {
        // Contract creation: derive the address the contract will live at.
        addr := crypto.CreateAddress(from, tx.Nonce())
        log.Info("Submitted contract creation", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "contract", addr.Hex(), "value", tx.Value())
    }
    return tx.Hash(), nil
}
// SendTx implements the transaction-pool API for the Arbitrum API backend.
// Instead of inserting into a local mempool, it enqueues the signed
// transaction as an L2 message on the underlying backend, with no
// conditional options attached (nil).
func (a *APIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
    return a.b.EnqueueL2Message(ctx, signedTx, nil)
}

go-ethereum/arbitrum/backend.go

// EnqueueL2Message forwards the transaction (and any conditional options
// supplied by the caller) to the Arbitrum interface for publication.
func (b *Backend) EnqueueL2Message(ctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error {
    return b.arb.PublishTransaction(ctx, tx, options)
}

execution/gethexec/arb_interface.go

// PublishTransaction delegates to the configured transaction publisher
// (a Sequencer, forwarder, or dropper depending on node configuration;
// see CreateExecutionNode).
func (a *ArbInterface) PublishTransaction(ctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error {
    return a.txPublisher.PublishTransaction(ctx, tx, options)
}

execution/gethexec/tx_pre_checker.go

// PublishTransaction runs the sequencer pre-checks against the current chain
// head's state and ArbOS state before delegating the transaction to the
// wrapped publisher.
func (c *TxPreChecker) PublishTransaction(ctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error {
    head := c.bc.CurrentBlock()
    state, err := c.bc.StateAt(head.Root)
    if err != nil {
        return err
    }
    arbState, err := arbosState.OpenSystemArbosState(state, nil, true)
    if err != nil {
        return err
    }
    if err := PreCheckTx(c.bc, c.bc.Config(), head, state, arbState, tx, options, c.config()); err != nil {
        return err
    }
    return c.TransactionPublisher.PublishTransaction(ctx, tx, options)
}

execution/gethexec/sequencer.go

// PublishTransaction submits a transaction to the sequencer's queue and waits
// for the sequencing result (or a timeout). Transactions may instead be
// relayed to a configured forwarder, or rejected up front by the
// expected-surplus hard threshold, the sender whitelist, or an unsupported
// transaction type.
func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error {
    config := s.config()
    // Only try to acquire the RLock and check for the hard threshold if
    // l1Reader is non-nil and the hard threshold is enabled; this prevents
    // spamming read locks when they are not needed.
    if s.l1Reader != nil && config.ExpectedSurplusHardThreshold != "default" {
        s.expectedSurplusMutex.RLock()
        if s.expectedSurplusUpdated && s.expectedSurplus < int64(config.expectedSurplusHardThreshold) {
            // Fix: release the read lock before the early return. The
            // original returned while still holding the RLock, leaking it
            // and eventually blocking writers of expectedSurplus.
            s.expectedSurplusMutex.RUnlock()
            return errors.New("currently not accepting transactions due to expected surplus being below threshold")
        }
        s.expectedSurplusMutex.RUnlock()
    }

    sequencerBacklogGauge.Inc(1)
    defer sequencerBacklogGauge.Dec(1)

    // If a forwarder is configured, we are not the active sequencer: relay
    // the transaction, falling through only on ErrNoSequencer.
    _, forwarder := s.GetPauseAndForwarder()
    if forwarder != nil {
        err := forwarder.PublishTransaction(parentCtx, tx, options)
        if !errors.Is(err, ErrNoSequencer) {
            return err
        }
    }

    // Optional sender whitelist check.
    if len(s.senderWhitelist) > 0 {
        signer := types.LatestSigner(s.execEngine.bc.Config())
        sender, err := types.Sender(signer, tx)
        if err != nil {
            return err
        }
        _, authorized := s.senderWhitelist[sender]
        if !authorized {
            return errors.New("transaction sender is not on the whitelist")
        }
    }
    if tx.Type() >= types.ArbitrumDepositTxType || tx.Type() == types.BlobTxType {
        // Should be unreachable for Arbitrum types due to UnmarshalBinary not accepting Arbitrum internal txs
        // and we want to disallow BlobTxType since Arbitrum doesn't support EIP-4844 txs yet.
        return types.ErrTxTypeNotSupported
    }

    txBytes, err := tx.MarshalBinary()
    if err != nil {
        return err
    }

    queueTimeout := config.QueueTimeout
    queueCtx, cancelFunc := ctxWithTimeout(parentCtx, queueTimeout)
    defer cancelFunc()

    // Just to be safe, make sure we don't run over twice the queue timeout.
    abortCtx, cancel := ctxWithTimeout(parentCtx, queueTimeout*2)
    defer cancel()

    resultChan := make(chan error, 1)
    queueItem := txQueueItem{
        tx,
        len(txBytes),
        options,
        resultChan,
        &atomic.Bool{},
        queueCtx,
        time.Now(),
    }
    select {
    case s.txQueue <- queueItem: // the transaction enters the sequencer's queue
    case <-queueCtx.Done():
        return queueCtx.Err()
    }

    select {
    case res := <-resultChan:
        return res
    case <-abortCtx.Done():
        // We use abortCtx here and not queueCtx, because the QueueTimeout only applies to the background queue.
        // We want to give the background queue as much time as possible to make a response.
        err := abortCtx.Err()
        if parentCtx.Err() == nil {
            // If we've hit the abort deadline (as opposed to parentCtx being canceled), something went wrong.
            log.Warn("Transaction sequencing hit abort deadline", "err", err, "submittedAt", queueItem.firstAppearance, "queueTimeout", queueTimeout, "txHash", tx.Hash())
        }
        return err
    }
}
// createBlock drains the retry queue and the incoming tx queue into a batch,
// sequences that batch into a single L2 block via the execution engine, and
// routes per-transaction results (or retries) back to the submitters.
// A true return asks the caller to wait MaxBlockSpeed before attempting to
// create another block.
func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) {
    var queueItems []txQueueItem
    var totalBlockSize int

    defer func() {
       panicErr := recover()
       if panicErr != nil {
          log.Error("sequencer block creation panicked", "panic", panicErr, "backtrace", string(debug.Stack()))
          // Return an internal error to any queue items we were trying to process
          for _, item := range queueItems {
             // This can race, but that's alright, worst case is a log line in returnResult
             if !item.returnedResult.Load() {
                item.returnResult(sequencerInternalError)
             }
          }
          // Wait for the MaxBlockSpeed until attempting to create a block again
          returnValue = true
       }
    }()
    defer nonceFailureCacheSizeGauge.Update(int64(s.nonceFailures.Len()))

    config := s.config()

    // Clear out old nonceFailures
    s.nonceFailures.Resize(config.NonceFailureCacheSize)
    nextNonceExpiryTimer := s.expireNonceFailures()
    defer func() {
       // We wrap this in a closure as to not cache the current value of nextNonceExpiryTimer
       if nextNonceExpiryTimer != nil {
          nextNonceExpiryTimer.Stop()
       }
    }()

    for {
       var queueItem txQueueItem
       if s.txRetryQueue.Len() > 0 { // transactions queued for retry are taken first
          queueItem = s.txRetryQueue.Pop()
       } else if len(queueItems) == 0 { // batch is empty: block until a tx (or other event) arrives
          var nextNonceExpiryChan <-chan time.Time
          if nextNonceExpiryTimer != nil {
             nextNonceExpiryChan = nextNonceExpiryTimer.C
          }
          select {
          case queueItem = <-s.txQueue:
          case <-nextNonceExpiryChan:
             // No need to stop the previous timer since it already elapsed
             nextNonceExpiryTimer = s.expireNonceFailures()
             continue
          case <-s.onForwarderSet:
             // Make sure this notification isn't outdated
             _, forwarder := s.GetPauseAndForwarder()
             if forwarder != nil {
                s.nonceFailures.Clear()
             }
             continue
          case <-ctx.Done():
             return false
          }
       } else {
          // Batch already has items: drain the queue without blocking.
          done := false
          select {
          case queueItem = <-s.txQueue:
          default:
             done = true
          }
          if done {
             break
          }
       }
       err := queueItem.ctx.Err()
       if err != nil {
          queueItem.returnResult(err)
          continue
       }
       if queueItem.txSize > config.MaxTxDataSize { // txs exceeding the configured size limit are rejected
          // This tx is too large
          queueItem.returnResult(txpool.ErrOversizedData)
          continue
       }
       if totalBlockSize+queueItem.txSize > config.MaxTxDataSize {
          // This tx is too large to add to the current batch
          s.txRetryQueue.Push(queueItem)
          // End the batch here; this tx goes into the next block
          break
       }
       totalBlockSize += queueItem.txSize
       queueItems = append(queueItems, queueItem) // the tx joins the current batch
    }

    s.nonceCache.Resize(config.NonceCacheSize) // Would probably be better in a config hook but this is basically free
    s.nonceCache.BeginNewBlock()
    queueItems = s.precheckNonces(queueItems, totalBlockSize)
    txes := make([]*types.Transaction, len(queueItems))
    hooks := s.makeSequencingHooks()
    hooks.ConditionalOptionsForTx = make([]*arbitrum_types.ConditionalOptions, len(queueItems))
    totalBlockSize = 0 // recompute the total block size as a second check
    for i, queueItem := range queueItems {
       txes[i] = queueItem.tx
       totalBlockSize = arbmath.SaturatingAdd(totalBlockSize, queueItem.txSize)
       hooks.ConditionalOptionsForTx[i] = queueItem.options
    }

    if totalBlockSize > config.MaxTxDataSize {// over the limit: push the whole batch back for the next round
       for _, queueItem := range queueItems {
          s.txRetryQueue.Push(queueItem)
       }
       log.Error(
          "put too many transactions in a block",
          "numTxes", len(queueItems),
          "totalBlockSize", totalBlockSize,
          "maxTxDataSize", config.MaxTxDataSize,
       )
       return false
    }

    if s.handleInactive(ctx, queueItems) {
       return false
    }

    timestamp := time.Now().Unix()
    s.L1BlockAndTimeMutex.Lock()
    l1Block := s.l1BlockNumber
    l1Timestamp := s.l1Timestamp
    s.L1BlockAndTimeMutex.Unlock()

    if s.l1Reader != nil && (l1Block == 0 || math.Abs(float64(l1Timestamp)-float64(timestamp)) > config.MaxAcceptableTimestampDelta.Seconds()) {
       for _, queueItem := range queueItems {
          s.txRetryQueue.Push(queueItem)
       }
       log.Error(
          "cannot sequence: unknown L1 block or L1 timestamp too far from local clock time",
          "l1Block", l1Block,
          "l1Timestamp", time.Unix(int64(l1Timestamp), 0),
          "localTimestamp", time.Unix(int64(timestamp), 0),
       )
       return true
    }

    header := &arbostypes.L1IncomingMessageHeader{
       Kind:        arbostypes.L1MessageType_L2Message,
       Poster:      l1pricing.BatchPosterAddress,
       BlockNumber: l1Block,
       Timestamp:   uint64(timestamp),
       RequestId:   nil,
       L1BaseFee:   nil,
    }

    start := time.Now()
    var (
       block *types.Block
       err   error
    )
    if config.EnableProfiling {// with enable-profiling the sequencer collects and records performance data about its operation
       block, err = s.execEngine.SequenceTransactionsWithProfiling(header, txes, hooks)
    } else {
       block, err = s.execEngine.SequenceTransactions(header, txes, hooks) // production path
    }
    elapsed := time.Since(start)
    blockCreationTimer.Update(elapsed)
    if elapsed >= time.Second*5 {
       var blockNum *big.Int
       if block != nil {
          blockNum = block.Number()
       }
       log.Warn("took over 5 seconds to sequence a block", "elapsed", elapsed, "numTxes", len(txes), "success", block != nil, "l2Block", blockNum)
    }
    if err == nil && len(hooks.TxErrors) != len(txes) {
       err = fmt.Errorf("unexpected number of error results: %v vs number of txes %v", len(hooks.TxErrors), len(txes))
    }
    if errors.Is(err, execution.ErrRetrySequencer) {
       log.Warn("error sequencing transactions", "err", err)
       // we changed roles
       // forward if we have where to
       if s.handleInactive(ctx, queueItems) {
          return false
       }
       // try to add back to queue otherwise
       for _, item := range queueItems {
          s.txRetryQueue.Push(item)
       }
       return false
    }
    if err != nil {
       if errors.Is(err, context.Canceled) {
          // thread closed. We'll later try to forward these messages.
          for _, item := range queueItems {
             s.txRetryQueue.Push(item)
          }
          return true // don't return failure to avoid retrying immediately
       }
       log.Error("error sequencing transactions", "err", err)
       for _, queueItem := range queueItems {
          queueItem.returnResult(err)
       }
       return false
    }

    if block != nil {
       successfulBlocksCounter.Inc(1)
       s.nonceCache.Finalize(block)
    }

    madeBlock := false
    for i, err := range hooks.TxErrors {
       if err == nil {
          madeBlock = true
       }
       queueItem := queueItems[i]
       if errors.Is(err, core.ErrGasLimitReached) {
          // Not enough gas remained in the block for this transaction.
          if madeBlock {
             // An earlier tx already made it into this block; retry in a new block.
             s.txRetryQueue.Push(queueItem)
             continue
          }
       }
       if errors.Is(err, core.ErrIntrinsicGas) {
          // Replace the wrapped error with the bare sentinel; the extra
          // detail is misleading here because of L1 data gas.
          err = core.ErrIntrinsicGas
       }
       var nonceError NonceError
       if errors.As(err, &nonceError) && nonceError.txNonce > nonceError.stateNonce {
          // Nonce too high: park the tx in the nonce-failure cache instead of
          // failing it, in case the gap fills in.
          s.nonceFailures.Add(nonceError, queueItem)
          continue
       }
       queueItem.returnResult(err)
    }
    return madeBlock
}

execution/gethexec/executionengine.go

// SequenceTransactions runs sequenceTransactionsWithBlockMutex through
// s.sequencerWrapper, resetting hooks.TxErrors before each attempt so a
// retried attempt starts from a clean slate.
func (s *ExecutionEngine) SequenceTransactions(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, hooks *arbos.SequencingHooks) (*types.Block, error) {
    return s.sequencerWrapper(func() (*types.Block, error) {
       hooks.TxErrors = nil
       return s.sequenceTransactionsWithBlockMutex(header, txes, hooks)
    })
}
// sequenceTransactionsWithBlockMutex builds one L2 block from txes on top of
// the current head: it executes the batch via arbos.ProduceBlockAdvanced,
// persists the resulting message through the consensus layer, and only then
// appends the block to the chain. Returns (nil, nil) when nothing was
// sequenced (no receipts, or every tx errored).
func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, hooks *arbos.SequencingHooks) (*types.Block, error) {
    lastBlockHeader, err := s.getCurrentHeader()
    if err != nil {
       return nil, err
    }

    statedb, err := s.bc.StateAt(lastBlockHeader.Root)
    if err != nil {
       return nil, err
    }

    // The previous header's nonce field carries the delayed-message read count.
    delayedMessagesRead := lastBlockHeader.Nonce.Uint64()

    startTime := time.Now()
    block, receipts, err := arbos.ProduceBlockAdvanced(
       header,
       txes,
       delayedMessagesRead,
       lastBlockHeader,
       statedb,
       s.bc,
       s.bc.Config(),
       hooks,
       false,
    )
    if err != nil {
       return nil, err
    }
    blockCalcTime := time.Since(startTime)
    if len(hooks.TxErrors) != len(txes) {
       return nil, fmt.Errorf("unexpected number of error results: %v vs number of txes %v", len(hooks.TxErrors), len(txes))
    }

    // No receipts at all: nothing was sequenced, so emit no block.
    if len(receipts) == 0 {
       return nil, nil
    }

    // Likewise skip block emission when every transaction failed.
    allTxsErrored := true
    for _, err := range hooks.TxErrors {
       if err == nil {
          allTxsErrored = false
          break
       }
    }
    if allTxsErrored {
       return nil, nil
    }

    msg, err := MessageFromTxes(header, txes, hooks.TxErrors)
    if err != nil {
       return nil, err
    }

    pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1)
    if err != nil {
       return nil, err
    }

    msgWithMeta := arbostypes.MessageWithMetadata{
       Message:             msg,
       DelayedMessagesRead: delayedMessagesRead,
    }
    msgResult, err := s.resultFromHeader(block.Header())
    if err != nil {
       return nil, err
    }

    err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, *msgResult)
    if err != nil {
       return nil, err
    }

    // Only write the block after we have written the message, so that if the
    // node dies in between, it recovers naturally at startup by regenerating
    // the missing block.
    err = s.appendBlock(block, statedb, receipts, blockCalcTime)
    if err != nil {
       return nil, err
    }
    s.cacheL1PriceDataOfMsg(pos, receipts, block, false)

    return block, nil
}

交易执行顺序

Arbitrum 的交易处理方式与传统的以太坊链有所不同。Arbitrum 采用了“先到先得”的交易处理顺序,因此在其体系结构中并没有传统意义上的内存池(mempool)。

由于交易是按照序列器接收的顺序进行处理的,因此 Arbitrum 交易不需要优先费;如果交易确实包含优先费,则该费用将在执行结束时退还到交易的原始地址。

注:因为Arbitrum 需要更加有效的交易传播机制,所以生产部署配置和网络,要确保所有外部节点与主节点,尽量保证最短路径,之间的网络要稳定并且延迟尽量低,保证主节点尽快收到并处理交易。

交易价格

Arbitrum 上没有内存池的概念,交易由 Sequencer 按照先到先得的原则处理。因此,gas 价格竞标参数不会影响交易的处理顺序。

因为Arbitrum链上 gasprice 无法影响执行优先级,所以用户没有主动提价的动机,

交易收取的总费用是 L2 基础费用乘以所用 L2 gas 加上 L1 调用数据费用之和

Arbitrum ForwardingTarget 配置参数分析

介绍

对于ForwardingTarget有两个相关参数

参数 类型 介绍
forwarding-target string 交易转发目标 URL,或“null”以禁用转发(当且仅当不是序列器)
secondary-forwarding-target []string 次要交易转发目标 URL

参数验证规则

// Validate checks the sequencer sub-config and the forwarding-target settings,
// then derives the internal forwardingTarget value ("null" maps to the empty
// string, i.e. forwarding disabled).
func (c *Config) Validate() error {
    if err := c.Sequencer.Validate(); err != nil {
        return err
    }
    // A non-sequencer node must say where txs go (or explicitly opt out).
    if !c.Sequencer.Enable && c.ForwardingTarget == "" {
        return errors.New("ForwardingTarget not set and not sequencer (can use \"null\")")
    }
    // Normalize: "null" disables forwarding entirely.
    c.forwardingTarget = c.ForwardingTarget
    if c.ForwardingTarget == "null" {
        c.forwardingTarget = ""
    }
    // A sequencer must not also forward.
    if c.Sequencer.Enable && c.forwardingTarget != "" {
        return errors.New("ForwardingTarget set and sequencer enabled")
    }
    return nil
}

使用场景

  1. Sequencer.Enable == true时,forwardingTarget 必须为空,即不转发交易
  2. Sequencer.Enable != true 时,ForwardingTarget 可以设置为某个接收转发的RPC, 或者设置为null 即不转发交易只查询,可用于 ReadOnly节点

逻辑分析

// CreateExecutionNode (excerpt; body elided with "...") — shows how the tx
// publisher is chosen: a Sequencer when sequencing is enabled; otherwise a
// Redis-backed forwarder, a dropper (no forwarding target), or a plain
// forwarder over the primary plus secondary targets.
func CreateExecutionNode(
    ctx context.Context,
    stack *node.Node,
    chainDB ethdb.Database,
    l2BlockChain *core.BlockChain,
    l1client arbutil.L1Interface,
    configFetcher ConfigFetcher,
) (*ExecutionNode, error) {
    ...
    if config.Sequencer.Enable {
        seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer }
        sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher)
        if err != nil {
           return nil, err
        }
        txPublisher = sequencer
    } else {
        if config.Forwarder.RedisUrl != "" {
           // Redis-coordinated forwarding takes precedence over plain forwarding.
           txPublisher = NewRedisTxForwarder(config.forwardingTarget, &config.Forwarder)
        } else if config.forwardingTarget == "" {
           // No target configured: drop transactions (read-only node).
           txPublisher = NewTxDropper()
        } else {
           targets := append([]string{config.forwardingTarget}, config.SecondaryForwardingTarget...)
           txPublisher = NewForwarder(targets, &config.Forwarder)
        }
    }
    ...
}

Sequencer.Enable == false时

  1. Forwarder.RedisUrl不为空,则使用NewRedisTxForwarder,并仅使用forwardingTarget
  2. 当config.forwardingTarget为空时,即不转发交易,使用NewTxDropper
  3. Else, 同时使用forwardingTarget,SecondaryForwardingTarget
    1. 两者会合并为一个转发目标列表,按优先级顺序依次尝试转发
// PublishTransaction relays the transaction to each forwarding target in
// priority order, returning the first result that is either success or a
// non-retryable error. Retryable failures are logged and the next target is
// tried; if every target fails retryably, a terminal error is returned.
func (f *TxForwarder) PublishTransaction(inctx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error {
    if !f.enabled.Load() {
        return ErrNoSequencer
    }
    ctx, cancelFunc := f.ctxWithTimeout()
    defer cancelFunc()
    for idx, client := range f.rpcClients {
        var sendErr error
        if options != nil {
            sendErr = arbitrum.SendConditionalTransactionRPC(ctx, client, tx, options)
        } else {
            sendErr = f.ethClients[idx].SendTransaction(ctx, tx)
        }
        // Only retryable errors fall through to the next target.
        if sendErr != nil && f.tryNewForwarderErrors.MatchString(sendErr.Error()) {
            log.Warn("error forwarding transaction to a backup target", "target", f.targets[idx], "err", sendErr)
            continue
        }
        return sendErr
    }
    return errors.New("failed to publish transaction to any of the forwarding targets")
}
// CheckHealth returns health of the highest priority forwarding target.
// The result is cached: the upstream is re-probed (via an
// arb_checkPublisherHealth call on the primary client) only when the last
// check is older than cacheUpstreamHealth.
func (f *TxForwarder) CheckHealth(inctx context.Context) error {
    // If f.enabled is true, len(f.rpcClients) should always be greater than zero,
    // but better safe than sorry.
    if !f.enabled.Load() || len(f.rpcClients) == 0 {
       return ErrNoSequencer
    }
    f.healthMutex.Lock()
    defer f.healthMutex.Unlock()
    if time.Since(f.healthChecked) > cacheUpstreamHealth {
       // Clamp the probe timeout: zero (unset) or anything at or above
       // maxHealthTimeout falls back to maxHealthTimeout.
       timeout := f.timeout
       if timeout == time.Duration(0) || timeout >= maxHealthTimeout {
          timeout = maxHealthTimeout
       }
       ctx, cancelFunc := context.WithTimeout(context.Background(), timeout)
       defer cancelFunc()
       f.healthErr = f.rpcClients[0].CallContext(ctx, nil, "arb_checkPublisherHealth")
       f.healthChecked = time.Now()
    }
    return f.healthErr
}

初始化

// Initialize dials every configured forwarding target and keeps only those
// that connect successfully. The forwarder is enabled if at least one client
// was created; otherwise the last dial error is returned.
func (f *TxForwarder) Initialize(inctx context.Context) error {
    // Adopt the caller's context as the forwarder's base context if unset.
    if f.ctx == nil {
       f.ctx = inctx
    }
    ctx, cancelFunc := f.ctxWithTimeout()
    defer cancelFunc()
    var targets []string
    var lastError error
    for _, target := range f.targets {
       if target == "" {
          continue
       }
       rpcClient, err := rpc.DialTransport(ctx, target, f.transport)
       if err != nil {
          // A target that fails to dial is skipped, not fatal
          // (unless every target fails).
          log.Warn("error initializing a forwarding client in txForwarder", "forwarding url", target, "err", err)
          lastError = err
          continue
       }
       targets = append(targets, target)
       ethClient := ethclient.NewClient(rpcClient)
       f.rpcClients = append(f.rpcClients, rpcClient)
       f.ethClients = append(f.ethClients, ethClient)
    }
    // Keep only the targets that actually connected.
    f.targets = targets
    if len(f.rpcClients) > 0 {
       f.enabled.Store(true)
    } else {
       return lastError
    }
    return nil
}

会遍历所有的targets

区别

根据代码分析,

  • 启用Forwarder.RedisUrl时,仅使用forwardingTarget
  • 当config.forwardingTarget不为空时,forwarding-target 和 secondary-forwarding-target 同时叠加使用

部署优化

  • 将节点拓扑树形化,减少子叶节点与Sequencer传输距离
  • 尽可能多的覆盖同级子叶节点
  • 防止子叶节点不同层级内循环传播

TODO

继续跟进Forwarder.RedisUrl

if config.Forwarder.RedisUrl != "" {
    txPublisher = NewRedisTxForwarder(config.forwardingTarget, &config.Forwarder)
} else if config.forwardingTarget == "" {
    txPublisher = NewTxDropper()
} else {
    targets := append([]string{config.forwardingTarget}, config.SecondaryForwardingTarget...)
    txPublisher = NewForwarder(targets, &config.Forwarder)
}

从代码位置来看,NewRedisTxForwarder 应该是推荐方式?相比 NewForwarder 性能区别是什么?是否借助 Redis 共享数据加速?

// TODO 空闲再继续

Docker 切换数据存储目录

1. 停止 Docker 服务

在修改 Docker 配置之前,首先需要停止 Docker 服务:

sudo systemctl stop docker

2. 创建新的 Docker 目录

创建一个新的目录来存储 Docker 数据。假设你想将 Docker 的存储目录更改为 /mnt/docker-data,可以使用以下命令:

sudo mkdir -p /mnt/docker-data

3. 编辑 Docker 配置文件

编辑 Docker 的配置文件,通常位于 /etc/docker/daemon.json。如果文件不存在,你可以创建它:

sudo nano /etc/docker/daemon.json

在文件中添加或修改以下内容,将 "/var/lib/docker" 替换为你新创建的目录:

{"data-root": "/mnt/docker-data"}

保存并关闭文件。

4. 移动现有的 Docker 数据(可选)

如果你已经有 Docker 数据,并且希望保留它们,可以将它们移动到新的目录:

sudo rsync -aP /var/lib/docker/ /mnt/docker-data

5. 启动 Docker 服务

完成以上步骤后,重新启动 Docker 服务:

sudo systemctl start docker

6. 验证更改

检查 Docker 是否使用了新的存储路径:

docker info | grep "Docker Root Dir"

如果输出的目录是你指定的新目录,则说明更改成功。
通过这些步骤,你可以成功切换 Docker 的存储目录。

Meemaw 开源MPC钱包使用技术调研

介绍

一款开源MPC钱包SaaS服务
官网:https://getmeemaw.com/
文档:https://getmeemaw.com/docs/getting-started/

特点

接入简单

自定义Auth集成

参考:https://getmeemaw.com/docs/auth/custom

提供云和私有化部署

云:https://getmeemaw.com/docs/getting-started/cloud
私有化:https://getmeemaw.com/docs/getting-started/self-host

部署测试

云部署

准备Sepolia RPC

例如:Alchemy、Infura

注册Supabase

使用三方的Auth服务方便演示,后期可以自定义

  • 打开https://supabase.com/ 注册并登录账户
  • 新建组织和项目
  • 点击Authentication,添加测试User {记录邮箱和密码,后面登录会需要}

创建Meemaw账户

配置Meemaw

配置Supabase URL 和 Supabase API 密钥

在仪表板中,单击左侧菜单中的“身份验证”。从那里,您可以配置用户身份验证和认证方式。
为了便于本示例,选择“Supabase”作为身份验证提供商,然后提供您的 Supabase URL 和 Supabase API 密钥。别忘了保存。
注:Supabase信息获取,直接在Supabase新建的项目首页中获取

配置 security

选择security,测试直接设置为*,后期根据真实ip设置

获取项目 URL

点击左侧API,https://getmeemaw.com/cloud/api, 复制Project URL

Clone实例

git clone https://github.com/getmeemaw/example-js.git
cd example-js

修改实例参数

编辑实例代码 client/src/app/tx.jsx,使用您的项目URL 进行更新

const meemaw = await Meemaw.init('https://getmeemaw.co/YOUR-PROJECT-URL');

使用前面准备的Sepolia RPC替换YOUR-JSON-RPC-API-URL

const web3 = new Web3(new Web3.providers.HttpProvider("YOUR-JSON-RPC-API-URL"));

启动测试实例

cd client
npm install
npm run dev

注:nodejs 需要v18及以上版本

登陆测试

访问:http://localhost:3000


输入前面Supabase测试User邮箱和密码,进行登录

初始化

首次使用,创建钱包

非首次,可以选择从备份恢复

创建成功


可以先给新建的钱包地址转入一些gas,然后测试交易

测试交易

输入接收eth地址,点击 Send Transaction,等待转账结束,查看区块浏览器
云部署测试结束

私有化部署

部署服务端

与云部署相同实例

git clone https://github.com/getmeemaw/example-js.git
cd example-js

编辑 server/.env,更新SUPABASE_URL并SUPABASE_API_KEY使用您的配置 {与云部署步骤相同}

启动Server
docker compose up -d
meemaw_app  | 2024/10/11 10:36:14 Schema does not exist, creating...
meemaw_app  | 2024/10/11 10:36:14 Schema loaded
meemaw_app  | 2024/10/11 10:36:14 Starting server on port 8421

修改client

步骤和云部署一致,只是Meemaw.init地址改为Server对应的ip:8421
编辑实例代码 client/src/app/tx.jsx,使用您的项目URL 进行更新

const meemaw = await Meemaw.init('http://服务端所在ip+端口或域名');

使用前面准备的Sepolia RPC替换YOUR-JSON-RPC-API-URL

const web3 = new Web3(new Web3.providers.HttpProvider("YOUR-JSON-RPC-API-URL"));

其余使用逻辑与前面云部署步骤一致

其它

导出MPC备份和导出私钥

参考example-js页面功能代码

多设备

当一个账户已经在其它客户端A创建钱包后,在另外客户端B再次登录相同账户,再次点击创建钱包时,会走新设备加入流程,

  • 客户端B: meemaw.GetWallet()
  • 客户端A: wallet.AcceptDevice()

通过后 ,客户端B将作为新设备加入

对于Meemaw和其它的MPC先关钱包或逻辑有少许不同,类似Sygma, 是有多个relayer服务节点,彼此间完成TSS过程,Meemaw是meemaw_app服务与加入的客户端SDK完成,客户端SDK也参与了TSS

ethw - 以太坊钱包生成器

该Go应用程序旨在使用确定性BIP-39助记符或任意种子字符串作为种子来生成以太坊钱包。

注意:此工具主要用于开发和测试!如果您正在为 mainnet 寻找更安全、更完整的解决方案,请考虑使用 ethereal 和/或 ethdo

命令行参数

Usage: ethw <command>

Flags:
  -h, --help                 Show context-sensitive help.
      --log-level="fatal"    Configure logging level ($LOG_LEVEL)
      --log-format="text"    Configure logging format ($LOG_FORMAT)

Commands:
  wallet create <seed> ...
    Create new Ethereum wallets

  keystore create <wallets> ...
    Manage Ethereum keystores

  keystore list
    List all wallets from the keystore

  seed create
    Create a new seed

  version
    Display the application version

Run "ethw <command> --help" for more information on a command.

使用示例

钱包生成

下面您可以看到使用wallet create子命令生成以太坊钱包的一些示例:

从种子生成以太坊钱包

确保指定seed参数:

$ ethw wallet create --output=table "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar"

您应该期望得到类似如下的输出:

+---+-------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+
| # | ALIAS | ADDRESS                                    | PRIVATE KEY                                                      | PUBLIC KEY                                                                                                                         |
+---+-------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+
| 1 |       | 0x8d86D515fbee6A364C96Cf60f3220826f13A64F3 | 1d0b0a3898ff359032970f9d831269020d78463d861b305f40b1a85bed5bcefe | 04119a43acba93317d89e4a1181cbcef1a8ac28fdee7bb0df785db2510534b4a001cff289a9b70eb8d962009490c64bc546aa1fc0c880a4d608275639cab07391c |
+---+-------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+

使用别名生成多个钱包

一次生成多个钱包并分配别名以提高可读性:

$ ethw wallet create --output=table "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar;alias=Hermione Granger" "seed=radar sibling empty knee dignity text remind curtain panda feel apology crouch;alias=Harry Potter"

输出表将显示别名:

+---+------------------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+
| # | ALIAS            | ADDRESS                                    | PRIVATE KEY                                                      | PUBLIC KEY                                                                                                                         |
+---+------------------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+
| 1 | Hermione Granger | 0x8d86D515fbee6A364C96Cf60f3220826f13A64F3 | 1d0b0a3898ff359032970f9d831269020d78463d861b305f40b1a85bed5bcefe | 04119a43acba93317d89e4a1181cbcef1a8ac28fdee7bb0df785db2510534b4a001cff289a9b70eb8d962009490c64bc546aa1fc0c880a4d608275639cab07391c |
| 2 | Harry Potter     | 0x6f339aB74be047e3C5e5a784e2D4dDB5C161a034 | 130cf1653ae56b5278203d140509306fdf2f2a619ce54a64d54b688114339c8f | 04740ae95d36f6bc8b906fd4ee56cc048a0c94a323dd9cd74505e4de30ce52f4799ddc478df118b8377b0378d870014d36ae3fa98409f0a6bfd45fc9d31e54be9b |
+---+------------------+--------------------------------------------+------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------+

生成具有不同输出格式的钱包:

您还可以生成钱包并以 JSON 和 CSV 格式输出它们,这对于 jq 和 dasel 等实用程序很有用:

$ ethw wallet create --output=json "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar"

输出将如下所示:

[{"alias":"Hermione Granger","address":"0x8d86D515fbee6A364C96Cf60f3220826f13A64F3","private_key":"1d0b0a3898ff359032970f9d831269020d78463d861b305f40b1a85bed5bcefe","public_key":"04119a43acba93317d89e4a1181cbcef1a8ac28fdee7bb0df785db2510534b4a001cff289a9b70eb8d962009490c64bc546aa1fc0c880a4d608275639cab07391c"}]

密钥库

此功能允许直接生成密钥库,以兼容 Geth 和其他执行客户端。

钱包数据格式:

  • seed=<种子>,其中 <种子> 是生成钱包的种子,可以是助记词或任意字符串。
  • password=<密码>,其中 <密码> 是保护密钥库的密码(请记住在终端上直接输入密码会导致密码泄露)。

一些例子:

创建单个密钥库

$ ethw keystore create "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar;password=1234"

在密钥库中创建多个钱包

与生成钱包时相同,您可以将多个钱包添加到同一个 keystore 中:

$ ethw keystore create "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar;password=1234" "seed=radar sibling empty knee dignity text remind curtain panda feel apology crouch;password=5678"

覆盖现有的密钥库

您可以使用 --overwrite 参数覆盖现有密钥库中找到的所有内容:

$ ethw keystore create --overwrite "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar;password=1234"

指定自定义密钥库目录

默认情况下,ethw 将在您调用命令的当前目录中创建一个密钥库,但您可以使用 --keystore-dir 参数轻松地覆盖它:

$ ethw keystore create --keystore-dir=./my_keystore "seed=crouch apology feel panda curtain remind text dignity knee empty sibling radar;password=1234"

GitHub

https://github.com/aldoborrero/ethw