diff --git a/encoding/codecv9.go b/encoding/codecv9.go
index 61c5980..f416e61 100644
--- a/encoding/codecv9.go
+++ b/encoding/codecv9.go
@@ -1,5 +1,31 @@
 package encoding
 
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// DACodecV9 updates the implementation of the base function checkCompressedDataCompatibility
+// to use the V9 compatibility check (checkCompressedDataCompatibilityV9) instead of the previous V7 version.
+//
+// As per Go's rules for shadowing methods with struct embedding, we need to override
+// all methods that (transitively) call checkCompressedDataCompatibility:
+// - checkCompressedDataCompatibility (updated to use V9)
+// - constructBlob (calls checkCompressedDataCompatibility)
+// - NewDABatch (calls constructBlob)
+// - CheckChunkCompressedDataCompatibility (calls CheckBatchCompressedDataCompatibility)
+// - CheckBatchCompressedDataCompatibility (calls checkCompressedDataCompatibility)
+// - estimateL1CommitBatchSizeAndBlobSize (calls checkCompressedDataCompatibility)
+// - EstimateChunkL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
+// - EstimateBatchL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
+
 type DACodecV9 struct {
 	DACodecV8
 }
@@ -12,3 +38,184 @@ func NewDACodecV9() *DACodecV9 {
 		},
 	}
 }
+
+// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
+// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
+// flag checkLength indicates whether to check the length of the compressed data against the original data.
+// If checkLength is true, this function returns whether compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
+// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
+func (d *DACodecV9) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
+	compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
+	}
+
+	if err = checkCompressedDataCompatibilityV9(compressedPayloadBytes); err != nil {
+		log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
+		return nil, false, nil
+	}
+
+	// check if compressed data is bigger or equal to the original data -> no need to compress
+	if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) {
+		log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
+		return nil, false, nil
+	}
+
+	return compressedPayloadBytes, true, nil
+}
+
+// NewDABatch creates a DABatch including blob from the provided Batch.
+func (d *DACodecV9) NewDABatch(batch *Batch) (DABatch, error) {
+	if len(batch.Blocks) == 0 {
+		return nil, errors.New("batch must contain at least one block")
+	}
+
+	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
+		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
+	}
+
+	blob, blobVersionedHash, blobBytes, challengeDigest, err := d.constructBlob(batch)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct blob: %w", err)
+	}
+
+	daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct DABatch: %w", err)
+	}
+
+	return daBatch, nil
+}
+
+func (d *DACodecV9) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, common.Hash, error) {
+	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)
+
+	payloadBytes, err := d.constructBlobPayload(batch)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	isCompressedFlag := uint8(0x0)
+	if enableCompression {
+		isCompressedFlag = 0x1
+		payloadBytes = compressedPayloadBytes
+	}
+
+	sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))
+
+	blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
+	copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
+	blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
+	blobBytes = append(blobBytes, payloadBytes...)
+
+	if len(blobBytes) > maxEffectiveBlobBytes {
+		log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes)
+	}
+
+	// convert raw data to BLSFieldElements
+	blob, err := makeBlobCanonical(blobBytes)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
+	}
+
+	// compute blob versioned hash
+	c, err := kzg4844.BlobToCommitment(blob)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err)
+	}
+	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
+
+	// compute challenge digest for codecv7, different from previous versions,
+	// the blob bytes are padded to the max effective blob size, which is 131072 / 32 * 31 due to the blob encoding
+	paddedBlobBytes := make([]byte, maxEffectiveBlobBytes)
+	copy(paddedBlobBytes, blobBytes)
+
+	challengeDigest := crypto.Keccak256Hash(crypto.Keccak256(paddedBlobBytes), blobVersionedHash[:])
+
+	return blob, blobVersionedHash, blobBytes, challengeDigest, nil
+}
+
+// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
+func (d *DACodecV9) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
+	// filling the needed fields for the batch used in the check
+	b := &Batch{
+		Chunks:                 []*Chunk{c},
+		PrevL1MessageQueueHash: c.PrevL1MessageQueueHash,
+		PostL1MessageQueueHash: c.PostL1MessageQueueHash,
+		Blocks:                 c.Blocks,
+	}
+
+	return d.CheckBatchCompressedDataCompatibility(b)
+}
+
+// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
+func (d *DACodecV9) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
+	if len(b.Blocks) == 0 {
+		return false, errors.New("batch must contain at least one block")
+	}
+
+	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
+		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
+	}
+
+	payloadBytes, err := d.constructBlobPayload(b)
+	if err != nil {
+		return false, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	// This check is only used for sanity checks. If the check fails, it means that the compression did not work as expected.
+	// rollup-relayer will try popping the last chunk of the batch (or last block of the chunk when in proposing chunks) and try again to see if it works as expected.
+	// Since length check is used for DA and proving efficiency, it does not need to be checked here.
+	_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */)
+	if err != nil {
+		return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	return compatible, nil
+}
+
+func (d *DACodecV9) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
+	if len(batch.Blocks) == 0 {
+		return 0, 0, errors.New("batch must contain at least one block")
+	}
+
+	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)
+
+	payloadBytes, err := d.constructBlobPayload(batch)
+	if err != nil {
+		return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
+	if err != nil {
+		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	if enableCompression {
+		blobBytes = append(blobBytes, compressedPayloadBytes...)
+	} else {
+		blobBytes = append(blobBytes, payloadBytes...)
+	}
+
+	return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil
+}
+
+// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
+func (d *DACodecV9) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) {
+	return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{
+		Blocks:                 chunk.Blocks,
+		PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
+		PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
+	})
+}
+
+// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
+func (d *DACodecV9) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
+	return d.estimateL1CommitBatchSizeAndBlobSize(batch)
+}
diff --git a/encoding/da.go b/encoding/da.go
index 8e16f2c..6d82447 100644
--- a/encoding/da.go
+++ b/encoding/da.go
@@ -509,6 +509,61 @@ func checkCompressedDataCompatibilityV7(data []byte) error {
 	return nil
 }
 
+// Sanity check if the compressed data (v9) is compatible with our circuit.
+// If we conclude that the data could not be decompressed, then we will
+// commit it uncompressed instead.
+func checkCompressedDataCompatibilityV9(data []byte) error {
+	if len(data) < 16 {
+		return fmt.Errorf("too small size (0x%x), what is it?", data)
+	}
+
+	fheader := data[0]
+	// it is not the encoding type we expected in our zstd header
+	if fheader&63 != 32 {
+		return fmt.Errorf("unexpected header type (%x)", fheader)
+	}
+
+	// skip content size
+	switch fheader >> 6 {
+	case 0:
+		data = data[2:]
+	case 1:
+		data = data[3:]
+	case 2:
+		data = data[5:]
+	case 3:
+		data = data[9:]
+	default:
+		panic("impossible")
+	}
+
+	isLast := false
+	// scan each block until done
+	for len(data) > 3 && !isLast {
+		isLast = (data[0] & 1) == 1
+		blkType := (data[0] >> 1) & 3
+		var blkSize uint
+		if blkType == 1 { // RLE Block
+			blkSize = 1
+		} else {
+			if blkType == 3 {
+				return fmt.Errorf("encounter reserved block type at %v", data)
+			}
+			blkSize = (uint(data[2])*65536 + uint(data[1])*256 + uint(data[0])) >> 3
+		}
+		if len(data) < 3+int(blkSize) {
+			return fmt.Errorf("wrong data len {%d}, expect min {%d}", len(data), 3+blkSize)
+		}
+		data = data[3+blkSize:]
+	}
+
+	if !isLast {
+		return fmt.Errorf("unexpected end before last block")
+	}
+
+	return nil
+}
+
 // makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
 // The canonical blob representation is a 32-byte array where every 31 bytes are prepended with 1 zero byte.
 // The kzg4844.Blob is a 4096-byte array, thus 0s are padded to the end of the array.
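
Note on the override list in the codecv9.go header comment: Go resolves promoted methods against the embedded type, so a method declared on DACodecV8 that calls checkCompressedDataCompatibility always invokes DACodecV8's version, even when DACodecV8 is embedded in DACodecV9 and DACodecV9 re-declares that method. The following minimal sketch illustrates the behavior; the types v8Codec/v9Codec and the methods check/construct are illustrative stand-ins, not part of this package.

package main

import "fmt"

type v8Codec struct{}

func (c v8Codec) check() string { return "v8 check" }

// construct is declared on v8Codec, so its call to c.check() is resolved
// against v8Codec -- even when v8Codec is embedded in another type.
func (c v8Codec) construct() string { return "construct using " + c.check() }

type v9Codec struct{ v8Codec }

// Shadowing check alone is not enough: the promoted construct still uses v8Codec.check.
func (c v9Codec) check() string { return "v9 check" }

// To pick up the new check, construct must be shadowed as well.
func (c v9Codec) construct() string { return "construct using " + c.check() }

func main() {
	var c v9Codec
	fmt.Println(c.v8Codec.construct()) // "construct using v8 check"
	fmt.Println(c.construct())         // "construct using v9 check", only because construct is re-declared
}

Without the v9Codec.construct override, the second call would also print "construct using v8 check", which is exactly why every caller in the list above is shadowed on DACodecV9.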
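
Note on the bit arithmetic in checkCompressedDataCompatibilityV9: a zstd block header is 3 bytes read as a little-endian 24-bit value, with bit 0 being Last_Block, bits 1-2 Block_Type, and the remaining 21 bits Block_Size (RFC 8878); that is what the expressions data[0] & 1, (data[0] >> 1) & 3, and the final >> 3 shift extract. Below is a standalone sketch of the same decode; decodeBlockHeader is an illustrative helper, not a function in the encoding package.

package main

import "fmt"

// decodeBlockHeader mirrors the bit layout used in checkCompressedDataCompatibilityV9:
// a zstd block header is a 3-byte little-endian value with
// bit 0 = Last_Block, bits 1-2 = Block_Type, bits 3-23 = Block_Size.
// This helper is illustrative only.
func decodeBlockHeader(h [3]byte) (isLast bool, blkType uint, blkSize uint) {
	v := uint(h[2])*65536 + uint(h[1])*256 + uint(h[0])
	isLast = v&1 == 1
	blkType = (v >> 1) & 3
	blkSize = v >> 3
	return
}

func main() {
	// 0x000085 little-endian: bit 0 set (last block), type 2 (compressed block), size 16.
	isLast, blkType, blkSize := decodeBlockHeader([3]byte{0x85, 0x00, 0x00})
	fmt.Println(isLast, blkType, blkSize) // true 2 16
}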