package encoding

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"math"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
	"github.com/scroll-tech/go-ethereum/log"

	"github.com/scroll-tech/da-codec/encoding/zstd"
)

// DACodecV7 implements the codec for the version 7 batch encoding.
type DACodecV7 struct{}

// Version returns the codec version.
func (d *DACodecV7) Version() CodecVersion {
	return CodecV7
}

// MaxNumChunksPerBatch returns the maximum number of chunks per batch.
// Since CodecV7 has no notion of chunks, there is no limit and math.MaxInt is returned.
func (d *DACodecV7) MaxNumChunksPerBatch() int {
	return math.MaxInt
}

// NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
func (d *DACodecV7) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) {
	return newDABlockV7FromBlockWithValidation(block, &totalL1MessagePoppedBefore)
}

// NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before.
// Note: In DACodecV7 there is no notion of chunks. Blobs contain the entire batch data without any chunk information.
// However, for compatibility reasons this function is implemented to create a DAChunk from a Chunk.
// This way we can still uniquely identify a set of blocks and their L1 messages.
func (d *DACodecV7) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64) (DAChunk, error) {
	if chunk == nil {
		return nil, errors.New("chunk is nil")
	}

	if len(chunk.Blocks) == 0 {
		return nil, errors.New("number of blocks is 0")
	}

	if len(chunk.Blocks) > math.MaxUint16 {
		return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(chunk.Blocks), math.MaxUint16)
	}

	blocks := make([]DABlock, 0, len(chunk.Blocks))
	txs := make([][]*types.TransactionData, 0, len(chunk.Blocks))

	if err := iterateAndVerifyBlocksAndL1Messages(chunk.PrevL1MessageQueueHash, chunk.PostL1MessageQueueHash, chunk.Blocks, &totalL1MessagePoppedBefore, func(initialBlockNumber uint64) {}, func(block *Block, daBlock *daBlockV7) error {
		blocks = append(blocks, daBlock)
		txs = append(txs, block.Transactions)

		return nil
	}); err != nil {
		return nil, fmt.Errorf("failed to iterate and verify blocks and L1 messages: %w", err)
	}

	daChunk := newDAChunkV7(
		blocks,
		txs,
	)

	return daChunk, nil
}
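
// A minimal usage sketch (assuming a populated Chunk and that Hash is part of
// the DAChunk interface):
//
//	codec := &DACodecV7{}
//	daChunk, err := codec.NewDAChunk(chunk, totalL1MessagePoppedBefore)
//	if err != nil {
//		// handle error
//	}
//	chunkHash, err := daChunk.Hash() // uniquely identifies the blocks and their L1 messages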

// NewDABatch creates a DABatch, including the blob, from the provided Batch.
func (d *DACodecV7) NewDABatch(batch *Batch) (DABatch, error) {
	if len(batch.Blocks) == 0 {
		return nil, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	blob, blobVersionedHash, blobBytes, err := d.constructBlob(batch)
	if err != nil {
		return nil, fmt.Errorf("failed to construct blob: %w", err)
	}

	daBatch, err := newDABatchV7(CodecV7, batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to construct DABatch: %w", err)
	}

	return daBatch, nil
}
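
// A minimal usage sketch (assuming Hash and Blob accessors on the DABatch
// interface):
//
//	daBatch, err := codec.NewDABatch(batch)
//	if err != nil {
//		// handle error
//	}
//	batchHash := daBatch.Hash()
//	blob := daBatch.Blob() // ready to be attached to the commit transaction as an EIP-4844 sidecar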
| 95 | + |
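// constructBlob assembles the blob envelope around the (optionally compressed)
// payload. The layout implied by the blobEnvelopeV7Offset* constants used below is:
//
//	byte 0      codec version
//	bytes 1-3   payload size (uint24, via encodeSize3Bytes)
//	byte 4      compression flag (0x0 = uncompressed, 0x1 = zstd-compressed)
//	bytes 5...  payload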
func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, error) {
	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return nil, common.Hash{}, nil, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes)
	if err != nil {
		return nil, common.Hash{}, nil, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	isCompressedFlag := uint8(0x0)
	if enableCompression {
		isCompressedFlag = 0x1
		payloadBytes = compressedPayloadBytes
	}

	sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))

	blobBytes[blobEnvelopeV7OffsetVersion] = uint8(CodecV7)
	copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
	blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
	blobBytes = append(blobBytes, payloadBytes...)

	if len(blobBytes) > maxEffectiveBlobBytes {
		log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
		return nil, common.Hash{}, nil, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes)
	}

	// convert raw data to BLS field elements
	blob, err := makeBlobCanonical(blobBytes)
	if err != nil {
		return nil, common.Hash{}, nil, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
	}

	// compute the blob versioned hash
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment: %w", err)
	}
	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

	return blob, blobVersionedHash, blobBytes, nil
}

// constructBlobPayload encodes the batch's L1 message queue hashes and blocks into the blob payload.
func (d *DACodecV7) constructBlobPayload(batch *Batch) ([]byte, error) {
	blobPayload := blobPayloadV7{
		prevL1MessageQueueHash: batch.PrevL1MessageQueueHash,
		postL1MessageQueueHash: batch.PostL1MessageQueueHash,
		blocks:                 batch.Blocks,
	}

	return blobPayload.Encode()
}
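
// The payload encoding itself is delegated to blobPayloadV7.Encode, which is
// expected to lay out the previous and post L1 message queue hashes followed by
// the block metadata and the raw L2 transaction data; see its definition for
// the authoritative byte layout.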

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
// Note: This function only populates the batch header; it leaves the blob-related fields empty.
func (d *DACodecV7) NewDABatchFromBytes(data []byte) (DABatch, error) {
	daBatch, err := decodeDABatchV7(data)
	if err != nil {
		return nil, fmt.Errorf("failed to decode DA batch: %w", err)
	}

	if daBatch.version != CodecV7 {
		return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, daBatch.version)
	}

	return daBatch, nil
}

// NewDABatchFromParams creates a DABatch from the given parameters, leaving the blob-related fields empty.
func (d *DACodecV7) NewDABatchFromParams(batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash) (DABatch, error) {
	return newDABatchV7(CodecV7, batchIndex, blobVersionedHash, parentBatchHash, nil, nil)
}

// DecodeDAChunksRawTx is not implemented for DACodecV7; use DecodeBlob instead.
func (d *DACodecV7) DecodeDAChunksRawTx(_ [][]byte) ([]*DAChunkRawTx, error) {
	return nil, errors.New("DecodeDAChunksRawTx is not implemented for DACodecV7, use DecodeBlob instead")
}

// DecodeBlob decodes the given blob into a DABlobPayload, reversing the envelope
// encoding performed by constructBlob.
func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
	if blob == nil {
		return nil, errors.New("blob is nil")
	}

	rawBytes := bytesFromBlobCanonical(blob)

	// read the blob envelope header
	version := rawBytes[blobEnvelopeV7OffsetVersion]
	if CodecVersion(version) != CodecV7 {
		return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, version)
	}

	// read the data size
	blobPayloadSize := decodeSize3Bytes(rawBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag])
	if blobPayloadSize+blobEnvelopeV7OffsetPayload > uint32(len(rawBytes)) {
		return nil, fmt.Errorf("blob envelope size exceeds the raw data size: %d > %d", blobPayloadSize, len(rawBytes))
	}

	payloadBytes := rawBytes[blobEnvelopeV7OffsetPayload : blobEnvelopeV7OffsetPayload+blobPayloadSize]

	// read the compressed flag and decompress if needed
	compressed := rawBytes[blobEnvelopeV7OffsetCompressedFlag]
	if compressed != 0x0 && compressed != 0x1 {
		return nil, fmt.Errorf("invalid compressed flag: %d", compressed)
	}
	if compressed == 0x1 {
		var err error
		if payloadBytes, err = decompressV7Bytes(payloadBytes); err != nil {
			return nil, fmt.Errorf("failed to decompress blob payload: %w", err)
		}
	}

	// read the payload
	payload, err := decodeBlobPayloadV7(payloadBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to decode blob payload: %w", err)
	}

	return payload, nil
}
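
// A minimal decoding sketch (assuming Blocks and Transactions accessors on the
// DABlobPayload interface):
//
//	payload, err := codec.DecodeBlob(blob)
//	if err != nil {
//		// handle error
//	}
//	blocks := payload.Blocks()
//	txs := payload.Transactions()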

// DecodeTxsFromBlob is a no-op for DACodecV7: the entire payload is decoded via DecodeBlob instead.
func (d *DACodecV7) DecodeTxsFromBlob(_ *kzg4844.Blob, _ []*DAChunkRawTx) error {
	return nil
}

// checkCompressedDataCompatibility compresses the given blob payload and checks the
// compatibility of the compressed data. It returns the compressed payload and a flag
// indicating whether compression should be used: false if the compatibility check fails
// or if compression does not reduce the payload size.
func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte) ([]byte, bool, error) {
	compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes)
	if err != nil {
		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
	}

	if err = checkCompressedDataCompatibility(compressedPayloadBytes); err != nil {
		log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	// if the compressed data is not smaller than the original data, there is no point in compressing
	if len(compressedPayloadBytes) >= len(payloadBytes) {
		log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	return compressedPayloadBytes, true, nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
// Note: For DACodecV7 this always returns true, since there is no notion of DAChunk in this version. Blobs
// contain the entire batch data, and it is up to a prover to decide the chunk sizes.
func (d *DACodecV7) CheckChunkCompressedDataCompatibility(_ *Chunk) (bool, error) {
	return true, nil
}

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
func (d *DACodecV7) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
	if len(b.Blocks) == 0 {
		return false, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	payloadBytes, err := d.constructBlobPayload(b)
	if err != nil {
		return false, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes)
	if err != nil {
		return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	return compatible, nil
}

// estimateL1CommitBatchSizeAndBlobSize estimates the batch size (envelope header plus the
// uncompressed payload) and the padded blob size for the given batch.
func (d *DACodecV7) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	if enableCompression {
		blobBytes = append(blobBytes, compressedPayloadBytes...)
	} else {
		blobBytes = append(blobBytes, payloadBytes...)
	}

	return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil
}
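
// Worked example with assumed numbers: for a 100,000-byte payload that zstd
// shrinks to 60,000 bytes, the estimated batch size is
// blobEnvelopeV7OffsetPayload + 100,000 (the uncompressed payload plus the
// envelope header), while the blob size is
// calculatePaddedBlobSize(blobEnvelopeV7OffsetPayload + 60,000), i.e. the
// compressed envelope padded out to the blob's field-element layout.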

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
func (d *DACodecV7) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{
		Blocks:                 chunk.Blocks,
		PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
		PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
	})
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
func (d *DACodecV7) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(batch)
}

// EstimateBlockL1CommitCalldataSize calculates the approximate calldata size needed to commit this block to L1.
// Note: For CodecV7 the calldata size is constant, independent of how many blocks or batches are submitted.
func (d *DACodecV7) EstimateBlockL1CommitCalldataSize(block *Block) (uint64, error) {
	return 0, nil
}

// EstimateChunkL1CommitCalldataSize calculates the approximate calldata size needed to commit a chunk to L1.
// Note: For CodecV7 the calldata size is constant, independent of how many blocks or batches are submitted. There is
// no notion of chunks in this version.
func (d *DACodecV7) EstimateChunkL1CommitCalldataSize(chunk *Chunk) (uint64, error) {
	return 0, nil
}

// EstimateBatchL1CommitCalldataSize calculates the approximate calldata size needed to commit this batch to L1:
// one version byte plus the encoded batch header.
// Note: For CodecV7 the calldata size is constant, independent of how many blocks or batches are submitted.
func (d *DACodecV7) EstimateBatchL1CommitCalldataSize(batch *Batch) (uint64, error) {
	return 1 + daBatchV7EncodedLength, nil
}
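
// For illustration only: if the encoded daBatchV7 header consists of a version
// byte, a uint64 batch index, a blob versioned hash, and a parent batch hash
// (1 + 8 + 32 + 32 = 73 bytes), the estimate would come to 74 bytes of
// calldata; see daBatchV7EncodedLength for the authoritative value.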

// EstimateChunkL1CommitGas calculates the approximate total L1 commit gas for this chunk.
// Note: For CodecV7 the calldata size is constant, independent of how many blocks or batches are submitted. There is
// no notion of chunks in this version.
func (d *DACodecV7) EstimateChunkL1CommitGas(chunk *Chunk) (uint64, error) {
	return 0, nil
}

// EstimateBatchL1CommitGas calculates the approximate total L1 commit gas for this batch.
func (d *DACodecV7) EstimateBatchL1CommitGas(batch *Batch) (uint64, error) {
	// TODO: adjust this after contracts are implemented
	var totalL1CommitGas uint64

	// Add extra gas costs
	totalL1CommitGas += extraGasCost // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc.
	totalL1CommitGas += 4 * coldSloadGas // 4 one-time cold SLOADs for commitBatch
	totalL1CommitGas += sstoreGas // 1 SSTORE
	totalL1CommitGas += baseTxGas // base gas for the tx
	totalL1CommitGas += calldataNonZeroByteGas // version byte in calldata

	return totalL1CommitGas, nil
}
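
// For a rough sense of scale, assuming the constants carry the standard EVM
// values (cold SLOAD 2,100 gas, tx base 21,000 gas, 16 gas per nonzero
// calldata byte), everything above except extraGasCost and sstoreGas sums to
// 4*2,100 + 21,000 + 16 = 29,416 gas.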

// JSONFromBytes converts the bytes to a DABatch and then marshals it to JSON.
func (d *DACodecV7) JSONFromBytes(data []byte) ([]byte, error) {
	batch, err := d.NewDABatchFromBytes(data)
	if err != nil {
		return nil, fmt.Errorf("failed to decode DABatch from bytes: %w", err)
	}

	jsonBytes, err := json.Marshal(batch)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal DABatch to JSON, version %d, hash %s: %w", batch.Version(), batch.Hash(), err)
	}

	return jsonBytes, nil
}