ETH Price: $3,083.32 (-1.61%)
Gas: 2 Gwei

Transaction Decoder

Block:
19745281 at Apr-27-2024 07:47:59 AM +UTC
Transaction Fee:
0.001523523478733698 ETH $4.70
Gas Used:
230,698 Gas / 6.603973501 Gwei

Emitted Events:

84 DiamondProxy.0x8f2916b2f2d78cc5890ead36c06c0f6d5d112c7e103589947e8e2f0d6eddb763( 0x8f2916b2f2d78cc5890ead36c06c0f6d5d112c7e103589947e8e2f0d6eddb763, 0x0000000000000000000000000000000000000000000000000000000000074643, 0x61c2e41e1bf5ded0d946856978ce9cc62666904627358cede9b46d37cde7dcee, 0xe2dccf25ca3d1d3c55f8a91e3afa0f83831bd1451d0744a2993d369bcf3945f9 )

Account State Difference:

  Address   Before After State Difference Code
0x0D3250c3...Ef790ec99
(zkSync Era: Batcher)
77.641699219371228346 Eth
Nonce: 17116
77.640175695892232504 Eth
Nonce: 17117
0.001523523478995842
0x32400084...60a000324
(zkSync Era: Diamond Proxy)
(beaverbuild)
9.360517098468275081 Eth9.360747796468275081 Eth0.000230698
0xa8CB082A...D63aDc1bD
(zkSync Era: Validator Timelock 3)

Execution Trace

ValidatorTimelock.commitBatches( [{name:batchNumber, type:uint64, order:1, indexed:false, value:476738, valueString:476738}, {name:batchHash, type:bytes32, order:2, indexed:false, value:5E5656114621A991309DA48BC225DB16AE82DDE68C2D3C9BEF53CAEE9C8247AF, valueString:5E5656114621A991309DA48BC225DB16AE82DDE68C2D3C9BEF53CAEE9C8247AF}, {name:indexRepeatedStorageChanges, type:uint64, order:3, indexed:false, value:310264353, valueString:310264353}, {name:numberOfLayer1Txs, type:uint256, order:4, indexed:false, value:26, valueString:26}, {name:priorityOperationsHash, type:bytes32, order:5, indexed:false, value:D06B954BB2C43F4B085044EA24AD74D35143DFD084769A87767CD0304BBF5370, valueString:D06B954BB2C43F4B085044EA24AD74D35143DFD084769A87767CD0304BBF5370}, {name:l2LogsTreeRoot, type:bytes32, order:6, indexed:false, value:46BE1CA3265C725BE954BE289D0A458D753981D73760B1CDACC71C7185B6AC84, valueString:46BE1CA3265C725BE954BE289D0A458D753981D73760B1CDACC71C7185B6AC84}, {name:timestamp, type:uint256, order:7, indexed:false, value:1714203364, valueString:1714203364}, {name:commitment, type:bytes32, order:8, indexed:false, value:C73D8916733BE1D8B8E2BE3EDEB5EA0623115AB192321C26020B20C052D9EE95, valueString:C73D8916733BE1D8B8E2BE3EDEB5EA0623115AB192321C26020B20C052D9EE95}], _newBatchesData= )
  • DiamondProxy.701f58c5( )
    • ExecutorFacet.commitBatches( _lastCommittedBatchData=[{name:batchNumber, type:uint64, order:1, indexed:false, value:476738, valueString:476738}, {name:batchHash, type:bytes32, order:2, indexed:false, value:5E5656114621A991309DA48BC225DB16AE82DDE68C2D3C9BEF53CAEE9C8247AF, valueString:5E5656114621A991309DA48BC225DB16AE82DDE68C2D3C9BEF53CAEE9C8247AF}, {name:indexRepeatedStorageChanges, type:uint64, order:3, indexed:false, value:310264353, valueString:310264353}, {name:numberOfLayer1Txs, type:uint256, order:4, indexed:false, value:26, valueString:26}, {name:priorityOperationsHash, type:bytes32, order:5, indexed:false, value:D06B954BB2C43F4B085044EA24AD74D35143DFD084769A87767CD0304BBF5370, valueString:D06B954BB2C43F4B085044EA24AD74D35143DFD084769A87767CD0304BBF5370}, {name:l2LogsTreeRoot, type:bytes32, order:6, indexed:false, value:46BE1CA3265C725BE954BE289D0A458D753981D73760B1CDACC71C7185B6AC84, valueString:46BE1CA3265C725BE954BE289D0A458D753981D73760B1CDACC71C7185B6AC84}, {name:timestamp, type:uint256, order:7, indexed:false, value:1714203364, valueString:1714203364}, {name:commitment, type:bytes32, order:8, indexed:false, value:C73D8916733BE1D8B8E2BE3EDEB5EA0623115AB192321C26020B20C052D9EE95, valueString:C73D8916733BE1D8B8E2BE3EDEB5EA0623115AB192321C26020B20C052D9EE95}], _newBatchesData= )
      • 0x273bdccdd979510adf4fb801d92f64b243c01fe2.00000000( )
      • 0x000000000000000000000000000000000000000a.011cf20d( )
      • 0x273bdccdd979510adf4fb801d92f64b243c01fe2.00000000( )
      • 0x000000000000000000000000000000000000000a.0147ad55( )
      • 0x273bdccdd979510adf4fb801d92f64b243c01fe2.00000000( )
        File 1 of 3: ValidatorTimelock
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {Ownable2Step} from "@openzeppelin/contracts/access/Ownable2Step.sol";
        import {LibMap} from "./libraries/LibMap.sol";
        import {IExecutor} from "./interfaces/IExecutor.sol";
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        /// @notice Intermediate smart contract between the validator EOA account and the zkSync smart contract.
        /// @dev The primary purpose of this contract is to provide a trustless means of delaying batch execution without
        /// modifying the main zkSync contract. As such, even if this contract is compromised, it will not impact the main
        /// contract.
        /// @dev zkSync actively monitors the chain activity and reacts to any suspicious activity by freezing the chain.
        /// This allows time for investigation and mitigation before resuming normal operations.
        /// @dev The contract overloads all of the 4 methods, that are used in state transition. When the batch is committed,
        /// the timestamp is stored for it. Later, when the owner calls the batch execution, the contract checks that batch
        /// was committed not earlier than X time ago.
        contract ValidatorTimelock is IExecutor, Ownable2Step {
            using LibMap for LibMap.Uint32Map;
            /// @dev Part of the IBase interface. Not used in this contract.
            string public constant override getName = "ValidatorTimelock";
            /// @notice The delay between committing and executing batches is changed.
            event NewExecutionDelay(uint256 _newExecutionDelay);
            /// @notice A new validator has been added.
            event ValidatorAdded(address _addedValidator);
            /// @notice A validator has been removed.
            event ValidatorRemoved(address _removedValidator);
            /// @notice Error for when an address is already a validator.
            error AddressAlreadyValidator();
            /// @notice Error for when an address is not a validator.
            error ValidatorDoesNotExist();
            /// @dev The main zkSync smart contract.
            address public immutable zkSyncContract;
            /// @dev The mapping of L2 batch number => timestamp when it was committed.
            /// Timestamps are truncated to uint32 (see commitBatches), eight per storage slot.
            LibMap.Uint32Map internal committedBatchTimestamp;
            /// @dev The delay (in seconds) between committing and executing batches.
            uint32 public executionDelay;
            /// @dev Mapping denoting if an address is also a validator
            mapping(address => bool) public validators;
            /// @dev Sets the initial owner, the target zkSync contract, the execution delay,
            /// and marks every address in `_validators` as an allowed validator.
            constructor(address _initialOwner, address _zkSyncContract, uint32 _executionDelay, address[] memory _validators) {
                _transferOwnership(_initialOwner);
                zkSyncContract = _zkSyncContract;
                executionDelay = _executionDelay;
                for (uint256 i = 0; i < _validators.length; i++) {
                    validators[_validators[i]] = true;
                }
            }
            /// @dev Sets an address as a validator. Reverts with AddressAlreadyValidator
            /// if the address is already registered.
            function addValidator(address _newValidator) external onlyOwner {
                if (validators[_newValidator]) {
                    revert AddressAlreadyValidator();
                }
                validators[_newValidator] = true;
                emit ValidatorAdded(_newValidator);
            }
            /// @dev Removes an address as a validator. Reverts with ValidatorDoesNotExist
            /// if the address is not currently registered.
            function removeValidator(address _validator) external onlyOwner {
                if (!validators[_validator]) {
                    revert ValidatorDoesNotExist();
                }
                validators[_validator] = false;
                emit ValidatorRemoved(_validator);
            }
            /// @dev Set the delay between committing and executing batches.
            function setExecutionDelay(uint32 _executionDelay) external onlyOwner {
                executionDelay = _executionDelay;
                emit NewExecutionDelay(_executionDelay);
            }
            /// @notice Checks if the caller is a validator.
            modifier onlyValidator() {
                require(validators[msg.sender] == true, "8h");
                _;
            }
            /// @dev Returns the timestamp when `_l2BatchNumber` was committed.
            /// Returns 0 if the batch was never committed through this contract.
            function getCommittedBatchTimestamp(uint256 _l2BatchNumber) external view returns (uint256) {
                return committedBatchTimestamp.get(_l2BatchNumber);
            }
            /// @dev Records the timestamp for all provided committed batches and make
            /// a call to the zkSync contract with the same calldata.
            /// Note: the first (StoredBatchInfo) argument is only forwarded, never read here,
            /// so it is intentionally unnamed.
            function commitBatches(
                StoredBatchInfo calldata,
                CommitBatchInfo[] calldata _newBatchesData
            ) external onlyValidator {
                unchecked {
                    // This contract is only a temporary solution, that hopefully will be disabled until 2106 year, so...
                    // It is safe to cast.
                    uint32 timestamp = uint32(block.timestamp);
                    for (uint256 i = 0; i < _newBatchesData.length; ++i) {
                        committedBatchTimestamp.set(_newBatchesData[i].batchNumber, timestamp);
                    }
                }
                _propagateToZkSync();
            }
            /// @dev Make a call to the zkSync contract with the same calldata.
            /// Note: If the batch is reverted, it needs to be committed first before the execution.
            /// So it's safe to not override the committed batches.
            function revertBatches(uint256) external onlyValidator {
                _propagateToZkSync();
            }
            /// @dev Make a call to the zkSync contract with the same calldata.
            /// Note: We don't track the time when batches are proven, since all information about
            /// the batch is known on the commit stage and the proof is not finalized (may be reverted).
            function proveBatches(
                StoredBatchInfo calldata,
                StoredBatchInfo[] calldata,
                ProofInput calldata
            ) external onlyValidator {
                _propagateToZkSync();
            }
            /// @dev Check that batches were committed at least X time ago and
            /// make a call to the zkSync contract with the same calldata.
            function executeBatches(StoredBatchInfo[] calldata _newBatchesData) external onlyValidator {
                uint256 delay = executionDelay; // uint32
                unchecked {
                    for (uint256 i = 0; i < _newBatchesData.length; ++i) {
                        uint256 commitBatchTimestamp = committedBatchTimestamp.get(_newBatchesData[i].batchNumber);
                        // Note: if the `commitBatchTimestamp` is zero, that means either:
                        // * The batch was committed, but not through this contract.
                        // * The batch wasn't committed at all, so execution will fail in the zkSync contract.
                        // We allow executing such batches.
                        require(block.timestamp >= commitBatchTimestamp + delay, "5c"); // The delay is not passed
                    }
                }
                _propagateToZkSync();
            }
            /// @dev Call the zkSync contract with the same calldata as this contract was called.
            /// Note: it is called the zkSync contract, not delegatecalled!
            /// The full calldata (selector + arguments) is forwarded verbatim, and whatever
            /// the zkSync contract returns or reverts with is bubbled up unchanged.
            function _propagateToZkSync() internal {
                address contractAddress = zkSyncContract;
                assembly {
                    // Copy function signature and arguments from calldata at zero position into memory at pointer position
                    calldatacopy(0, 0, calldatasize())
                    // Call method of the zkSync contract returns 0 on error
                    let result := call(gas(), contractAddress, 0, 0, calldatasize(), 0, 0)
                    // Get the size of the last return data
                    let size := returndatasize()
                    // Copy the size length of bytes from return data at zero position to pointer position
                    returndatacopy(0, 0, size)
                    // Depending on the result value
                    switch result
                    case 0 {
                        // End execution and revert state changes
                        revert(0, size)
                    }
                    default {
                        // Return data with length of size at pointers position
                        return(0, size)
                    }
                }
            }
        }
        // SPDX-License-Identifier: MIT
        // OpenZeppelin Contracts (last updated v4.9.0) (access/Ownable2Step.sol)
        pragma solidity ^0.8.0;
        import "./Ownable.sol";
        /**
         * @dev Two-phase ownership transfer extension of {Ownable}.
         *
         * The current owner nominates a successor via {transferOwnership}; the
         * nomination only takes effect once the successor itself calls
         * {acceptOwnership}. This prevents ownership from being handed to an
         * address that cannot act on it.
         *
         * Used through inheritance; all {Ownable} functions remain available.
         */
        abstract contract Ownable2Step is Ownable {
            address private _pendingOwner;
            event OwnershipTransferStarted(address indexed previousOwner, address indexed newOwner);
            /**
             * @dev Returns the address nominated to become the next owner.
             */
            function pendingOwner() public view virtual returns (address) {
                return _pendingOwner;
            }
            /**
             * @dev Nominates `newOwner` as the pending owner, overwriting any
             * earlier nomination. Restricted to the current owner.
             */
            function transferOwnership(address newOwner) public virtual override onlyOwner {
                _pendingOwner = newOwner;
                emit OwnershipTransferStarted(owner(), newOwner);
            }
            /**
             * @dev Assigns ownership to `newOwner` and clears any pending
             * nomination. Internal; no access restriction.
             */
            function _transferOwnership(address newOwner) internal virtual override {
                delete _pendingOwner;
                super._transferOwnership(newOwner);
            }
            /**
             * @dev Lets the nominated pending owner claim ownership.
             */
            function acceptOwnership() public virtual {
                address claimant = _msgSender();
                require(pendingOwner() == claimant, "Ownable2Step: caller is not the new owner");
                _transferOwnership(claimant);
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /// @notice Storage library packing eight uint32 values into each 256-bit slot.
        /// @author Matter Labs
        /// @dev This library is an adaptation of the corresponding Solady library (https://github.com/vectorized/solady/blob/main/src/utils/LibMap.sol)
        /// @custom:security-contact [email protected]
        library LibMap {
            /// @dev A uint32 map in storage.
            struct Uint32Map {
                mapping(uint256 packedIndex => uint256 eightPackedValues) map;
            }
            /// @dev Retrieves the uint32 value at a specific index from the Uint32Map.
            /// @param _map The Uint32Map instance containing the packed uint32 values.
            /// @param _index The index of the uint32 value to retrieve.
            /// @return result The uint32 value at the specified index (zero if never set).
            function get(Uint32Map storage _map, uint256 _index) internal view returns (uint32 result) {
                unchecked {
                    // Eight 32-bit values share one 256-bit slot, so the slot key is
                    // `_index >> 3` (i.e. `_index / 8`).
                    uint256 packed = _map.map[_index >> 3];
                    // The lane within the slot starts `(_index % 8) * 32` bits from the
                    // low end; `<< 5` is the multiplication by 32.
                    uint256 shift = (_index % 8) << 5;
                    // The truncating cast keeps only the 32 bits of interest.
                    result = uint32(packed >> shift);
                }
            }
            /// @dev Updates the uint32 value at `_index` in `map`, leaving the other
            /// seven values sharing the slot untouched.
            /// @param _map The Uint32Map instance containing the packed uint32 values.
            /// @param _index The index of the uint32 value to set.
            /// @param _value The new value at the specified index.
            function set(Uint32Map storage _map, uint256 _index, uint32 _value) internal {
                unchecked {
                    // Same slot/lane arithmetic as in `get`: slot key is `_index / 8`,
                    // bit offset is `(_index % 8) * 32`.
                    uint256 slotKey = _index >> 3;
                    uint256 shift = (_index % 8) << 5;
                    uint256 packed = _map.map[slotKey];
                    // Clear the target 32-bit lane with an inverted mask, then OR the
                    // new value into the cleared lane.
                    uint256 laneMask = uint256(type(uint32).max) << shift;
                    _map.map[slotKey] = (packed & ~laneMask) | (uint256(_value) << shift);
                }
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {IBase} from "./IBase.sol";
        /// @dev Enum used by L2 System Contracts to differentiate logs.
        enum SystemLogKey {
            L2_TO_L1_LOGS_TREE_ROOT_KEY,
            TOTAL_L2_TO_L1_PUBDATA_KEY,
            STATE_DIFF_HASH_KEY,
            PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY,
            PREV_BATCH_HASH_KEY,
            CHAINED_PRIORITY_TXN_HASH_KEY,
            NUMBER_OF_LAYER_1_TXS_KEY,
            BLOB_ONE_HASH_KEY,
            BLOB_TWO_HASH_KEY,
            EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY
        }
        /// @dev Enum used to determine the source of pubdata. At first we will support calldata and blobs but this can be extended.
        enum PubdataSource {
            Calldata,
            Blob
        }
        /// @dev Aggregated result of processing a batch's L2 -> L1 system logs.
        struct LogProcessingOutput {
            uint256 numberOfLayer1Txs;
            bytes32 chainedPriorityTxsHash;
            bytes32 previousBatchHash;
            bytes32 pubdataHash;
            bytes32 stateDiffHash;
            bytes32 l2LogsTreeRoot;
            uint256 packedBatchAndL2BlockTimestamp;
            bytes32 blob1Hash;
            bytes32 blob2Hash;
        }
        /// @dev Offset used to pull Address From Log. Equal to 4 (bytes for isService)
        uint256 constant L2_LOG_ADDRESS_OFFSET = 4;
        /// @dev Offset used to pull Key From Log. Equal to 4 (bytes for isService) + 20 (bytes for address)
        uint256 constant L2_LOG_KEY_OFFSET = 24;
        /// @dev Offset used to pull Value From Log. Equal to 4 (bytes for isService) + 20 (bytes for address) + 32 (bytes for key)
        uint256 constant L2_LOG_VALUE_OFFSET = 56;
        /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
        /// point evaluation precompile
        uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
        /// @dev Packed pubdata commitments.
        /// @dev Format: list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)) = 144 bytes
        uint256 constant PUBDATA_COMMITMENT_SIZE = 144;
        /// @dev Offset in pubdata commitment of blobs for claimed value
        uint256 constant PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET = 16;
        /// @dev Offset in pubdata commitment of blobs for kzg commitment
        uint256 constant PUBDATA_COMMITMENT_COMMITMENT_OFFSET = 48;
        /// @dev Max number of blobs currently supported
        uint256 constant MAX_NUMBER_OF_BLOBS = 2;
        /// @title The interface of the zkSync Executor contract capable of processing events emitted in the zkSync protocol.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        interface IExecutor is IBase {
            /// @notice Rollup batch stored data
            /// @param batchNumber Rollup batch number
            /// @param batchHash Hash of L2 batch
            /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more
            /// @param numberOfLayer1Txs Number of priority operations to be processed
            /// @param priorityOperationsHash Hash of all priority operations from this batch
            /// @param l2LogsTreeRoot Root hash of tree that contains L2 -> L1 messages from this batch
            /// @param timestamp Rollup batch timestamp, have the same format as Ethereum batch constant
            /// @param commitment Verified input for the zkSync circuit
            struct StoredBatchInfo {
                uint64 batchNumber;
                bytes32 batchHash;
                uint64 indexRepeatedStorageChanges;
                uint256 numberOfLayer1Txs;
                bytes32 priorityOperationsHash;
                bytes32 l2LogsTreeRoot;
                uint256 timestamp;
                bytes32 commitment;
            }
            /// @notice Data needed to commit new batch
            /// @param batchNumber Number of the committed batch
            /// @param timestamp Unix timestamp denoting the start of the batch execution
            /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more
            /// @param newStateRoot The state root of the full state tree
            /// @param numberOfLayer1Txs Number of priority operations to be processed
            /// @param priorityOperationsHash Hash of all priority operations from this batch
            /// @param bootloaderHeapInitialContentsHash Hash of the initial contents of the bootloader heap. In practice it serves as the commitment to the transactions in the batch.
            /// @param eventsQueueStateHash Hash of the events queue state. In practice it serves as the commitment to the events in the batch.
            /// @param systemLogs concatenation of all L2 -> L1 system logs in the batch
            /// @param pubdataCommitments Packed pubdata commitments/data.
            /// @dev pubdataCommitments format: This will always start with a 1 byte pubdataSource flag. Current allowed values are 0 (calldata) or 1 (blobs)
            ///                             kzg: list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes) = 144 bytes
            ///                             calldata: pubdataCommitments.length - 1 - 32 bytes of pubdata
            ///                                       and 32 bytes appended to serve as the blob commitment part for the aux output part of the batch commitment
            /// @dev For 2 blobs we will be sending 288 bytes of calldata instead of the full amount for pubdata.
            /// @dev When using calldata, we only need to send one blob commitment since the max number of bytes in calldata fits in a single blob and we can pull the
            ///     linear hash from the system logs
            struct CommitBatchInfo {
                uint64 batchNumber;
                uint64 timestamp;
                uint64 indexRepeatedStorageChanges;
                bytes32 newStateRoot;
                uint256 numberOfLayer1Txs;
                bytes32 priorityOperationsHash;
                bytes32 bootloaderHeapInitialContentsHash;
                bytes32 eventsQueueStateHash;
                bytes systemLogs;
                bytes pubdataCommitments;
            }
            /// @notice Recursive proof input data (individual commitments are constructed onchain)
            struct ProofInput {
                uint256[] recursiveAggregationInput;
                uint256[] serializedProof;
            }
            /// @notice Function called by the operator to commit new batches. It is responsible for:
            /// - Verifying the correctness of their timestamps.
            /// - Processing their L2->L1 logs.
            /// - Storing batch commitments.
            /// @param _lastCommittedBatchData Stored data of the last committed batch.
            /// @param _newBatchesData Data of the new batches to be committed.
            function commitBatches(
                StoredBatchInfo calldata _lastCommittedBatchData,
                CommitBatchInfo[] calldata _newBatchesData
            ) external;
            /// @notice Batches commitment verification.
            /// @dev Only verifies batch commitments without any other processing.
            /// @param _prevBatch Stored data of the last committed batch.
            /// @param _committedBatches Stored data of the committed batches.
            /// @param _proof The zero knowledge proof.
            function proveBatches(
                StoredBatchInfo calldata _prevBatch,
                StoredBatchInfo[] calldata _committedBatches,
                ProofInput calldata _proof
            ) external;
            /// @notice The function called by the operator to finalize (execute) batches. It is responsible for:
            /// - Processing all pending operations (completing priority requests).
            /// - Finalizing this batch (i.e. allowing to withdraw funds from the system)
            /// @param _batchesData Data of the batches to be executed.
            function executeBatches(StoredBatchInfo[] calldata _batchesData) external;
            /// @notice Reverts unexecuted batches
            /// @param _newLastBatch batch number after which batches should be reverted
            /// NOTE: Doesn't delete the stored data about batches, but only decreases
            /// counters that are responsible for the number of batches
            function revertBatches(uint256 _newLastBatch) external;
            /// @notice Event emitted when a batch is committed
            /// @param batchNumber Number of the batch committed
            /// @param batchHash Hash of the L2 batch
            /// @param commitment Calculated input for the zkSync circuit
            /// @dev It has the name "BlockCommit" and not "BatchCommit" due to backward compatibility considerations
            event BlockCommit(uint256 indexed batchNumber, bytes32 indexed batchHash, bytes32 indexed commitment);
            /// @notice Event emitted when batches are verified
            /// @param previousLastVerifiedBatch Batch number of the previous last verified batch
            /// @param currentLastVerifiedBatch Batch number of the current last verified batch
            /// @dev It has the name "BlocksVerification" and not "BatchesVerification" due to backward compatibility considerations
            event BlocksVerification(uint256 indexed previousLastVerifiedBatch, uint256 indexed currentLastVerifiedBatch);
            /// @notice Event emitted when a batch is executed
            /// @param batchNumber Number of the batch executed
            /// @param batchHash Hash of the L2 batch
            /// @param commitment Verified input for the zkSync circuit
            /// @dev It has the name "BlockExecution" and not "BatchExecution" due to backward compatibility considerations
            event BlockExecution(uint256 indexed batchNumber, bytes32 indexed batchHash, bytes32 indexed commitment);
            /// @notice Event emitted when batches are reverted
            /// @param totalBatchesCommitted Total number of committed batches after the revert
            /// @param totalBatchesVerified Total number of verified batches after the revert
            /// @param totalBatchesExecuted Total number of executed batches
            /// @dev It has the name "BlocksRevert" and not "BatchesRevert" due to backward compatibility considerations
            event BlocksRevert(uint256 totalBatchesCommitted, uint256 totalBatchesVerified, uint256 totalBatchesExecuted);
        }
        // SPDX-License-Identifier: MIT
        // OpenZeppelin Contracts (last updated v4.9.0) (access/Ownable.sol)
        pragma solidity ^0.8.0;
        import "../utils/Context.sol";
        /**
         * @dev Minimal single-owner access control module.
         *
         * Stores one privileged account (the owner), initially the deployer, and
         * exposes the `onlyOwner` modifier so inheriting contracts can restrict
         * selected functions to that account. The owner can later be replaced via
         * {transferOwnership} or removed entirely via {renounceOwnership}.
         *
         * This module is used through inheritance.
         */
        abstract contract Ownable is Context {
            address private _owner;
            event OwnershipTransferred(address indexed previousOwner, address indexed newOwner);
            /**
             * @dev Makes the deployer the first owner.
             */
            constructor() {
                _transferOwnership(_msgSender());
            }
            /**
             * @dev Restricts the decorated function to the current owner.
             */
            modifier onlyOwner() {
                _checkOwner();
                _;
            }
            /**
             * @dev Current owner's address.
             */
            function owner() public view virtual returns (address) {
                return _owner;
            }
            /**
             * @dev Reverts unless the caller is the current owner.
             */
            function _checkOwner() internal view virtual {
                require(owner() == _msgSender(), "Ownable: caller is not the owner");
            }
            /**
             * @dev Permanently gives up ownership by assigning it to the zero
             * address. Afterwards no `onlyOwner` function can be called again.
             * Restricted to the current owner.
             */
            function renounceOwnership() public virtual onlyOwner {
                _transferOwnership(address(0));
            }
            /**
             * @dev Hands ownership to `newOwner`. The zero address is rejected so
             * that renouncing must go through {renounceOwnership} explicitly.
             * Restricted to the current owner.
             */
            function transferOwnership(address newOwner) public virtual onlyOwner {
                require(newOwner != address(0), "Ownable: new owner is the zero address");
                _transferOwnership(newOwner);
            }
            /**
             * @dev Unrestricted ownership assignment; records the change via the
             * {OwnershipTransferred} event.
             */
            function _transferOwnership(address newOwner) internal virtual {
                address previousOwner = _owner;
                _owner = newOwner;
                emit OwnershipTransferred(previousOwner, newOwner);
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: UNLICENSED
        /// @title The interface of the zkSync contract, responsible for the main zkSync logic.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        /// @dev NOTE(review): the contact address above was redacted by the block
        /// explorer's email obfuscation — restore the real address from the
        /// canonical repository before reusing this source.
        interface IBase {
            /// @return Returns facet name. Each diamond facet implementing this
            /// interface reports a human-readable identifier through this getter.
            function getName() external view returns (string memory);
        }
        // SPDX-License-Identifier: MIT
        // OpenZeppelin Contracts (last updated v4.9.4) (utils/Context.sol)
        pragma solidity ^0.8.0;
        /**
         * @dev Provides information about the current execution context, including the
         * sender of the transaction and its data. While these are generally available
         * via msg.sender and msg.data, they should not be accessed in such a direct
         * manner, since when dealing with meta-transactions the account sending and
         * paying for execution may not be the actual sender (as far as an application
         * is concerned).
         *
         * This contract is only required for intermediate, library-like contracts.
         */
        abstract contract Context {
            /// @dev Effective sender of the current call. Here simply `msg.sender`;
            /// declared virtual so meta-transaction-aware contexts may override it.
            function _msgSender() internal view virtual returns (address) {
                return msg.sender;
            }
            /// @dev Effective calldata of the current call. Here simply `msg.data`.
            function _msgData() internal view virtual returns (bytes calldata) {
                return msg.data;
            }
            /// @dev Number of trailing calldata bytes that belong to a context
            /// suffix. 0 here; overriding contexts may report a non-zero length.
            function _contextSuffixLength() internal view virtual returns (uint256) {
                return 0;
            }
        }
        

        File 2 of 3: DiamondProxy
        // SPDX-License-Identifier: MIT
        // OpenZeppelin Contracts (last updated v4.8.0) (utils/math/SafeCast.sol)
        // This file was procedurally generated from scripts/generate/templates/SafeCast.js.
        pragma solidity ^0.8.0;
        /**
         * @dev Wrappers over Solidity's uintXX/intXX casting operators with added overflow
         * checks.
         *
         * Downcasting from uint256/int256 in Solidity does not revert on overflow. This can
         * easily result in undesired exploitation or bugs, since developers usually
         * assume that overflows raise errors. `SafeCast` restores this intuition by
         * reverting the transaction when such an operation overflows.
         *
         * Using this library instead of the unchecked operations eliminates an entire
         * class of bugs, so it's recommended to use it always.
         *
         * Can be combined with {SafeMath} and {SignedSafeMath} to extend it to smaller types, by performing
         * all math on `uint256` and `int256` and then downcasting.
         */
        library SafeCast {
            /// @dev Casts `value` down to uint248; reverts when it does not fit.
            function toUint248(uint256 value) internal pure returns (uint248 downcasted) {
                downcasted = uint248(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 248 bits");
            }
            /// @dev Casts `value` down to uint240; reverts when it does not fit.
            function toUint240(uint256 value) internal pure returns (uint240 downcasted) {
                downcasted = uint240(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 240 bits");
            }
            /// @dev Casts `value` down to uint232; reverts when it does not fit.
            function toUint232(uint256 value) internal pure returns (uint232 downcasted) {
                downcasted = uint232(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 232 bits");
            }
            /// @dev Casts `value` down to uint224; reverts when it does not fit.
            function toUint224(uint256 value) internal pure returns (uint224 downcasted) {
                downcasted = uint224(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 224 bits");
            }
            /// @dev Casts `value` down to uint216; reverts when it does not fit.
            function toUint216(uint256 value) internal pure returns (uint216 downcasted) {
                downcasted = uint216(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 216 bits");
            }
            /// @dev Casts `value` down to uint208; reverts when it does not fit.
            function toUint208(uint256 value) internal pure returns (uint208 downcasted) {
                downcasted = uint208(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 208 bits");
            }
            /// @dev Casts `value` down to uint200; reverts when it does not fit.
            function toUint200(uint256 value) internal pure returns (uint200 downcasted) {
                downcasted = uint200(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 200 bits");
            }
            /// @dev Casts `value` down to uint192; reverts when it does not fit.
            function toUint192(uint256 value) internal pure returns (uint192 downcasted) {
                downcasted = uint192(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 192 bits");
            }
            /// @dev Casts `value` down to uint184; reverts when it does not fit.
            function toUint184(uint256 value) internal pure returns (uint184 downcasted) {
                downcasted = uint184(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 184 bits");
            }
            /// @dev Casts `value` down to uint176; reverts when it does not fit.
            function toUint176(uint256 value) internal pure returns (uint176 downcasted) {
                downcasted = uint176(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 176 bits");
            }
            /// @dev Casts `value` down to uint168; reverts when it does not fit.
            function toUint168(uint256 value) internal pure returns (uint168 downcasted) {
                downcasted = uint168(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 168 bits");
            }
            /// @dev Casts `value` down to uint160; reverts when it does not fit.
            function toUint160(uint256 value) internal pure returns (uint160 downcasted) {
                downcasted = uint160(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 160 bits");
            }
            /// @dev Casts `value` down to uint152; reverts when it does not fit.
            function toUint152(uint256 value) internal pure returns (uint152 downcasted) {
                downcasted = uint152(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 152 bits");
            }
            /// @dev Casts `value` down to uint144; reverts when it does not fit.
            function toUint144(uint256 value) internal pure returns (uint144 downcasted) {
                downcasted = uint144(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 144 bits");
            }
            /// @dev Casts `value` down to uint136; reverts when it does not fit.
            function toUint136(uint256 value) internal pure returns (uint136 downcasted) {
                downcasted = uint136(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 136 bits");
            }
            /// @dev Casts `value` down to uint128; reverts when it does not fit.
            function toUint128(uint256 value) internal pure returns (uint128 downcasted) {
                downcasted = uint128(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 128 bits");
            }
            /// @dev Casts `value` down to uint120; reverts when it does not fit.
            function toUint120(uint256 value) internal pure returns (uint120 downcasted) {
                downcasted = uint120(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 120 bits");
            }
            /// @dev Casts `value` down to uint112; reverts when it does not fit.
            function toUint112(uint256 value) internal pure returns (uint112 downcasted) {
                downcasted = uint112(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 112 bits");
            }
            /// @dev Casts `value` down to uint104; reverts when it does not fit.
            function toUint104(uint256 value) internal pure returns (uint104 downcasted) {
                downcasted = uint104(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 104 bits");
            }
            /// @dev Casts `value` down to uint96; reverts when it does not fit.
            function toUint96(uint256 value) internal pure returns (uint96 downcasted) {
                downcasted = uint96(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 96 bits");
            }
            /// @dev Casts `value` down to uint88; reverts when it does not fit.
            function toUint88(uint256 value) internal pure returns (uint88 downcasted) {
                downcasted = uint88(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 88 bits");
            }
            /// @dev Casts `value` down to uint80; reverts when it does not fit.
            function toUint80(uint256 value) internal pure returns (uint80 downcasted) {
                downcasted = uint80(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 80 bits");
            }
            /// @dev Casts `value` down to uint72; reverts when it does not fit.
            function toUint72(uint256 value) internal pure returns (uint72 downcasted) {
                downcasted = uint72(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 72 bits");
            }
            /// @dev Casts `value` down to uint64; reverts when it does not fit.
            function toUint64(uint256 value) internal pure returns (uint64 downcasted) {
                downcasted = uint64(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 64 bits");
            }
            /// @dev Casts `value` down to uint56; reverts when it does not fit.
            function toUint56(uint256 value) internal pure returns (uint56 downcasted) {
                downcasted = uint56(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 56 bits");
            }
            /// @dev Casts `value` down to uint48; reverts when it does not fit.
            function toUint48(uint256 value) internal pure returns (uint48 downcasted) {
                downcasted = uint48(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 48 bits");
            }
            /// @dev Casts `value` down to uint40; reverts when it does not fit.
            function toUint40(uint256 value) internal pure returns (uint40 downcasted) {
                downcasted = uint40(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 40 bits");
            }
            /// @dev Casts `value` down to uint32; reverts when it does not fit.
            function toUint32(uint256 value) internal pure returns (uint32 downcasted) {
                downcasted = uint32(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 32 bits");
            }
            /// @dev Casts `value` down to uint24; reverts when it does not fit.
            function toUint24(uint256 value) internal pure returns (uint24 downcasted) {
                downcasted = uint24(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 24 bits");
            }
            /// @dev Casts `value` down to uint16; reverts when it does not fit.
            function toUint16(uint256 value) internal pure returns (uint16 downcasted) {
                downcasted = uint16(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 16 bits");
            }
            /// @dev Casts `value` down to uint8; reverts when it does not fit.
            function toUint8(uint256 value) internal pure returns (uint8 downcasted) {
                downcasted = uint8(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 8 bits");
            }
            /// @dev Reinterprets a signed `value` as uint256; reverts when negative.
            function toUint256(int256 value) internal pure returns (uint256) {
                require(value > -1, "SafeCast: value must be positive");
                return uint256(value);
            }
            /**
             * @dev Returns the downcasted int248 from int256, reverting on
             * overflow (when the input is less than smallest int248 or
             * greater than largest int248).
             *
             * Counterpart to Solidity's `int248` operator.
             *
             * Requirements:
             *
             * - input must fit into 248 bits
             *
             * _Available since v4.7._
             */
            function toInt248(int256 value) internal pure returns (int248 downcasted) {
                downcasted = int248(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 248 bits");
            }
            /**
             * @dev Returns the downcasted int240 from int256, reverting on
             * overflow (when the input is less than smallest int240 or
             * greater than largest int240).
             *
             * Counterpart to Solidity's `int240` operator.
             *
             * Requirements:
             *
             * - input must fit into 240 bits
             *
             * _Available since v4.7._
             */
            function toInt240(int256 value) internal pure returns (int240 downcasted) {
                downcasted = int240(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 240 bits");
            }
            /**
             * @dev Returns the downcasted int232 from int256, reverting on
             * overflow (when the input is less than smallest int232 or
             * greater than largest int232).
             *
             * Counterpart to Solidity's `int232` operator.
             *
             * Requirements:
             *
             * - input must fit into 232 bits
             *
             * _Available since v4.7._
             */
            function toInt232(int256 value) internal pure returns (int232 downcasted) {
                downcasted = int232(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 232 bits");
            }
            /**
             * @dev Returns the downcasted int224 from int256, reverting on
             * overflow (when the input is less than smallest int224 or
             * greater than largest int224).
             *
             * Counterpart to Solidity's `int224` operator.
             *
             * Requirements:
             *
             * - input must fit into 224 bits
             *
             * _Available since v4.7._
             */
            function toInt224(int256 value) internal pure returns (int224 downcasted) {
                downcasted = int224(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 224 bits");
            }
            /**
             * @dev Returns the downcasted int216 from int256, reverting on
             * overflow (when the input is less than smallest int216 or
             * greater than largest int216).
             *
             * Counterpart to Solidity's `int216` operator.
             *
             * Requirements:
             *
             * - input must fit into 216 bits
             *
             * _Available since v4.7._
             */
            function toInt216(int256 value) internal pure returns (int216 downcasted) {
                downcasted = int216(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 216 bits");
            }
            /**
             * @dev Returns the downcasted int208 from int256, reverting on
             * overflow (when the input is less than smallest int208 or
             * greater than largest int208).
             *
             * Counterpart to Solidity's `int208` operator.
             *
             * Requirements:
             *
             * - input must fit into 208 bits
             *
             * _Available since v4.7._
             */
            function toInt208(int256 value) internal pure returns (int208 downcasted) {
                downcasted = int208(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 208 bits");
            }
            /**
             * @dev Returns the downcasted int200 from int256, reverting on
             * overflow (when the input is less than smallest int200 or
             * greater than largest int200).
             *
             * Counterpart to Solidity's `int200` operator.
             *
             * Requirements:
             *
             * - input must fit into 200 bits
             *
             * _Available since v4.7._
             */
            function toInt200(int256 value) internal pure returns (int200 downcasted) {
                downcasted = int200(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 200 bits");
            }
            /**
             * @dev Returns the downcasted int192 from int256, reverting on
             * overflow (when the input is less than smallest int192 or
             * greater than largest int192).
             *
             * Counterpart to Solidity's `int192` operator.
             *
             * Requirements:
             *
             * - input must fit into 192 bits
             *
             * _Available since v4.7._
             */
            function toInt192(int256 value) internal pure returns (int192 downcasted) {
                downcasted = int192(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 192 bits");
            }
            /**
             * @dev Returns the downcasted int184 from int256, reverting on
             * overflow (when the input is less than smallest int184 or
             * greater than largest int184).
             *
             * Counterpart to Solidity's `int184` operator.
             *
             * Requirements:
             *
             * - input must fit into 184 bits
             *
             * _Available since v4.7._
             */
            function toInt184(int256 value) internal pure returns (int184 downcasted) {
                downcasted = int184(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 184 bits");
            }
            /**
             * @dev Returns the downcasted int176 from int256, reverting on
             * overflow (when the input is less than smallest int176 or
             * greater than largest int176).
             *
             * Counterpart to Solidity's `int176` operator.
             *
             * Requirements:
             *
             * - input must fit into 176 bits
             *
             * _Available since v4.7._
             */
            function toInt176(int256 value) internal pure returns (int176 downcasted) {
                downcasted = int176(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 176 bits");
            }
            /**
             * @dev Returns the downcasted int168 from int256, reverting on
             * overflow (when the input is less than smallest int168 or
             * greater than largest int168).
             *
             * Counterpart to Solidity's `int168` operator.
             *
             * Requirements:
             *
             * - input must fit into 168 bits
             *
             * _Available since v4.7._
             */
            function toInt168(int256 value) internal pure returns (int168 downcasted) {
                downcasted = int168(value);
                require(downcasted == value, "SafeCast: value doesn't fit in 168 bits");
            }
            /**
             * @dev Narrowing conversion from int256 to int160. Reverts whenever
             * `value` cannot be represented by an int160 (i.e. it needs more
             * than 160 bits).
             *
             * _Available since v4.7._
             */
            function toInt160(int256 value) internal pure returns (int160 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int160).min && value <= type(int160).max, "SafeCast: value doesn't fit in 160 bits");
                downcasted = int160(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int152. Reverts whenever
             * `value` cannot be represented by an int152 (i.e. it needs more
             * than 152 bits).
             *
             * _Available since v4.7._
             */
            function toInt152(int256 value) internal pure returns (int152 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int152).min && value <= type(int152).max, "SafeCast: value doesn't fit in 152 bits");
                downcasted = int152(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int144. Reverts whenever
             * `value` cannot be represented by an int144 (i.e. it needs more
             * than 144 bits).
             *
             * _Available since v4.7._
             */
            function toInt144(int256 value) internal pure returns (int144 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int144).min && value <= type(int144).max, "SafeCast: value doesn't fit in 144 bits");
                downcasted = int144(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int136. Reverts whenever
             * `value` cannot be represented by an int136 (i.e. it needs more
             * than 136 bits).
             *
             * _Available since v4.7._
             */
            function toInt136(int256 value) internal pure returns (int136 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int136).min && value <= type(int136).max, "SafeCast: value doesn't fit in 136 bits");
                downcasted = int136(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int128. Reverts whenever
             * `value` cannot be represented by an int128 (i.e. it needs more
             * than 128 bits).
             *
             * _Available since v3.1._
             */
            function toInt128(int256 value) internal pure returns (int128 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int128).min && value <= type(int128).max, "SafeCast: value doesn't fit in 128 bits");
                downcasted = int128(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int120. Reverts whenever
             * `value` cannot be represented by an int120 (i.e. it needs more
             * than 120 bits).
             *
             * _Available since v4.7._
             */
            function toInt120(int256 value) internal pure returns (int120 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int120).min && value <= type(int120).max, "SafeCast: value doesn't fit in 120 bits");
                downcasted = int120(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int112. Reverts whenever
             * `value` cannot be represented by an int112 (i.e. it needs more
             * than 112 bits).
             *
             * _Available since v4.7._
             */
            function toInt112(int256 value) internal pure returns (int112 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int112).min && value <= type(int112).max, "SafeCast: value doesn't fit in 112 bits");
                downcasted = int112(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int104. Reverts whenever
             * `value` cannot be represented by an int104 (i.e. it needs more
             * than 104 bits).
             *
             * _Available since v4.7._
             */
            function toInt104(int256 value) internal pure returns (int104 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int104).min && value <= type(int104).max, "SafeCast: value doesn't fit in 104 bits");
                downcasted = int104(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int96. Reverts whenever
             * `value` cannot be represented by an int96 (i.e. it needs more
             * than 96 bits).
             *
             * _Available since v4.7._
             */
            function toInt96(int256 value) internal pure returns (int96 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int96).min && value <= type(int96).max, "SafeCast: value doesn't fit in 96 bits");
                downcasted = int96(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int88. Reverts whenever
             * `value` cannot be represented by an int88 (i.e. it needs more
             * than 88 bits).
             *
             * _Available since v4.7._
             */
            function toInt88(int256 value) internal pure returns (int88 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int88).min && value <= type(int88).max, "SafeCast: value doesn't fit in 88 bits");
                downcasted = int88(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int80. Reverts whenever
             * `value` cannot be represented by an int80 (i.e. it needs more
             * than 80 bits).
             *
             * _Available since v4.7._
             */
            function toInt80(int256 value) internal pure returns (int80 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int80).min && value <= type(int80).max, "SafeCast: value doesn't fit in 80 bits");
                downcasted = int80(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int72. Reverts whenever
             * `value` cannot be represented by an int72 (i.e. it needs more
             * than 72 bits).
             *
             * _Available since v4.7._
             */
            function toInt72(int256 value) internal pure returns (int72 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int72).min && value <= type(int72).max, "SafeCast: value doesn't fit in 72 bits");
                downcasted = int72(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int64. Reverts whenever
             * `value` cannot be represented by an int64 (i.e. it needs more
             * than 64 bits).
             *
             * _Available since v3.1._
             */
            function toInt64(int256 value) internal pure returns (int64 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int64).min && value <= type(int64).max, "SafeCast: value doesn't fit in 64 bits");
                downcasted = int64(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int56. Reverts whenever
             * `value` cannot be represented by an int56 (i.e. it needs more
             * than 56 bits).
             *
             * _Available since v4.7._
             */
            function toInt56(int256 value) internal pure returns (int56 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int56).min && value <= type(int56).max, "SafeCast: value doesn't fit in 56 bits");
                downcasted = int56(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int48. Reverts whenever
             * `value` cannot be represented by an int48 (i.e. it needs more
             * than 48 bits).
             *
             * _Available since v4.7._
             */
            function toInt48(int256 value) internal pure returns (int48 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int48).min && value <= type(int48).max, "SafeCast: value doesn't fit in 48 bits");
                downcasted = int48(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int40. Reverts whenever
             * `value` cannot be represented by an int40 (i.e. it needs more
             * than 40 bits).
             *
             * _Available since v4.7._
             */
            function toInt40(int256 value) internal pure returns (int40 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int40).min && value <= type(int40).max, "SafeCast: value doesn't fit in 40 bits");
                downcasted = int40(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int32. Reverts whenever
             * `value` cannot be represented by an int32 (i.e. it needs more
             * than 32 bits).
             *
             * _Available since v3.1._
             */
            function toInt32(int256 value) internal pure returns (int32 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int32).min && value <= type(int32).max, "SafeCast: value doesn't fit in 32 bits");
                downcasted = int32(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int24. Reverts whenever
             * `value` cannot be represented by an int24 (i.e. it needs more
             * than 24 bits).
             *
             * _Available since v4.7._
             */
            function toInt24(int256 value) internal pure returns (int24 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int24).min && value <= type(int24).max, "SafeCast: value doesn't fit in 24 bits");
                downcasted = int24(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int16. Reverts whenever
             * `value` cannot be represented by an int16 (i.e. it needs more
             * than 16 bits).
             *
             * _Available since v3.1._
             */
            function toInt16(int256 value) internal pure returns (int16 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int16).min && value <= type(int16).max, "SafeCast: value doesn't fit in 16 bits");
                downcasted = int16(value);
            }
            /**
             * @dev Narrowing conversion from int256 to int8. Reverts whenever
             * `value` cannot be represented by an int8 (i.e. it needs more
             * than 8 bits).
             *
             * _Available since v3.1._
             */
            function toInt8(int256 value) internal pure returns (int8 downcasted) {
                // Bounds-check before narrowing so the cast below is always lossless.
                require(value >= type(int8).min && value <= type(int8).max, "SafeCast: value doesn't fit in 8 bits");
                downcasted = int8(value);
            }
            /**
             * @dev Converts a uint256 into a signed int256, reverting when the
             * input exceeds `type(int256).max` and therefore has no signed
             * representation.
             *
             * _Available since v3.0._
             */
            function toInt256(uint256 value) internal pure returns (int256) {
                // A uint256 fits into int256 exactly when its top bit is clear;
                // after the raw cast, that condition is equivalent to the result
                // being non-negative.
                int256 result = int256(value);
                require(result >= 0, "SafeCast: value doesn't fit in an int256");
                return result;
            }
        }
        pragma solidity ^0.8.0;
        // SPDX-License-Identifier: MIT
        /// @notice Arithmetic helpers that deliberately skip Solidity 0.8+ overflow checks.
        /// @dev Callers are responsible for guaranteeing the operations cannot wrap
        /// (or for accepting wrapping semantics, e.g. in bounded loop counters).
        library UncheckedMath {
            /// @dev Returns `_number + 1` with wrapping (modulo 2**256) semantics.
            function uncheckedInc(uint256 _number) internal pure returns (uint256) {
                return uncheckedAdd(_number, 1);
            }

            /// @dev Returns `_lhs + _rhs` with wrapping (modulo 2**256) semantics.
            function uncheckedAdd(uint256 _lhs, uint256 _rhs) internal pure returns (uint256) {
                unchecked {
                    return _lhs + _rhs;
                }
            }
        }
        pragma solidity ^0.8.0;
        // SPDX-License-Identifier: MIT
        import "./libraries/Diamond.sol";
        /// @title Diamond Proxy Contract (EIP-2535)
        /// @author Matter Labs
        /// @notice Stateless entry point: every call is routed to the facet registered
        /// for `msg.sig` in the shared `Diamond` storage and executed via `delegatecall`.
        contract DiamondProxy {
            /// @param _chainId Chain id this deployment is intended for; must equal `block.chainid`.
            /// @param _diamondCut Initial facet/selector configuration (and optional init delegatecall).
            constructor(uint256 _chainId, Diamond.DiamondCutData memory _diamondCut) {
                // Check that the contract is deployed on the expected chain.
                // Thus, the contract deployed by the same Create2 factory on the different chain will have different addresses!
                require(_chainId == block.chainid, "pr");
                Diamond.diamondCut(_diamondCut);
            }
            /// @dev 1. Find the facet for the function that is called.
            /// @dev 2. Delegate the execution to the found facet via `delegatecall`.
            /// @dev Reverts with "F" for unknown selectors and with "q1" when the diamond
            /// is frozen and the target facet is marked freezable.
            fallback() external payable {
                Diamond.DiamondStorage storage diamondStorage = Diamond.getDiamondStorage();
                // Check whether the data contains a "full" selector or it is empty.
                // Required because Diamond proxy finds a facet by function signature,
                // which is not defined for data length in range [1, 3].
                require(msg.data.length >= 4 || msg.data.length == 0, "Ut");
                // Get facet from function selector
                Diamond.SelectorToFacet memory facet = diamondStorage.selectorToFacet[msg.sig];
                address facetAddress = facet.facetAddress;
                require(facetAddress != address(0), "F"); // Proxy has no facet for this selector
                require(!diamondStorage.isFrozen || !facet.isFreezable, "q1"); // Facet is frozen
                assembly {
                    // The pointer to the free memory slot
                    let ptr := mload(0x40)
                    // Copy function signature and arguments from calldata at zero position into memory at pointer position
                    calldatacopy(ptr, 0, calldatasize())
                    // Delegatecall method of the implementation contract returns 0 on error
                    let result := delegatecall(gas(), facetAddress, ptr, calldatasize(), 0, 0)
                    // Get the size of the last return data
                    let size := returndatasize()
                    // Copy the size length of bytes from return data at zero position to pointer position
                    returndatacopy(ptr, 0, size)
                    // Depending on the result value
                    // (the facet's returndata is forwarded verbatim in both branches)
                    switch result
                    case 0 {
                        // End execution and revert state changes
                        revert(ptr, size)
                    }
                    default {
                        // Return data with length of size at pointers position
                        return(ptr, size)
                    }
                }
            }
        }
        pragma solidity ^0.8.0;
        // SPDX-License-Identifier: MIT
        import "@openzeppelin/contracts/utils/math/SafeCast.sol";
        import "../../common/libraries/UncheckedMath.sol";
        /// @author Matter Labs
        /// @notice The helper library for managing the EIP-2535 diamond proxy.
        /// @dev Maintains a bidirectional index (selector -> facet, facet -> selectors)
        /// plus a flat `facets` array; the swap-and-pop helpers below must keep all
        /// three structures consistent with each other.
        library Diamond {
            using UncheckedMath for uint256;
            using SafeCast for uint256;
            /// @dev Magic value that should be returned by diamond cut initialize contracts.
            /// @dev Used to distinguish calls to contracts that were supposed to be used as diamond initializer from other contracts.
            bytes32 constant DIAMOND_INIT_SUCCESS_RETURN_VALUE =
                0x33774e659306e47509050e97cb651e731180a42d458212294d30751925c551a2; // keccak256("diamond.zksync.init") - 1
            /// @dev Storage position of `DiamondStorage` structure.
            bytes32 constant DIAMOND_STORAGE_POSITION = 0xc8fcad8db84d3cc18b4c41d551ea0ee66dd599cde068d998e57d5e09332c131b; // keccak256("diamond.standard.diamond.storage") - 1;
            /// @dev Emitted once per successful `diamondCut`, echoing the applied changes.
            event DiamondCut(FacetCut[] facetCuts, address initAddress, bytes initCalldata);
            /// @dev Utility struct that contains associated facet & meta information of selector
            /// @param facetAddress address of the facet which is connected with selector
            /// @param selectorPosition index in `FacetToSelectors.selectors` array, where is selector stored
            /// @param isFreezable denotes whether the selector can be frozen.
            struct SelectorToFacet {
                address facetAddress;
                uint16 selectorPosition;
                bool isFreezable;
            }
            /// @dev Utility struct that contains associated selectors & meta information of facet
            /// @param selectors list of all selectors that belong to the facet
            /// @param facetPosition index in `DiamondStorage.facets` array, where is facet stored
            struct FacetToSelectors {
                bytes4[] selectors;
                uint16 facetPosition;
            }
            /// @notice The structure that holds all diamond proxy associated parameters
            /// @dev According to the EIP-2535 should be stored on a special storage key - `DIAMOND_STORAGE_POSITION`
            /// @param selectorToFacet A mapping from the selector to the facet address and its meta information
            /// @param facetToSelectors A mapping from facet address to its selector with meta information
            /// @param facets The array of all unique facet addresses that belong to the diamond proxy
            /// @param isFrozen Denotes whether the diamond proxy is frozen and all freezable facets are not accessible
            struct DiamondStorage {
                mapping(bytes4 => SelectorToFacet) selectorToFacet;
                mapping(address => FacetToSelectors) facetToSelectors;
                address[] facets;
                bool isFrozen;
            }
            /// @dev Parameters for diamond changes that touch one of the facets
            /// @param facet The address of facet that's affected by the cut
            /// @param action The action that is made on the facet
            /// @param isFreezable Denotes whether the facet & all their selectors can be frozen
            /// @param selectors An array of unique selectors that belongs to the facet address
            struct FacetCut {
                address facet;
                Action action;
                bool isFreezable;
                bytes4[] selectors;
            }
            /// @dev Structure of the diamond proxy changes
            /// @param facetCuts The set of changes (adding/removing/replacement) of implementation contracts
            /// @param initAddress The address that's delegate called after setting up new facet changes
            /// @param initCalldata Calldata for the delegate call to `initAddress`
            struct DiamondCutData {
                FacetCut[] facetCuts;
                address initAddress;
                bytes initCalldata;
            }
            /// @dev Type of change over diamond: add/replace/remove facets
            enum Action {
                Add,
                Replace,
                Remove
            }
            /// @return diamondStorage The pointer to the storage where all specific diamond proxy parameters stored
            function getDiamondStorage() internal pure returns (DiamondStorage storage diamondStorage) {
                bytes32 position = DIAMOND_STORAGE_POSITION;
                assembly {
                    // Rebind the storage pointer to the fixed EIP-2535 slot.
                    diamondStorage.slot := position
                }
            }
            /// @dev Add/replace/remove any number of selectors and optionally execute a function with delegatecall
            /// @param _diamondCut Diamond's facet changes and the parameters to optional initialization delegatecall
            /// @dev An empty `facetCuts` array is accepted: the loop body is skipped and
            /// only the (optional) initialization delegatecall runs.
            function diamondCut(DiamondCutData memory _diamondCut) internal {
                FacetCut[] memory facetCuts = _diamondCut.facetCuts;
                address initAddress = _diamondCut.initAddress;
                bytes memory initCalldata = _diamondCut.initCalldata;
                uint256 facetCutsLength = facetCuts.length;
                for (uint256 i = 0; i < facetCutsLength; i = i.uncheckedInc()) {
                    Action action = facetCuts[i].action;
                    address facet = facetCuts[i].facet;
                    bool isFacetFreezable = facetCuts[i].isFreezable;
                    bytes4[] memory selectors = facetCuts[i].selectors;
                    require(selectors.length > 0, "B"); // no functions for diamond cut
                    if (action == Action.Add) {
                        _addFunctions(facet, selectors, isFacetFreezable);
                    } else if (action == Action.Replace) {
                        _replaceFunctions(facet, selectors, isFacetFreezable);
                    } else if (action == Action.Remove) {
                        _removeFunctions(facet, selectors);
                    } else {
                        revert("C"); // undefined diamond cut action
                    }
                }
                _initializeDiamondCut(initAddress, initCalldata);
                emit DiamondCut(facetCuts, initAddress, initCalldata);
            }
            /// @dev Add new functions to the diamond proxy
            /// NOTE: expect but NOT enforce that `_selectors` is NON-EMPTY array
            function _addFunctions(
                address _facet,
                bytes4[] memory _selectors,
                bool _isFacetFreezable
            ) private {
                DiamondStorage storage ds = getDiamondStorage();
                require(_facet != address(0), "G"); // facet with zero address cannot be added
                // Add facet to the list of facets if the facet address is new one
                _saveFacetIfNew(_facet);
                uint256 selectorsLength = _selectors.length;
                for (uint256 i = 0; i < selectorsLength; i = i.uncheckedInc()) {
                    bytes4 selector = _selectors[i];
                    SelectorToFacet memory oldFacet = ds.selectorToFacet[selector];
                    require(oldFacet.facetAddress == address(0), "J"); // facet for this selector already exists
                    _addOneFunction(_facet, selector, _isFacetFreezable);
                }
            }
            /// @dev Change associated facets to already known function selectors
            /// NOTE: expect but NOT enforce that `_selectors` is NON-EMPTY array
            function _replaceFunctions(
                address _facet,
                bytes4[] memory _selectors,
                bool _isFacetFreezable
            ) private {
                DiamondStorage storage ds = getDiamondStorage();
                require(_facet != address(0), "K"); // cannot replace facet with zero address
                uint256 selectorsLength = _selectors.length;
                for (uint256 i = 0; i < selectorsLength; i = i.uncheckedInc()) {
                    bytes4 selector = _selectors[i];
                    SelectorToFacet memory oldFacet = ds.selectorToFacet[selector];
                    require(oldFacet.facetAddress != address(0), "L"); // it is impossible to replace the facet with zero address
                    _removeOneFunction(oldFacet.facetAddress, selector);
                    // Add facet to the list of facets if the facet address is a new one
                    // (called inside the loop: removal above may have deleted `_facet`
                    // from `facets` when it lost its last selector)
                    _saveFacetIfNew(_facet);
                    _addOneFunction(_facet, selector, _isFacetFreezable);
                }
            }
            /// @dev Remove association with function and facet
            /// NOTE: expect but NOT enforce that `_selectors` is NON-EMPTY array
            function _removeFunctions(address _facet, bytes4[] memory _selectors) private {
                DiamondStorage storage ds = getDiamondStorage();
                // For removal the target facet is looked up per selector, so the
                // caller-supplied facet must be the zero address.
                require(_facet == address(0), "a1"); // facet address must be zero
                uint256 selectorsLength = _selectors.length;
                for (uint256 i = 0; i < selectorsLength; i = i.uncheckedInc()) {
                    bytes4 selector = _selectors[i];
                    SelectorToFacet memory oldFacet = ds.selectorToFacet[selector];
                    require(oldFacet.facetAddress != address(0), "a2"); // Can't delete a non-existent facet
                    _removeOneFunction(oldFacet.facetAddress, selector);
                }
            }
            /// @dev Add address to the list of known facets if it is not on the list yet
            /// NOTE: should be called ONLY before adding a new selector associated with the address
            function _saveFacetIfNew(address _facet) private {
                DiamondStorage storage ds = getDiamondStorage();
                uint256 selectorsLength = ds.facetToSelectors[_facet].selectors.length;
                // If there are no selectors associated with facet then save facet as new one
                if (selectorsLength == 0) {
                    // `toUint16` reverts if more than 65535 facets would be registered.
                    ds.facetToSelectors[_facet].facetPosition = ds.facets.length.toUint16();
                    ds.facets.push(_facet);
                }
            }
            /// @dev Add one function to the already known facet
            /// NOTE: It is expected but NOT enforced that:
            /// - `_facet` is NON-ZERO address
            /// - `_facet` is already stored address in `DiamondStorage.facets`
            /// - `_selector` is NOT associated by another facet
            function _addOneFunction(
                address _facet,
                bytes4 _selector,
                bool _isSelectorFreezable
            ) private {
                DiamondStorage storage ds = getDiamondStorage();
                // `toUint16` reverts if a facet would exceed 65535 selectors.
                uint16 selectorPosition = (ds.facetToSelectors[_facet].selectors.length).toUint16();
                // if selectorPosition is nonzero, it means it is not a new facet
                // so the freezability of the first selector must be matched to _isSelectorFreezable
                // so all the selectors in a facet will have the same freezability
                if (selectorPosition != 0) {
                    bytes4 selector0 = ds.facetToSelectors[_facet].selectors[0];
                    require(_isSelectorFreezable == ds.selectorToFacet[selector0].isFreezable, "J1");
                }
                ds.selectorToFacet[_selector] = SelectorToFacet({
                    facetAddress: _facet,
                    selectorPosition: selectorPosition,
                    isFreezable: _isSelectorFreezable
                });
                ds.facetToSelectors[_facet].selectors.push(_selector);
            }
            /// @dev Remove one associated function with facet
            /// NOTE: It is expected but NOT enforced that `_facet` is NON-ZERO address
            /// @dev Uses the swap-and-pop pattern; reverts (underflow) if `_facet`
            /// has no selectors at all.
            function _removeOneFunction(address _facet, bytes4 _selector) private {
                DiamondStorage storage ds = getDiamondStorage();
                // Get index of `FacetToSelectors.selectors` of the selector and last element of array
                uint256 selectorPosition = ds.selectorToFacet[_selector].selectorPosition;
                uint256 lastSelectorPosition = ds.facetToSelectors[_facet].selectors.length - 1;
                // If the selector is not at the end of the array then move the last element to the selector position
                if (selectorPosition != lastSelectorPosition) {
                    bytes4 lastSelector = ds.facetToSelectors[_facet].selectors[lastSelectorPosition];
                    ds.facetToSelectors[_facet].selectors[selectorPosition] = lastSelector;
                    ds.selectorToFacet[lastSelector].selectorPosition = selectorPosition.toUint16();
                }
                // Remove last element from the selectors array
                ds.facetToSelectors[_facet].selectors.pop();
                // Finally, clean up the association with facet
                delete ds.selectorToFacet[_selector];
                // If there are no selectors for facet then remove the facet from the list of known facets
                if (lastSelectorPosition == 0) {
                    _removeFacet(_facet);
                }
            }
            /// @dev remove facet from the list of known facets
            /// NOTE: It is expected but NOT enforced that there are no selectors associated with `_facet`
            /// @dev Swap-and-pop over `DiamondStorage.facets`, mirroring `_removeOneFunction`.
            function _removeFacet(address _facet) private {
                DiamondStorage storage ds = getDiamondStorage();
                // Get index of `DiamondStorage.facets` of the facet and last element of array
                uint256 facetPosition = ds.facetToSelectors[_facet].facetPosition;
                uint256 lastFacetPosition = ds.facets.length - 1;
                // If the facet is not at the end of the array then move the last element to the facet position
                if (facetPosition != lastFacetPosition) {
                    address lastFacet = ds.facets[lastFacetPosition];
                    ds.facets[facetPosition] = lastFacet;
                    ds.facetToSelectors[lastFacet].facetPosition = facetPosition.toUint16();
                }
                // Remove last element from the facets array
                ds.facets.pop();
            }
            /// @dev Delegates call to the initialization address with provided calldata
            /// @dev Used as a final step of diamond cut to execute the logic of the initialization for changed facets
            function _initializeDiamondCut(address _init, bytes memory _calldata) private {
                if (_init == address(0)) {
                    require(_calldata.length == 0, "H"); // Non-empty calldata for zero address
                } else {
                    // Do not check whether `_init` is a contract since later we check that it returns data.
                    // (a delegatecall to an address without code succeeds with empty returndata,
                    // which fails the 32-byte length check below)
                    (bool success, bytes memory data) = _init.delegatecall(_calldata);
                    require(success, "I"); // delegatecall failed
                    // Check that called contract returns magic value to make sure that contract logic
                    // supposed to be used as diamond cut initializer.
                    require(data.length == 32, "lp");
                    require(abi.decode(data, (bytes32)) == DIAMOND_INIT_SUCCESS_RETURN_VALUE, "lp1");
                }
            }
        }
        

        File 3 of 3: ExecutorFacet
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        // Well-known addresses of the L2 system contracts (kernel space, below 2^16).
        /// @dev The formal address of the initial program of the system: the bootloader
        address constant L2_BOOTLOADER_ADDRESS = address(0x8001);
        /// @dev The address of the known code storage system contract
        address constant L2_KNOWN_CODE_STORAGE_SYSTEM_CONTRACT_ADDR = address(0x8004);
        /// @dev The address of the L2 deployer system contract.
        address constant L2_DEPLOYER_SYSTEM_CONTRACT_ADDR = address(0x8006);
        /// @dev The special reserved L2 address used as the sender of force-deployments. It is located in the
        /// system contracts space but doesn't have deployed bytecode.
        /// @dev The L2 deployer system contract allows changing bytecodes on any address if the `msg.sender` is this address.
        /// @dev So, whenever the governor wants to redeploy system contracts, it just initiates the L1 upgrade call to the
        /// deployer system contract via the L1 -> L2 transaction with `sender == L2_FORCE_DEPLOYER_ADDR`. For more details
        /// see the `diamond-initializers` contracts.
        address constant L2_FORCE_DEPLOYER_ADDR = address(0x8007);
        /// @dev The address of the special smart contract that can send arbitrary length message as an L2 log
        address constant L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR = address(0x8008);
        /// @dev The address of the eth token system contract
        address constant L2_ETH_TOKEN_SYSTEM_CONTRACT_ADDR = address(0x800a);
        /// @dev The address of the context system contract
        address constant L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR = address(0x800b);
        /// @dev The address of the pubdata chunk publisher contract
        address constant L2_PUBDATA_CHUNK_PUBLISHER_ADDR = address(0x8011);
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /**
         * @author Matter Labs
         * @custom:security-contact [email protected]
         * @notice The library for unchecked math.
         * @dev Results wrap modulo 2**256 — callers are responsible for ruling out overflow.
         */
        library UncheckedMath {
            /// @dev Returns `_number + 1` without Solidity 0.8 overflow checks.
            function uncheckedInc(uint256 _number) internal pure returns (uint256) {
                return uncheckedAdd(_number, 1);
            }
            /// @dev Returns `_lhs + _rhs` without Solidity 0.8 overflow checks.
            function uncheckedAdd(uint256 _lhs, uint256 _rhs) internal pure returns (uint256) {
                unchecked {
                    return _lhs + _rhs;
                }
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /**
         * @author Matter Labs
         * @custom:security-contact [email protected]
         * @dev The library provides a set of functions that help read data from an "abi.encodePacked" byte array.
         * @dev Each of the functions accepts the `bytes memory` and the offset where data should be read and returns a value of a certain type.
         *
         * @dev WARNING!
         * 1) Functions don't check the length of the bytes array, so it can go out of bounds.
         * The user of the library must check for bytes length before using any functions from the library!
         *
         * 2) Read variables are not cleaned up - https://docs.soliditylang.org/en/v0.8.16/internals/variable_cleanup.html.
         * Using data in inline assembly can lead to unexpected behavior!
         */
        library UnsafeBytes {
            /// @dev Reads 4 bytes starting at `_start` as a big-endian uint32.
            /// @return result the value read (upper bits of the stack word are NOT cleaned — see WARNING 2)
            /// @return offset the position right after the read value (`_start + 4`)
            function readUint32(bytes memory _bytes, uint256 _start) internal pure returns (uint32 result, uint256 offset) {
                assembly {
                    // `add(_bytes, offset)` skips the 32-byte length prefix plus `_start` bytes,
                    // so the mload's LOW 4 bytes are exactly the requested slice.
                    offset := add(_start, 4)
                    result := mload(add(_bytes, offset))
                }
            }
            /// @dev Reads 20 bytes starting at `_start` as an address (big-endian, low 20 bytes of the word).
            function readAddress(bytes memory _bytes, uint256 _start) internal pure returns (address result, uint256 offset) {
                assembly {
                    offset := add(_start, 20)
                    result := mload(add(_bytes, offset))
                }
            }
            /// @dev Reads a full 32-byte word starting at `_start` as a uint256.
            function readUint256(bytes memory _bytes, uint256 _start) internal pure returns (uint256 result, uint256 offset) {
                assembly {
                    offset := add(_start, 32)
                    result := mload(add(_bytes, offset))
                }
            }
            /// @dev Reads a full 32-byte word starting at `_start` as a bytes32.
            function readBytes32(bytes memory _bytes, uint256 _start) internal pure returns (bytes32 result, uint256 offset) {
                assembly {
                    offset := add(_start, 32)
                    result := mload(add(_bytes, offset))
                }
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /**
         * @custom:security-contact [email protected]
         * @dev Contract module that helps prevent reentrant calls to a function.
         *
         * Inheriting from `ReentrancyGuard` will make the {nonReentrant} modifier
         * available, which can be applied to functions to make sure there are no nested
         * (reentrant) calls to them.
         *
         * Note that because there is a single `nonReentrant` guard, functions marked as
         * `nonReentrant` may not call one another. This can be worked around by making
         * those functions `private`, and then adding `external` `nonReentrant` entry
         * points to them.
         *
         * TIP: If you would like to learn more about reentrancy and alternative ways
         * to protect against it, check out our blog post
         * https://blog.openzeppelin.com/reentrancy-after-istanbul/[Reentrancy After Istanbul].
         *
         * _Since v2.5.0:_ this module is now much more gas efficient, given net gas
         * metering changes introduced in the Istanbul hardfork.
         */
        abstract contract ReentrancyGuard {
            /// @dev Address of lock flag variable.
            /// @dev Flag is placed at random memory location to not interfere with Storage contract.
            /// @dev Unlike the OZ original, the lock lives at a fixed hashed storage slot (accessed via
            /// assembly) instead of a sequential slot, so it cannot collide with the diamond's AppStorage.
            // keccak256("ReentrancyGuard") - 1;
            uint256 private constant LOCK_FLAG_ADDRESS = 0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4;
            // solhint-disable-next-line max-line-length
            // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/566a774222707e424896c0c390a84dc3c13bdcb2/contracts/security/ReentrancyGuard.sol
            // The values being non-zero value makes deployment a bit more expensive,
            // but in exchange the refund on every call to nonReentrant will be lower in
            // amount. Since refunds are capped to a percentage of the total
            // transaction's gas, it is best to keep them low in cases like this one, to
            // increase the likelihood of the full refund coming into effect.
            uint256 private constant _NOT_ENTERED = 1;
            uint256 private constant _ENTERED = 2;
            /// @dev Initializes the guard slot; intended for use on constructor-like entry points.
            modifier reentrancyGuardInitializer() {
                _initializeReentrancyGuard();
                _;
            }
            function _initializeReentrancyGuard() private {
                uint256 lockSlotOldValue;
                // Storing an initial non-zero value makes deployment a bit more
                // expensive but in exchange every call to nonReentrant
                // will be cheaper.
                assembly {
                    lockSlotOldValue := sload(LOCK_FLAG_ADDRESS)
                    sstore(LOCK_FLAG_ADDRESS, _NOT_ENTERED)
                }
                // Check that storage slot for reentrancy guard is empty to rule out possibility of slot conflict
                require(lockSlotOldValue == 0, "1B");
            }
            /**
             * @dev Prevents a contract from calling itself, directly or indirectly.
             * Calling a `nonReentrant` function from another `nonReentrant`
             * function is not supported. It is possible to prevent this from happening
             * by making the `nonReentrant` function external, and make it call a
             * `private` function that does the actual work.
             */
            modifier nonReentrant() {
                uint256 _status;
                assembly {
                    _status := sload(LOCK_FLAG_ADDRESS)
                }
                // On the first call to nonReentrant, _notEntered will be true
                // NOTE(review): this also reverts with "r1" if the guard was never initialized (slot == 0).
                require(_status == _NOT_ENTERED, "r1");
                // Any calls to nonReentrant after this point will fail
                assembly {
                    sstore(LOCK_FLAG_ADDRESS, _ENTERED)
                }
                _;
                // By storing the original value once again, a refund is triggered (see
                // https://eips.ethereum.org/EIPS/eip-2200)
                assembly {
                    sstore(LOCK_FLAG_ADDRESS, _NOT_ENTERED)
                }
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /// @dev `keccak256("")`
        bytes32 constant EMPTY_STRING_KECCAK = 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470;
        /// @dev Bytes in raw L2 log
        /// @dev Equal to the bytes size of the tuple - (uint8 ShardId, bool isService, uint16 txNumberInBatch, address sender,
        /// bytes32 key, bytes32 value)
        uint256 constant L2_TO_L1_LOG_SERIALIZE_SIZE = 88;
        /// @dev The maximum length of the bytes array with L2 -> L1 logs
        uint256 constant MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES = 4 + L2_TO_L1_LOG_SERIALIZE_SIZE * 512;
        /// @dev The value of default leaf hash for L2 -> L1 logs Merkle tree
        /// @dev An incomplete fixed-size tree is filled with this value to be a full binary tree
        /// @dev Actually equal to the `keccak256(new bytes(L2_TO_L1_LOG_SERIALIZE_SIZE))`
        bytes32 constant L2_L1_LOGS_TREE_DEFAULT_LEAF_HASH = 0x72abee45b59e344af8a6e520241c4744aff26ed411f4c4b00f8af09adada43ba;
        // TODO: change constant to the real root hash of empty Merkle tree (SMA-184)
        bytes32 constant DEFAULT_L2_LOGS_TREE_ROOT_HASH = bytes32(0);
        /// @dev Denotes the type of the zkSync transaction that came from L1.
        uint256 constant PRIORITY_OPERATION_L2_TX_TYPE = 255;
        /// @dev Denotes the type of the zkSync transaction that is used for system upgrades.
        uint256 constant SYSTEM_UPGRADE_L2_TX_TYPE = 254;
        /// @dev The maximal allowed difference between protocol versions in an upgrade. The 100 gap is needed
        /// in case a protocol version has been tested on testnet, but then not launched on mainnet, e.g.
        /// due to a bug found.
        uint256 constant MAX_ALLOWED_PROTOCOL_VERSION_DELTA = 100;
        /// @dev The amount of time in seconds the validator has to process the priority transaction
        /// NOTE: The constant is set to zero for the Alpha release period
        uint256 constant PRIORITY_EXPIRATION = 0 days;
        /// @dev Timestamp - seconds since unix epoch.
        /// @dev Maximum age a committed batch timestamp may have relative to `block.timestamp` at commit time.
        uint256 constant COMMIT_TIMESTAMP_NOT_OLDER = 3 days;
        /// @dev Maximum available error between real commit batch timestamp and analog used in the verifier (in seconds)
        /// @dev Must be used cause miner's `block.timestamp` value can differ on some small value (as we know - 12 seconds)
        uint256 constant COMMIT_TIMESTAMP_APPROXIMATION_DELTA = 1 hours;
        /// @dev Shift to apply to verify public input before verifying.
        uint256 constant PUBLIC_INPUT_SHIFT = 32;
        /// @dev The maximum number of L2 gas that a user can request for an L2 transaction
        uint256 constant MAX_GAS_PER_TRANSACTION = 80000000;
        /// @dev Even though the price for 1 byte of pubdata is 16 L1 gas, we have a slightly increased
        /// value.
        uint256 constant L1_GAS_PER_PUBDATA_BYTE = 17;
        /// @dev The intrinsic cost of the L1->l2 transaction in computational L2 gas
        uint256 constant L1_TX_INTRINSIC_L2_GAS = 167157;
        /// @dev The intrinsic cost of the L1->l2 transaction in pubdata
        uint256 constant L1_TX_INTRINSIC_PUBDATA = 88;
        /// @dev The minimal base price for L1 transaction
        uint256 constant L1_TX_MIN_L2_GAS_BASE = 173484;
        /// @dev The additional number of L2 gas an L1->L2 transaction costs with each 544 bytes of its encoding
        uint256 constant L1_TX_DELTA_544_ENCODING_BYTES = 1656;
        /// @dev The number of L2 gas an L1->L2 transaction gains with each new factory dependency
        uint256 constant L1_TX_DELTA_FACTORY_DEPS_L2_GAS = 2473;
        /// @dev The number of pubdata bytes an L1->L2 transaction requires with each new factory dependency
        uint256 constant L1_TX_DELTA_FACTORY_DEPS_PUBDATA = 64;
        /// @dev The maximum number of new factory dependencies allowed in a single L1->L2 transaction
        uint256 constant MAX_NEW_FACTORY_DEPS = 32;
        /// @dev The L2 gasPricePerPubdata required to be used in bridges.
        uint256 constant REQUIRED_L2_GAS_PRICE_PER_PUBDATA = 800;
        /// @dev The mask which should be applied to the packed batch and L2 block timestamp in order
        /// to obtain the L2 block timestamp. Applying this mask is equivalent to calculating modulo 2**128
        uint256 constant PACKED_L2_BLOCK_TIMESTAMP_MASK = 0xffffffffffffffffffffffffffffffff;
        /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
        address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
        /// @dev The overhead for a transaction slot in L2 gas.
        /// It is roughly equal to 80kk/MAX_TRANSACTIONS_IN_BATCH, i.e. how many gas would an L1->L2 transaction
        /// need to pay to compensate for the batch being closed.
        /// @dev It is expected that the L1 contracts will enforce that the L2 gas price will be high enough to compensate
        /// the operator in case the batch is closed because of tx slots filling up.
        uint256 constant TX_SLOT_OVERHEAD_L2_GAS = 10000;
        /// @dev The overhead for each byte of the bootloader memory occupied by the encoding of the transaction.
        /// It is roughly equal to 80kk/BOOTLOADER_MEMORY_FOR_TXS, i.e. how many gas would an L1->L2 transaction
        /// need to pay to compensate for the batch being closed.
        /// @dev It is expected that the L1 contracts will enforce that the L2 gas price will be high enough to compensate
        /// the operator in case the batch is closed because of the memory for transactions being filled up.
        uint256 constant MEMORY_OVERHEAD_GAS = 10;
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {AppStorage} from "../Storage.sol";
        import {ReentrancyGuard} from "../../common/ReentrancyGuard.sol";
        /// @title Base contract containing functions accessible to the other facets.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        contract Base is ReentrancyGuard {
            // Single shared diamond storage struct; every facet reads and writes the same layout.
            AppStorage internal s;
            /// @notice Checks that the message sender is an active governor
            modifier onlyGovernor() {
                require(msg.sender == s.governor, "1g"); // only by governor
                _;
            }
            /// @notice Checks that the message sender is an active governor or admin
            modifier onlyGovernorOrAdmin() {
                require(msg.sender == s.governor || msg.sender == s.admin, "1k");
                _;
            }
            /// @notice Checks if validator is active
            modifier onlyValidator() {
                // `s.validators` is an allowlist mapping maintained elsewhere in the diamond.
                require(s.validators[msg.sender], "1h"); // validator is not active
                _;
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {Base} from "./Base.sol";
        import {COMMIT_TIMESTAMP_NOT_OLDER, COMMIT_TIMESTAMP_APPROXIMATION_DELTA, EMPTY_STRING_KECCAK, L2_TO_L1_LOG_SERIALIZE_SIZE, MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES, PACKED_L2_BLOCK_TIMESTAMP_MASK, PUBLIC_INPUT_SHIFT, POINT_EVALUATION_PRECOMPILE_ADDR} from "../Config.sol";
        import {IExecutor, L2_LOG_ADDRESS_OFFSET, L2_LOG_KEY_OFFSET, L2_LOG_VALUE_OFFSET, SystemLogKey, LogProcessingOutput, PubdataSource, BLS_MODULUS, PUBDATA_COMMITMENT_SIZE, PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET, PUBDATA_COMMITMENT_COMMITMENT_OFFSET, MAX_NUMBER_OF_BLOBS} from "../interfaces/IExecutor.sol";
        import {PriorityQueue, PriorityOperation} from "../libraries/PriorityQueue.sol";
        import {UncheckedMath} from "../../common/libraries/UncheckedMath.sol";
        import {UnsafeBytes} from "../../common/libraries/UnsafeBytes.sol";
        import {VerifierParams} from "../Storage.sol";
        import {L2_BOOTLOADER_ADDRESS, L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR, L2_PUBDATA_CHUNK_PUBLISHER_ADDR} from "../../common/L2ContractAddresses.sol";
        // While formally the following import is not used, it is needed to inherit documentation from it
        import {IBase} from "../interfaces/IBase.sol";
        /// @title zkSync Executor contract capable of processing events emitted in the zkSync protocol.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        contract ExecutorFacet is Base, IExecutor {
            using UncheckedMath for uint256;
            using PriorityQueue for PriorityQueue.Queue;
            /// @inheritdoc IBase
            string public constant override getName = "ExecutorFacet";
            /// @dev Process one batch commit using the previous batch StoredBatchInfo
            /// @dev returns new batch StoredBatchInfo
            /// @notice Does not change storage
            /// @param _previousBatch The already-committed batch this one must directly follow.
            /// @param _newBatch The batch data submitted by the validator.
            /// @param _expectedSystemContractUpgradeTxHash Non-zero only when this batch must carry the upgrade tx log.
            function _commitOneBatch(
                StoredBatchInfo memory _previousBatch,
                CommitBatchInfo calldata _newBatch,
                bytes32 _expectedSystemContractUpgradeTxHash
            ) internal view returns (StoredBatchInfo memory) {
                require(_newBatch.batchNumber == _previousBatch.batchNumber + 1, "f"); // only commit next batch
                // The first byte of pubdataCommitments selects how pubdata was published (calldata vs EIP-4844 blobs).
                uint8 pubdataSource = uint8(bytes1(_newBatch.pubdataCommitments[0]));
                require(pubdataSource == uint8(PubdataSource.Calldata) || pubdataSource == uint8(PubdataSource.Blob), "us");
                // Check that batch contain all meta information for L2 logs.
                // Get the chained hash of priority transaction hashes.
                LogProcessingOutput memory logOutput = _processL2Logs(_newBatch, _expectedSystemContractUpgradeTxHash);
                bytes32[] memory blobCommitments = new bytes32[](MAX_NUMBER_OF_BLOBS);
                bytes32[] memory blobHashes = new bytes32[](MAX_NUMBER_OF_BLOBS);
                if (pubdataSource == uint8(PubdataSource.Blob)) {
                    // We only want to include the actual blob linear hashes when we send pubdata via blobs.
                    // Otherwise we should be using bytes32(0)
                    blobHashes[0] = logOutput.blob1Hash;
                    blobHashes[1] = logOutput.blob2Hash;
                    // In this scenario, pubdataCommitments is a list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)) = 144 bytes
                    blobCommitments = _verifyBlobInformation(_newBatch.pubdataCommitments[1:], blobHashes);
                } else if (pubdataSource == uint8(PubdataSource.Calldata)) {
                    // In this scenario pubdataCommitments is actual pubdata consisting of l2 to l1 logs, l2 to l1 message, compressed smart contract bytecode, and compressed state diffs
                    // The trailing 32 bytes are excluded from the hash check and used below as the blob commitment.
                    require(
                        logOutput.pubdataHash ==
                            keccak256(_newBatch.pubdataCommitments[1:_newBatch.pubdataCommitments.length - 32]),
                        "wp"
                    );
                    blobHashes[0] = logOutput.blob1Hash;
                    blobCommitments[0] = bytes32(
                        _newBatch.pubdataCommitments[_newBatch.pubdataCommitments.length - 32:_newBatch
                            .pubdataCommitments
                            .length]
                    );
                }
                require(_previousBatch.batchHash == logOutput.previousBatchHash, "l");
                // Check that the priority operation hash in the L2 logs is as expected
                require(logOutput.chainedPriorityTxsHash == _newBatch.priorityOperationsHash, "t");
                // Check that the number of processed priority operations is as expected
                require(logOutput.numberOfLayer1Txs == _newBatch.numberOfLayer1Txs, "ta");
                // Check the timestamp of the new batch
                _verifyBatchTimestamp(logOutput.packedBatchAndL2BlockTimestamp, _newBatch.timestamp, _previousBatch.timestamp);
                // Create batch commitment for the proof verification
                bytes32 commitment = _createBatchCommitment(_newBatch, logOutput.stateDiffHash, blobCommitments, blobHashes);
                // The new state root is stored in the `batchHash` field of the StoredBatchInfo.
                return
                    StoredBatchInfo(
                        _newBatch.batchNumber,
                        _newBatch.newStateRoot,
                        _newBatch.indexRepeatedStorageChanges,
                        _newBatch.numberOfLayer1Txs,
                        _newBatch.priorityOperationsHash,
                        logOutput.l2LogsTreeRoot,
                        _newBatch.timestamp,
                        commitment
                    );
            }
            /// @notice checks that the timestamps of both the new batch and the new L2 block are correct.
            /// @param _packedBatchAndL2BlockTimestamp - packed batch and L2 block timestamp in a format of batchTimestamp * 2**128 + l2BatchTimestamp
            /// @param _expectedBatchTimestamp - expected batch timestamp
            /// @param _previousBatchTimestamp - the timestamp of the previous batch
            function _verifyBatchTimestamp(
                uint256 _packedBatchAndL2BlockTimestamp,
                uint256 _expectedBatchTimestamp,
                uint256 _previousBatchTimestamp
            ) internal view {
                // Check that the timestamp that came from the system context is expected
                // (the batch timestamp occupies the upper 128 bits of the packed value).
                uint256 batchTimestamp = _packedBatchAndL2BlockTimestamp >> 128;
                require(batchTimestamp == _expectedBatchTimestamp, "tb");
                // While the fact that _previousBatchTimestamp < batchTimestamp is already checked on L2,
                // we double check it here for clarity
                require(_previousBatchTimestamp < batchTimestamp, "h3");
                // The last L2 block timestamp occupies the lower 128 bits.
                uint256 lastL2BlockTimestamp = _packedBatchAndL2BlockTimestamp & PACKED_L2_BLOCK_TIMESTAMP_MASK;
                // All L2 blocks have timestamps within the range of [batchTimestamp, lastL2BlockTimestamp].
                // So here we need to only double check that:
                // - The timestamp of the batch is not too small.
                // - The timestamp of the last L2 block is not too big.
                require(block.timestamp - COMMIT_TIMESTAMP_NOT_OLDER <= batchTimestamp, "h1"); // New batch timestamp is too small
                require(lastL2BlockTimestamp <= block.timestamp + COMMIT_TIMESTAMP_APPROXIMATION_DELTA, "h2"); // The last L2 block timestamp is too big
            }
            /// @dev Check that L2 logs are proper and batch contain all meta information for them
            /// @dev The logs processed here should line up such that only one log for each key from the
            ///      SystemLogKey enum in Constants.sol is processed per new batch.
            /// @dev Data returned from here will be used to form the batch commitment.
            function _processL2Logs(
                CommitBatchInfo calldata _newBatch,
                bytes32 _expectedSystemContractUpgradeTxHash
            ) internal pure returns (LogProcessingOutput memory logOutput) {
                // Copy L2 to L1 logs into memory.
                bytes memory emittedL2Logs = _newBatch.systemLogs;
                // Used as bitmap to set/check log processing happens exactly once.
                // See SystemLogKey enum in Constants.sol for ordering.
                uint256 processedLogs;
                // linear traversal of the logs (each log is L2_TO_L1_LOG_SERIALIZE_SIZE = 88 bytes)
                for (uint256 i = 0; i < emittedL2Logs.length; i = i.uncheckedAdd(L2_TO_L1_LOG_SERIALIZE_SIZE)) {
                    // Extract the values to be compared to/used such as the log sender, key, and value
                    (address logSender, ) = UnsafeBytes.readAddress(emittedL2Logs, i + L2_LOG_ADDRESS_OFFSET);
                    (uint256 logKey, ) = UnsafeBytes.readUint256(emittedL2Logs, i + L2_LOG_KEY_OFFSET);
                    (bytes32 logValue, ) = UnsafeBytes.readBytes32(emittedL2Logs, i + L2_LOG_VALUE_OFFSET);
                    // Ensure that the log hasn't been processed already
                    require(!_checkBit(processedLogs, uint8(logKey)), "kp");
                    processedLogs = _setBit(processedLogs, uint8(logKey));
                    // Need to check that each log was sent by the correct address.
                    if (logKey == uint256(SystemLogKey.L2_TO_L1_LOGS_TREE_ROOT_KEY)) {
                        require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "lm");
                        logOutput.l2LogsTreeRoot = logValue;
                    } else if (logKey == uint256(SystemLogKey.TOTAL_L2_TO_L1_PUBDATA_KEY)) {
                        require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "ln");
                        logOutput.pubdataHash = logValue;
                    } else if (logKey == uint256(SystemLogKey.STATE_DIFF_HASH_KEY)) {
                        require(logSender == L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, "lb");
                        logOutput.stateDiffHash = logValue;
                    } else if (logKey == uint256(SystemLogKey.PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY)) {
                        require(logSender == L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR, "sc");
                        logOutput.packedBatchAndL2BlockTimestamp = uint256(logValue);
                    } else if (logKey == uint256(SystemLogKey.PREV_BATCH_HASH_KEY)) {
                        require(logSender == L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR, "sv");
                        logOutput.previousBatchHash = logValue;
                    } else if (logKey == uint256(SystemLogKey.CHAINED_PRIORITY_TXN_HASH_KEY)) {
                        require(logSender == L2_BOOTLOADER_ADDRESS, "bl");
                        logOutput.chainedPriorityTxsHash = logValue;
                    } else if (logKey == uint256(SystemLogKey.NUMBER_OF_LAYER_1_TXS_KEY)) {
                        require(logSender == L2_BOOTLOADER_ADDRESS, "bk");
                        logOutput.numberOfLayer1Txs = uint256(logValue);
                    } else if (logKey == uint256(SystemLogKey.BLOB_ONE_HASH_KEY)) {
                        require(logSender == L2_PUBDATA_CHUNK_PUBLISHER_ADDR, "pc");
                        logOutput.blob1Hash = logValue;
                    } else if (logKey == uint256(SystemLogKey.BLOB_TWO_HASH_KEY)) {
                        require(logSender == L2_PUBDATA_CHUNK_PUBLISHER_ADDR, "pd");
                        logOutput.blob2Hash = logValue;
                    } else if (logKey == uint256(SystemLogKey.EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY)) {
                        require(logSender == L2_BOOTLOADER_ADDRESS, "bu");
                        require(_expectedSystemContractUpgradeTxHash == logValue, "ut");
                    } else {
                        // unknown log key
                        revert("ul");
                    }
                }
                // We only require 9 logs to be checked, the 10th is if we are expecting a protocol upgrade
                // Without the protocol upgrade we expect 9 logs: 2^9 - 1 = 511
                // With the protocol upgrade we expect 10 logs: 2^10 - 1 = 1023
                if (_expectedSystemContractUpgradeTxHash == bytes32(0)) {
                    require(processedLogs == 511, "b7");
                } else {
                    require(processedLogs == 1023, "b8");
                }
            }
            /// @inheritdoc IExecutor
            function commitBatches(
                StoredBatchInfo memory _lastCommittedBatchData,
                CommitBatchInfo[] calldata _newBatchesData
            ) external nonReentrant onlyValidator {
                // With the new changes for EIP-4844, namely the restriction on number of blobs per block, we only allow for a single batch to be committed at a time.
                require(_newBatchesData.length == 1, "e4");
                // Check that we commit batches after last committed batch
                require(s.storedBatchHashes[s.totalBatchesCommitted] == _hashStoredBatchInfo(_lastCommittedBatchData), "i"); // incorrect previous batch data
                bytes32 systemContractsUpgradeTxHash = s.l2SystemContractsUpgradeTxHash;
                // Upgrades are rarely done so we optimize a case with no active system contracts upgrade.
                // The second disjunct means the upgrade tx was already included in a previously committed batch.
                if (systemContractsUpgradeTxHash == bytes32(0) || s.l2SystemContractsUpgradeBatchNumber != 0) {
                    _commitBatchesWithoutSystemContractsUpgrade(_lastCommittedBatchData, _newBatchesData);
                } else {
                    _commitBatchesWithSystemContractsUpgrade(
                        _lastCommittedBatchData,
                        _newBatchesData,
                        systemContractsUpgradeTxHash
                    );
                }
                s.totalBatchesCommitted = s.totalBatchesCommitted + _newBatchesData.length;
            }
            /// @dev Commits new batches without any system contracts upgrade.
            /// @param _lastCommittedBatchData The data of the last committed batch.
            /// @param _newBatchesData An array of batch data that needs to be committed.
            function _commitBatchesWithoutSystemContractsUpgrade(
                StoredBatchInfo memory _lastCommittedBatchData,
                CommitBatchInfo[] calldata _newBatchesData
            ) internal {
                for (uint256 i = 0; i < _newBatchesData.length; i = i.uncheckedInc()) {
                    // Each committed batch becomes the "previous" batch for the next iteration.
                    _lastCommittedBatchData = _commitOneBatch(_lastCommittedBatchData, _newBatchesData[i], bytes32(0));
                    s.storedBatchHashes[_lastCommittedBatchData.batchNumber] = _hashStoredBatchInfo(_lastCommittedBatchData);
                    emit BlockCommit(
                        _lastCommittedBatchData.batchNumber,
                        _lastCommittedBatchData.batchHash,
                        _lastCommittedBatchData.commitment
                    );
                }
            }
            /// @dev Commits new batches with a system contracts upgrade transaction.
            /// @param _lastCommittedBatchData The data of the last committed batch.
            /// @param _newBatchesData An array of batch data that needs to be committed.
            /// @param _systemContractUpgradeTxHash The transaction hash of the system contract upgrade.
            function _commitBatchesWithSystemContractsUpgrade(
                StoredBatchInfo memory _lastCommittedBatchData,
                CommitBatchInfo[] calldata _newBatchesData,
                bytes32 _systemContractUpgradeTxHash
            ) internal {
                // The system contract upgrade is designed to be executed atomically with the new bootloader, a default account,
                // ZKP verifier, and other system parameters. Hence, we ensure that the upgrade transaction is
                // carried out within the first batch committed after the upgrade.
                // While the logic of the contract ensures that the s.l2SystemContractsUpgradeBatchNumber is 0 when this function is called,
                // this check is added just in case. Since it is a hot read, it does not incur noticeable gas cost.
                require(s.l2SystemContractsUpgradeBatchNumber == 0, "ik");
                // Save the batch number where the upgrade transaction was executed.
                s.l2SystemContractsUpgradeBatchNumber = _newBatchesData[0].batchNumber;
                for (uint256 i = 0; i < _newBatchesData.length; i = i.uncheckedInc()) {
                    // The upgrade transaction must only be included in the first batch.
                    bytes32 expectedUpgradeTxHash = i == 0 ? _systemContractUpgradeTxHash : bytes32(0);
                    _lastCommittedBatchData = _commitOneBatch(
                        _lastCommittedBatchData,
                        _newBatchesData[i],
                        expectedUpgradeTxHash
                    );
                    s.storedBatchHashes[_lastCommittedBatchData.batchNumber] = _hashStoredBatchInfo(_lastCommittedBatchData);
                    emit BlockCommit(
                        _lastCommittedBatchData.batchNumber,
                        _lastCommittedBatchData.batchHash,
                        _lastCommittedBatchData.commitment
                    );
                }
            }
            /// @dev Pops the priority operations from the priority queue and returns a rolling hash of operations
            /// @param _nPriorityOps The number of operations to pop from the front of the queue.
            /// @return concatHash Rolling keccak256 hash over the canonical tx hashes of the popped
            /// operations, seeded with EMPTY_STRING_KECCAK (so zero operations yields that seed).
            function _collectOperationsFromPriorityQueue(uint256 _nPriorityOps) internal returns (bytes32 concatHash) {
                concatHash = EMPTY_STRING_KECCAK;
                for (uint256 i = 0; i < _nPriorityOps; i = i.uncheckedInc()) {
                    // popFront mutates the queue: these operations are permanently consumed.
                    PriorityOperation memory priorityOp = s.priorityQueue.popFront();
                    concatHash = keccak256(abi.encode(concatHash, priorityOp.canonicalTxHash));
                }
            }
            /// @dev Executes one batch
            /// @dev 1. Processes all pending operations (Complete priority requests)
            /// @dev 2. Finalizes batch on Ethereum
            /// @dev _executedBatchIdx is an index in the array of the batches that we want to execute together
            /// @param _storedBatch The stored data of the batch to execute; must hash to the committed record.
            function _executeOneBatch(StoredBatchInfo memory _storedBatch, uint256 _executedBatchIdx) internal {
                uint256 currentBatchNumber = _storedBatch.batchNumber;
                require(currentBatchNumber == s.totalBatchesExecuted + _executedBatchIdx + 1, "k"); // Execute batches in order
                require(
                    _hashStoredBatchInfo(_storedBatch) == s.storedBatchHashes[currentBatchNumber],
                    "exe10" // executing batch should be committed
                );
                // Consume this batch's L1 -> L2 priority operations and check that their rolling
                // hash matches the one recorded when the batch was committed.
                bytes32 priorityOperationsHash = _collectOperationsFromPriorityQueue(_storedBatch.numberOfLayer1Txs);
                require(priorityOperationsHash == _storedBatch.priorityOperationsHash, "x"); // priority operations hash does not match to expected
                // Save root hash of L2 -> L1 logs tree
                s.l2LogsRootHashes[currentBatchNumber] = _storedBatch.l2LogsTreeRoot;
            }
            /// @inheritdoc IExecutor
            function executeBatches(StoredBatchInfo[] calldata _batchesData) external nonReentrant onlyValidator {
                uint256 nBatches = _batchesData.length;
                for (uint256 i = 0; i < nBatches; i = i.uncheckedInc()) {
                    _executeOneBatch(_batchesData[i], i);
                    emit BlockExecution(_batchesData[i].batchNumber, _batchesData[i].batchHash, _batchesData[i].commitment);
                }
                uint256 newTotalBatchesExecuted = s.totalBatchesExecuted + nBatches;
                s.totalBatchesExecuted = newTotalBatchesExecuted;
                require(newTotalBatchesExecuted <= s.totalBatchesVerified, "n"); // Can't execute batches more than committed and proven currently.
                // Once the batch containing the system contracts upgrade tx has been executed,
                // clear the upgrade bookkeeping so a future upgrade can be scheduled.
                uint256 batchWhenUpgradeHappened = s.l2SystemContractsUpgradeBatchNumber;
                if (batchWhenUpgradeHappened != 0 && batchWhenUpgradeHappened <= newTotalBatchesExecuted) {
                    delete s.l2SystemContractsUpgradeTxHash;
                    delete s.l2SystemContractsUpgradeBatchNumber;
                }
            }
            /// @inheritdoc IExecutor
            function proveBatches(
                StoredBatchInfo calldata _prevBatch,
                StoredBatchInfo[] calldata _committedBatches,
                ProofInput calldata _proof
            ) external nonReentrant onlyValidator {
                // Save the variables into the stack to save gas on reading them later
                uint256 currentTotalBatchesVerified = s.totalBatchesVerified;
                uint256 committedBatchesLength = _committedBatches.length;
                // Save the variable from the storage to memory to save gas
                VerifierParams memory verifierParams = s.verifierParams;
                // Initialize the array that will be used as public input to the ZKP
                uint256[] memory proofPublicInput = new uint256[](committedBatchesLength);
                // Check that the batch passed by the validator is indeed the first unverified batch
                require(_hashStoredBatchInfo(_prevBatch) == s.storedBatchHashes[currentTotalBatchesVerified], "t1");
                bytes32 prevBatchCommitment = _prevBatch.commitment;
                for (uint256 i = 0; i < committedBatchesLength; i = i.uncheckedInc()) {
                    currentTotalBatchesVerified = currentTotalBatchesVerified.uncheckedInc();
                    // Each proven batch must be exactly the next committed batch on record.
                    require(
                        _hashStoredBatchInfo(_committedBatches[i]) == s.storedBatchHashes[currentTotalBatchesVerified],
                        "o1"
                    );
                    bytes32 currentBatchCommitment = _committedBatches[i].commitment;
                    proofPublicInput[i] = _getBatchProofPublicInput(
                        prevBatchCommitment,
                        currentBatchCommitment,
                        verifierParams
                    );
                    // Chain the commitments: this batch becomes the "previous" for the next iteration.
                    prevBatchCommitment = currentBatchCommitment;
                }
                // Cannot verify more batches than have been committed.
                require(currentTotalBatchesVerified <= s.totalBatchesCommitted, "q");
                _verifyProof(proofPublicInput, _proof);
                emit BlocksVerification(s.totalBatchesVerified, currentTotalBatchesVerified);
                s.totalBatchesVerified = currentTotalBatchesVerified;
            }
            /// @dev Verifies the ZK proof against the computed public inputs via the verifier contract.
            /// @param proofPublicInput Public inputs, one entry per batch being proven.
            /// @param _proof The serialized proof plus its recursive aggregation input.
            function _verifyProof(uint256[] memory proofPublicInput, ProofInput calldata _proof) internal view {
                // We can only process 1 batch proof at a time.
                require(proofPublicInput.length == 1, "t4");
                bool successVerifyProof = s.verifier.verify(
                    proofPublicInput,
                    _proof.serializedProof,
                    _proof.recursiveAggregationInput
                );
                require(successVerifyProof, "p"); // Proof verification fail
            }
            /// @dev Gets zk proof public input for one batch transition.
            /// @param _prevBatchCommitment Commitment of the preceding (already verified) batch.
            /// @param _currentBatchCommitment Commitment of the batch being proven.
            /// @param _verifierParams Recursion verification key hashes mixed into the input.
            /// @return The keccak256 hash of the packed inputs, shifted right by PUBLIC_INPUT_SHIFT.
            function _getBatchProofPublicInput(
                bytes32 _prevBatchCommitment,
                bytes32 _currentBatchCommitment,
                VerifierParams memory _verifierParams
            ) internal pure returns (uint256) {
                bytes32 inputHash = keccak256(
                    abi.encodePacked(
                        _prevBatchCommitment,
                        _currentBatchCommitment,
                        _verifierParams.recursionNodeLevelVkHash,
                        _verifierParams.recursionLeafLevelVkHash
                    )
                );
                return uint256(inputHash) >> PUBLIC_INPUT_SHIFT;
            }
            /// @inheritdoc IExecutor
            function revertBatches(uint256 _newLastBatch) external nonReentrant onlyValidator {
                require(s.totalBatchesCommitted > _newLastBatch, "v1"); // The new last batch must be strictly below the committed batch count
                require(_newLastBatch >= s.totalBatchesExecuted, "v2"); // Already executed batches cannot be reverted
                // If already-proven batches are being reverted, roll back the verified counter too.
                if (_newLastBatch < s.totalBatchesVerified) {
                    s.totalBatchesVerified = _newLastBatch;
                }
                s.totalBatchesCommitted = _newLastBatch;
                // Reset the batch number of the executed system contracts upgrade transaction if the batch
                // where the system contracts upgrade was committed is among the reverted batches.
                if (s.l2SystemContractsUpgradeBatchNumber > _newLastBatch) {
                    delete s.l2SystemContractsUpgradeBatchNumber;
                }
                emit BlocksRevert(s.totalBatchesCommitted, s.totalBatchesVerified, s.totalBatchesExecuted);
            }
            /// @dev Creates batch commitment from its data.
            /// The commitment is the hash of three independently hashed parts:
            /// pass-through data, meta parameters, and the auxiliary output.
            function _createBatchCommitment(
                CommitBatchInfo calldata _newBatchData,
                bytes32 _stateDiffHash,
                bytes32[] memory _blobCommitments,
                bytes32[] memory _blobHashes
            ) internal view returns (bytes32) {
                bytes32 metadataHash = keccak256(_batchMetaParameters());
                bytes32 passThroughDataHash = keccak256(_batchPassThroughData(_newBatchData));
                bytes memory auxiliaryOutput = _batchAuxiliaryOutput(
                    _newBatchData,
                    _stateDiffHash,
                    _blobCommitments,
                    _blobHashes
                );
                return keccak256(abi.encode(passThroughDataHash, metadataHash, keccak256(auxiliaryOutput)));
            }
            /// @dev Tightly packs the pass-through portion of the batch commitment:
            /// the repeated-storage-write index, the new state root, and zeroed zkPorter fields.
            function _batchPassThroughData(CommitBatchInfo calldata _batch) internal pure returns (bytes memory) {
                // zkPorter is unused: its repeated-write index and batch hash are committed as zeros.
                uint64 zkPorterRepeatedIndex = uint64(0);
                bytes32 zkPorterBatchHash = bytes32(0);
                return
                    abi.encodePacked(
                        _batch.indexRepeatedStorageChanges,
                        _batch.newStateRoot,
                        zkPorterRepeatedIndex,
                        zkPorterBatchHash
                    );
            }
            /// @dev Packs the protocol meta parameters every batch commits to: the zkPorter
            /// availability flag and the bootloader / default-account bytecode hashes.
            function _batchMetaParameters() internal view returns (bytes memory) {
                return abi.encodePacked(s.zkPorterIsAvailable, s.l2BootloaderBytecodeHash, s.l2DefaultAccountBytecodeHash);
            }
            /// @dev ABI-encodes the auxiliary output part of the batch commitment.
            function _batchAuxiliaryOutput(
                CommitBatchInfo calldata _batch,
                bytes32 _stateDiffHash,
                bytes32[] memory _blobCommitments,
                bytes32[] memory _blobHashes
            ) internal pure returns (bytes memory) {
                // The system logs are committed to as a single hash; bound their size first.
                require(_batch.systemLogs.length <= MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES, "pu");
                bytes32 systemLogsHash = keccak256(_batch.systemLogs);
                return
                    abi.encode(
                        systemLogsHash,
                        _stateDiffHash,
                        _batch.bootloaderHeapInitialContentsHash,
                        _batch.eventsQueueStateHash,
                        // Per blob we commit to the linear hash (taken from the system logs) and
                        // the output hash of the blob commitments:
                        // keccak(versioned hash || opening point || evaluation value).
                        // All of these are bytes32(0) when pubdata is submitted via calldata
                        // instead of blobs; with a single blob, the index-1 entries are bytes32(0).
                        _blobHashes[0],
                        _blobCommitments[0],
                        _blobHashes[1],
                        _blobCommitments[1]
                    );
            }
            /// @notice Returns the keccak hash of the ABI-encoded StoredBatchInfo
            function _hashStoredBatchInfo(StoredBatchInfo memory _storedBatchInfo) internal pure returns (bytes32) {
                bytes memory encodedBatch = abi.encode(_storedBatchInfo);
                return keccak256(encodedBatch);
            }
            /// @notice Returns true if the bit at index {_index} is 1
            function _checkBit(uint256 _bitMap, uint8 _index) internal pure returns (bool) {
                // Shift the target bit down to position 0 and inspect it.
                return ((_bitMap >> _index) & 1) == 1;
            }
            /// @notice Sets the given bit in {_num} at index {_index} to 1.
            function _setBit(uint256 _bitMap, uint8 _index) internal pure returns (uint256) {
                // OR-in a single-bit mask; already-set bits are left unchanged.
                uint256 mask = uint256(1) << _index;
                return _bitMap | mask;
            }
            /// @notice Calls the point evaluation precompile and verifies the output
            /// Verify p(z) = y given commitment that corresponds to the polynomial p(x) and a KZG proof.
            /// Also verify that the provided commitment matches the provided versioned_hash.
            ///
            /// @param _versionedHash The blob versioned hash (EIP-4844).
            /// @param _openingPoint The evaluation point z, zero-padded to 32 bytes.
            /// @param _openingValueCommitmentProof Concatenation of claimed value || commitment || proof.
            function _pointEvaluationPrecompile(
                bytes32 _versionedHash,
                bytes32 _openingPoint,
                bytes calldata _openingValueCommitmentProof
            ) internal view {
                // EIP-4844 precompile input layout: versioned_hash || z || y || commitment || proof.
                bytes memory precompileInput = abi.encodePacked(_versionedHash, _openingPoint, _openingValueCommitmentProof);
                (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(precompileInput);
                // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the
                // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
                require(success, "failed to call point evaluation precompile");
                (, uint256 result) = abi.decode(data, (uint256, uint256));
                require(result == BLS_MODULUS, "precompile unexpected output");
            }
            /// @dev Verifies that the blobs contain the correct data by calling the point evaluation precompile. For the precompile we need:
            /// versioned hash || opening point || opening value || commitment || proof
            /// the _pubdataCommitments will contain the last 4 values, the versioned hash is pulled from the BLOBHASH opcode
            /// pubdataCommitments is a list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)) = 144 bytes
            /// @return blobCommitments One keccak commitment per blob (zero-filled past the last blob).
            function _verifyBlobInformation(
                bytes calldata _pubdataCommitments,
                bytes32[] memory _blobHashes
            ) internal view returns (bytes32[] memory blobCommitments) {
                uint256 versionedHashIndex = 0;
                require(_pubdataCommitments.length > 0, "pl");
                require(_pubdataCommitments.length <= PUBDATA_COMMITMENT_SIZE * MAX_NUMBER_OF_BLOBS, "bd");
                require(_pubdataCommitments.length % PUBDATA_COMMITMENT_SIZE == 0, "bs");
                blobCommitments = new bytes32[](MAX_NUMBER_OF_BLOBS);
                // Process one 144-byte commitment per published blob.
                for (uint256 i = 0; i < _pubdataCommitments.length; i += PUBDATA_COMMITMENT_SIZE) {
                    bytes32 blobVersionedHash = _getBlobVersionedHash(versionedHashIndex);
                    require(blobVersionedHash != bytes32(0), "vh");
                    // First 16 bytes is the opening point. While we get the point as 16 bytes, the point evaluation precompile
                    // requires it to be 32 bytes. The blob commitment must use the opening point as 16 bytes though.
                    bytes32 openingPoint = bytes32(
                        uint256(uint128(bytes16(_pubdataCommitments[i:i + PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET])))
                    );
                    _pointEvaluationPrecompile(
                        blobVersionedHash,
                        openingPoint,
                        _pubdataCommitments[i + PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET:i + PUBDATA_COMMITMENT_SIZE]
                    );
                    // Take the hash of the versioned hash || opening point || claimed value
                    blobCommitments[versionedHashIndex] = keccak256(
                        abi.encodePacked(blobVersionedHash, _pubdataCommitments[i:i + PUBDATA_COMMITMENT_COMMITMENT_OFFSET])
                    );
                    versionedHashIndex += 1;
                }
                // This check is required because we want to ensure that there aren't any extra blobs trying to be published.
                // Calling the BLOBHASH opcode with an index > # blobs - 1 yields bytes32(0)
                bytes32 versionedHash = _getBlobVersionedHash(versionedHashIndex);
                require(versionedHash == bytes32(0), "lh");
                // We verify that for each set of blobHash/blobCommitment are either both empty
                // or there are values for both.
                for (uint256 i = 0; i < MAX_NUMBER_OF_BLOBS; i++) {
                    require(
                        (_blobHashes[i] == bytes32(0) && blobCommitments[i] == bytes32(0)) ||
                            (_blobHashes[i] != bytes32(0) && blobCommitments[i] != bytes32(0)),
                        "bh"
                    );
                }
            }
            /// @dev Since we don't have access to the new BLOBHASH opcode we need to leverage a static call to a yul contract
            /// that calls the opcode via a verbatim call. This should be swapped out once there is solidity support for the
            /// new opcode.
            /// @param _index The blob index to query; per the BLOBHASH semantics relied on above,
            /// an out-of-range index yields bytes32(0).
            function _getBlobVersionedHash(uint256 _index) internal view returns (bytes32 versionedHash) {
                (bool success, bytes memory data) = s.blobVersionedHashRetriever.staticcall(abi.encode(_index));
                require(success, "vc");
                versionedHash = abi.decode(data, (bytes32));
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: UNLICENSED
        /// @title The interface of the zkSync contract, responsible for the main zkSync logic.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        /// @dev Base interface inherited by the other facet interfaces in this file (e.g. IExecutor).
        interface IBase {
            /// @return Returns facet name.
            function getName() external view returns (string memory);
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {IBase} from "./IBase.sol";
        /// @dev Enum used by L2 System Contracts to differentiate logs.
        /// @dev The declaration order fixes each key's numeric value (0 through 9); do not reorder.
        enum SystemLogKey {
            L2_TO_L1_LOGS_TREE_ROOT_KEY,
            TOTAL_L2_TO_L1_PUBDATA_KEY,
            STATE_DIFF_HASH_KEY,
            PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY,
            PREV_BATCH_HASH_KEY,
            CHAINED_PRIORITY_TXN_HASH_KEY,
            NUMBER_OF_LAYER_1_TXS_KEY,
            BLOB_ONE_HASH_KEY,
            BLOB_TWO_HASH_KEY,
            EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY
        }
        /// @dev Enum used to determine the source of pubdata. At first we will support calldata and blobs but this can be extended.
        /// @dev Serialized as a single byte: 0 = Calldata, 1 = Blob.
        enum PubdataSource {
            Calldata,
            Blob
        }
        /// @dev Aggregated values extracted while processing a batch's L2 -> L1 system logs;
        /// field names mirror the corresponding SystemLogKey entries.
        struct LogProcessingOutput {
            uint256 numberOfLayer1Txs;
            bytes32 chainedPriorityTxsHash;
            bytes32 previousBatchHash;
            bytes32 pubdataHash;
            bytes32 stateDiffHash;
            bytes32 l2LogsTreeRoot;
            uint256 packedBatchAndL2BlockTimestamp;
            bytes32 blob1Hash;
            bytes32 blob2Hash;
        }
        /// @dev Offset used to pull Address From Log. Equal to 4 (bytes for isService)
        uint256 constant L2_LOG_ADDRESS_OFFSET = 4;
        /// @dev Offset used to pull Key From Log. Equal to 4 (bytes for isService) + 20 (bytes for address)
        uint256 constant L2_LOG_KEY_OFFSET = 24;
        /// @dev Offset used to pull Value From Log. Equal to 4 (bytes for isService) + 20 (bytes for address) + 32 (bytes for key)
        uint256 constant L2_LOG_VALUE_OFFSET = 56;
        /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
        /// point evaluation precompile
        uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
        /// @dev Packed pubdata commitments.
        /// @dev Format: list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes)) = 144 bytes
        uint256 constant PUBDATA_COMMITMENT_SIZE = 144;
        /// @dev Offset in pubdata commitment of blobs for claimed value (the 16-byte opening point precedes it)
        uint256 constant PUBDATA_COMMITMENT_CLAIMED_VALUE_OFFSET = 16;
        /// @dev Offset in pubdata commitment of blobs for kzg commitment (16-byte opening point + 32-byte claimed value precede it)
        uint256 constant PUBDATA_COMMITMENT_COMMITMENT_OFFSET = 48;
        /// @dev Max number of blobs currently supported
        uint256 constant MAX_NUMBER_OF_BLOBS = 2;
        /// @title The interface of the zkSync Executor contract capable of processing events emitted in the zkSync protocol.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        interface IExecutor is IBase {
            /// @notice Rollup batch stored data
            /// @param batchNumber Rollup batch number
            /// @param batchHash Hash of L2 batch
            /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more
            /// @param numberOfLayer1Txs Number of priority operations to be processed
            /// @param priorityOperationsHash Hash of all priority operations from this batch
            /// @param l2LogsTreeRoot Root hash of tree that contains L2 -> L1 messages from this batch
            /// @param timestamp Rollup batch timestamp, has the same format as an Ethereum block timestamp
            /// @param commitment Verified input for the zkSync circuit
            struct StoredBatchInfo {
                uint64 batchNumber;
                bytes32 batchHash;
                uint64 indexRepeatedStorageChanges;
                uint256 numberOfLayer1Txs;
                bytes32 priorityOperationsHash;
                bytes32 l2LogsTreeRoot;
                uint256 timestamp;
                bytes32 commitment;
            }
            /// @notice Data needed to commit new batch
            /// @param batchNumber Number of the committed batch
            /// @param timestamp Unix timestamp denoting the start of the batch execution
            /// @param indexRepeatedStorageChanges The serial number of the shortcut index that's used as a unique identifier for storage keys that were used twice or more
            /// @param newStateRoot The state root of the full state tree
            /// @param numberOfLayer1Txs Number of priority operations to be processed
            /// @param priorityOperationsHash Hash of all priority operations from this batch
            /// @param bootloaderHeapInitialContentsHash Hash of the initial contents of the bootloader heap. In practice it serves as the commitment to the transactions in the batch.
            /// @param eventsQueueStateHash Hash of the events queue state. In practice it serves as the commitment to the events in the batch.
            /// @param systemLogs concatenation of all L2 -> L1 system logs in the batch
            /// @param pubdataCommitments Packed pubdata commitments/data.
            /// @dev pubdataCommitments format: This will always start with a 1 byte pubdataSource flag. Current allowed values are 0 (calldata) or 1 (blobs)
            ///                             kzg: list of: opening point (16 bytes) || claimed value (32 bytes) || commitment (48 bytes) || proof (48 bytes) = 144 bytes
            ///                             calldata: pubdataCommitments.length - 1 - 32 bytes of pubdata
            ///                                       and 32 bytes appended to serve as the blob commitment part for the aux output part of the batch commitment
            /// @dev For 2 blobs we will be sending 288 bytes of calldata instead of the full amount for pubdata.
            /// @dev When using calldata, we only need to send one blob commitment since the max number of bytes in calldata fits in a single blob and we can pull the
            ///     linear hash from the system logs
            struct CommitBatchInfo {
                uint64 batchNumber;
                uint64 timestamp;
                uint64 indexRepeatedStorageChanges;
                bytes32 newStateRoot;
                uint256 numberOfLayer1Txs;
                bytes32 priorityOperationsHash;
                bytes32 bootloaderHeapInitialContentsHash;
                bytes32 eventsQueueStateHash;
                bytes systemLogs;
                bytes pubdataCommitments;
            }
            /// @notice Recursive proof input data (individual commitments are constructed onchain)
            struct ProofInput {
                uint256[] recursiveAggregationInput;
                uint256[] serializedProof;
            }
            /// @notice Function called by the operator to commit new batches. It is responsible for:
            /// - Verifying the correctness of their timestamps.
            /// - Processing their L2->L1 logs.
            /// - Storing batch commitments.
            /// @param _lastCommittedBatchData Stored data of the last committed batch.
            /// @param _newBatchesData Data of the new batches to be committed.
            function commitBatches(
                StoredBatchInfo calldata _lastCommittedBatchData,
                CommitBatchInfo[] calldata _newBatchesData
            ) external;
            /// @notice Batches commitment verification.
            /// @dev Only verifies batch commitments without any other processing.
            /// @param _prevBatch Stored data of the last committed batch.
            /// @param _committedBatches Stored data of the committed batches.
            /// @param _proof The zero knowledge proof.
            function proveBatches(
                StoredBatchInfo calldata _prevBatch,
                StoredBatchInfo[] calldata _committedBatches,
                ProofInput calldata _proof
            ) external;
            /// @notice The function called by the operator to finalize (execute) batches. It is responsible for:
            /// - Processing all pending operations (completing priority requests).
            /// - Finalizing this batch (i.e. allowing to withdraw funds from the system)
            /// @param _batchesData Data of the batches to be executed.
            function executeBatches(StoredBatchInfo[] calldata _batchesData) external;
            /// @notice Reverts unexecuted batches
            /// @param _newLastBatch batch number after which batches should be reverted
            /// NOTE: Doesn't delete the stored data about batches, but only decreases
            /// counters that are responsible for the number of batches
            function revertBatches(uint256 _newLastBatch) external;
            /// @notice Event emitted when a batch is committed
            /// @param batchNumber Number of the batch committed
            /// @param batchHash Hash of the L2 batch
            /// @param commitment Calculated input for the zkSync circuit
            /// @dev It has the name "BlockCommit" and not "BatchCommit" due to backward compatibility considerations
            event BlockCommit(uint256 indexed batchNumber, bytes32 indexed batchHash, bytes32 indexed commitment);
            /// @notice Event emitted when batches are verified
            /// @param previousLastVerifiedBatch Batch number of the previous last verified batch
            /// @param currentLastVerifiedBatch Batch number of the current last verified batch
            /// @dev It has the name "BlocksVerification" and not "BatchesVerification" due to backward compatibility considerations
            event BlocksVerification(uint256 indexed previousLastVerifiedBatch, uint256 indexed currentLastVerifiedBatch);
            /// @notice Event emitted when a batch is executed
            /// @param batchNumber Number of the batch executed
            /// @param batchHash Hash of the L2 batch
            /// @param commitment Verified input for the zkSync circuit
            /// @dev It has the name "BlockExecution" and not "BatchExecution" due to backward compatibility considerations
            event BlockExecution(uint256 indexed batchNumber, bytes32 indexed batchHash, bytes32 indexed commitment);
            /// @notice Event emitted when batches are reverted
            /// @param totalBatchesCommitted Total number of committed batches after the revert
            /// @param totalBatchesVerified Total number of verified batches after the revert
            /// @param totalBatchesExecuted Total number of executed batches
            /// @dev It has the name "BlocksRevert" and not "BatchesRevert" due to backward compatibility considerations
            event BlocksRevert(uint256 totalBatchesCommitted, uint256 totalBatchesVerified, uint256 totalBatchesExecuted);
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /// @title The interface of the Verifier contract, responsible for the zero knowledge proof verification.
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        interface IVerifier {
            /// @dev Verifies a zk-SNARK proof.
            /// @param _publicInputs The public inputs the proof is checked against.
            /// @param _proof The serialized proof.
            /// @param _recursiveAggregationInput Input for recursive proof aggregation.
            /// @return A boolean value indicating whether the zk-SNARK proof is valid.
            /// Note: The function may revert execution instead of returning false in some cases.
            function verify(
                uint256[] calldata _publicInputs,
                uint256[] calldata _proof,
                uint256[] calldata _recursiveAggregationInput
            ) external view returns (bool);
            /// @notice Calculates a keccak256 hash of the runtime loaded verification keys.
            /// @return vkHash The keccak256 hash of the loaded verification keys.
            function verificationKeyHash() external pure returns (bytes32);
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        /// @notice The structure that contains meta information of the L2 transaction that was requested from L1
        /// @dev The unusual field sizes were selected specifically to minimize the structure storage size
        /// (bytes32 + uint64 + uint192 packs the last two fields into a single storage slot)
        /// @param canonicalTxHash Hashed L2 transaction data that is needed to process it
        /// @param expirationTimestamp Expiration timestamp for this request (must be satisfied before)
        /// @param layer2Tip Additional payment to the validator as an incentive to perform the operation
        struct PriorityOperation {
            bytes32 canonicalTxHash;
            uint64 expirationTimestamp;
            uint192 layer2Tip;
        }
        /// @author Matter Labs
        /// @custom:security-contact [email protected]
        /// @dev The library provides the API to interact with the priority queue container
        /// @dev Order of processing operations from queue - FIFO (First in - first out)
        library PriorityQueue {
            using PriorityQueue for Queue;
            /// @notice Container that stores priority operations
            /// @param data The inner mapping that saves priority operation by its index
            /// @param head The pointer to the first unprocessed priority operation, equal to the tail if the queue is empty
            /// @param tail The pointer to the free slot
            /// @dev Invariant: head <= tail whenever the queue is used only through this library's API.
            struct Queue {
                mapping(uint256 priorityOpId => PriorityOperation priorityOp) data;
                uint256 tail;
                uint256 head;
            }
            /// @notice Returns zero if and only if no operations were processed from the queue
            /// @return Index of the oldest priority operation that wasn't processed yet
            function getFirstUnprocessedPriorityTx(Queue storage _queue) internal view returns (uint256) {
                return _queue.head;
            }
            /// @return The total number of priority operations that were added to the priority queue, including all processed ones
            function getTotalPriorityTxs(Queue storage _queue) internal view returns (uint256) {
                return _queue.tail;
            }
            /// @return The total number of unprocessed priority operations in a priority queue
            function getSize(Queue storage _queue) internal view returns (uint256) {
                return uint256(_queue.tail - _queue.head);
            }
            /// @return Whether the priority queue contains no operations
            function isEmpty(Queue storage _queue) internal view returns (bool) {
                return _queue.tail == _queue.head;
            }
            /// @notice Add the priority operation to the end of the priority queue
            function pushBack(Queue storage _queue, PriorityOperation memory _operation) internal {
                // Save value into the stack to avoid double reading from the storage
                uint256 tail = _queue.tail;
                _queue.data[tail] = _operation;
                _queue.tail = tail + 1;
            }
            /// @return The first unprocessed priority operation from the queue
            function front(Queue storage _queue) internal view returns (PriorityOperation memory) {
                require(!_queue.isEmpty(), "D"); // priority queue is empty
                return _queue.data[_queue.head];
            }
            /// @notice Remove the first unprocessed priority operation from the queue
            /// @return priorityOperation that was popped from the priority queue
            function popFront(Queue storage _queue) internal returns (PriorityOperation memory priorityOperation) {
                require(!_queue.isEmpty(), "s"); // priority queue is empty
                // Save value into the stack to avoid double reading from the storage
                uint256 head = _queue.head;
                priorityOperation = _queue.data[head];
                delete _queue.data[head];
                _queue.head = head + 1;
            }
        }
        pragma solidity 0.8.20;
        // SPDX-License-Identifier: MIT
        import {IVerifier} from "./../zksync/interfaces/IVerifier.sol";
        import {PriorityQueue} from "./libraries/PriorityQueue.sol";
        /// @notice Indicates whether an upgrade is initiated and if yes what type
        /// @param None Upgrade is NOT initiated
        /// @param Transparent Fully transparent upgrade is initiated, upgrade data is publicly known
        /// @param Shadow Shadow upgrade is initiated, upgrade data is hidden
        /// @dev Referenced by `UpgradeStorage`, which is persisted in `AppStorage` (as `__DEPRECATED_upgrades`);
        /// do not reorder variants — their numeric values may already be stored on-chain
        enum UpgradeState {
            None,
            Transparent,
            Shadow
        }
        /// @dev Logically separated part of the storage structure, which is responsible for everything related to proxy
        /// upgrades and diamond cuts
        /// @dev Only referenced from `AppStorage` as `__DEPRECATED_upgrades`; kept for storage-layout compatibility
        /// @dev Fields after `proposedUpgradeHash` pack into one 32-byte slot (1 + 20 + 1 + 5 + 5 bytes)
        /// @param proposedUpgradeHash The hash of the current upgrade proposal, zero if there is no active proposal
        /// @param state Indicates whether an upgrade is initiated and if yes what type
        /// @param securityCouncil Address which has the permission to approve instant upgrades (expected to be a Gnosis
        /// multisig)
        /// @param approvedBySecurityCouncil Indicates whether the security council has approved the upgrade
        /// @param proposedUpgradeTimestamp The timestamp when the upgrade was proposed, zero if there are no active proposals
        /// @param currentProposalId The serial number of proposed upgrades, increments when proposing a new one
        struct UpgradeStorage {
            bytes32 proposedUpgradeHash;
            UpgradeState state;
            address securityCouncil;
            bool approvedBySecurityCouncil;
            uint40 proposedUpgradeTimestamp;
            uint40 currentProposalId;
        }
        /// @dev The log passed from L2
        /// @dev The first four fields (1 + 1 + 2 + 20 bytes) pack into a single 32-byte slot
        /// @param l2ShardId The shard identifier, 0 - rollup, 1 - porter. All other values are not used but are reserved for
        /// the future
        /// @param isService A boolean flag that is part of the log along with `key`, `value`, and `sender` address.
        /// This field is required formally but does not have any special meaning.
        /// @param txNumberInBatch The L2 transaction number in the batch, in which the log was sent
        /// @param sender The L2 address which sent the log
        /// @param key The 32 bytes of information that was sent in the log
        /// @param value The 32 bytes of information that was sent in the log
        // Both `key` and `value` are arbitrary 32-bytes selected by the log sender
        struct L2Log {
            uint8 l2ShardId;
            bool isService;
            uint16 txNumberInBatch;
            address sender;
            bytes32 key;
            bytes32 value;
        }
        /// @dev An arbitrary length message passed from L2
        /// @notice Under the hood it is `L2Log` sent from the special system L2 contract
        /// @param txNumberInBatch The L2 transaction number in the batch, in which the message was sent
        /// @param sender The address of the L2 account from which the message was passed
        /// @param data An arbitrary length message (dynamic `bytes`, so this struct is memory/calldata only,
        /// not a packed storage record)
        struct L2Message {
            uint16 txNumberInBatch;
            address sender;
            bytes data;
        }
        /// @notice Part of the configuration parameters of ZKP circuits
        /// @dev Used as an input for the verifier smart contract (see `AppStorage.verifierParams`)
        /// @param recursionNodeLevelVkHash Hash of the verification key used on the node level of recursion
        /// @param recursionLeafLevelVkHash Hash of the verification key used on the leaf level of recursion
        /// @param recursionCircuitsSetVksHash Hash of the verification-keys set of the recursion circuits
        struct VerifierParams {
            bytes32 recursionNodeLevelVkHash;
            bytes32 recursionLeafLevelVkHash;
            bytes32 recursionCircuitsSetVksHash;
        }
        /// @notice The struct that describes whether users will be charged for pubdata for L1->L2 transactions.
        /// @param Rollup The users are charged for pubdata & it is priced based on the gas price on Ethereum.
        /// @param Validium The pubdata is considered free with regard to the L1 gas price.
        /// @dev Stored inside `FeeParams`; do not reorder variants, their numeric values are persisted
        enum PubdataPricingMode {
            Rollup,
            Validium
        }
        /// @notice The fee params for L1->L2 transactions for the network.
        /// @param pubdataPricingMode How the users will be charged for pubdata in L1->L2 transactions.
        /// @param batchOverheadL1Gas The amount of L1 gas required to process the batch (except for the calldata).
        /// @param maxPubdataPerBatch The maximal number of pubdata that can be emitted per batch.
        /// @param maxL2GasPerBatch Presumably the maximal amount of L2 gas that a single batch may consume —
        /// NOTE(review): this field was missing from the original NatSpec; confirm semantics against the fee model.
        /// @param priorityTxMaxPubdata The maximal amount of pubdata a priority transaction is allowed to publish.
        /// It can be slightly less than maxPubdataPerBatch in order to have some margin for the bootloader execution.
        /// @param minimalL2GasPrice The minimal L2 gas price to be used by L1->L2 transactions. It should represent
        /// the price that a single unit of compute costs.
        struct FeeParams {
            PubdataPricingMode pubdataPricingMode;
            uint32 batchOverheadL1Gas;
            uint32 maxPubdataPerBatch;
            uint32 maxL2GasPerBatch;
            uint32 priorityTxMaxPubdata;
            uint64 minimalL2GasPrice;
        }
        /// @dev storing all storage variables for zkSync facets
        /// NOTE: It is used in a proxy, so it is possible to add new variables to the end
        /// but NOT to modify already existing variables or change their order.
        /// NOTE: variables prefixed with '__DEPRECATED_' are deprecated and shouldn't be used.
        /// Their presence is maintained for compatibility and to prevent storage collision.
        struct AppStorage {
            /// @dev Storage of variables needed for deprecated diamond cut facet
            uint256[7] __DEPRECATED_diamondCutStorage;
            /// @notice Address which will exercise critical changes to the Diamond Proxy (upgrades, freezing & unfreezing)
            address governor;
            /// @notice Address that the governor proposed as one that will replace it
            address pendingGovernor;
            /// @notice List of permitted validators
            /// @dev NOTE(review): presumably the addresses allowed to commit/prove/execute batches — confirm against the facets
            mapping(address validatorAddress => bool isValidator) validators;
            /// @dev Verifier contract. Used to verify aggregated proof for batches
            IVerifier verifier;
            /// @notice Total number of executed batches i.e. batches[totalBatchesExecuted] points at the latest executed batch
            /// (batch 0 is genesis)
            /// @dev NOTE(review): expected invariant totalBatchesExecuted <= totalBatchesVerified <= totalBatchesCommitted —
            /// enforced elsewhere in the facets; confirm
            uint256 totalBatchesExecuted;
            /// @notice Total number of proved batches i.e. batches[totalBatchesProved] points at the latest proved batch
            uint256 totalBatchesVerified;
            /// @notice Total number of committed batches i.e. batches[totalBatchesCommitted] points at the latest committed
            /// batch
            uint256 totalBatchesCommitted;
            /// @dev Stored hashed StoredBatch for batch number
            mapping(uint256 batchNumber => bytes32 batchHash) storedBatchHashes;
            /// @dev Stored root hashes of L2 -> L1 logs
            mapping(uint256 batchNumber => bytes32 l2LogsRootHash) l2LogsRootHashes;
            /// @dev Container that stores transactions requested from L1
            /// @dev Processed in FIFO order via the `PriorityQueue` library
            PriorityQueue.Queue priorityQueue;
            /// @dev The smart contract that manages the list with permission to call contract functions
            address __DEPRECATED_allowList;
            /// @notice Part of the configuration parameters of ZKP circuits. Used as an input for the verifier smart contract
            VerifierParams verifierParams;
            /// @notice Bytecode hash of bootloader program.
            /// @dev Used as an input to zkp-circuit.
            bytes32 l2BootloaderBytecodeHash;
            /// @notice Bytecode hash of default account (bytecode for EOA).
            /// @dev Used as an input to zkp-circuit.
            bytes32 l2DefaultAccountBytecodeHash;
            /// @dev Indicates that the porter may be touched on L2 transactions.
            /// @dev Used as an input to zkp-circuit.
            bool zkPorterIsAvailable;
            /// @dev The maximum number of the L2 gas that a user can request for L1 -> L2 transactions
            /// @dev This is the maximum number of L2 gas that is available for the "body" of the transaction, i.e.
            /// without overhead for proving the batch.
            uint256 priorityTxMaxGasLimit;
            /// @dev Storage of variables needed for upgrade facet
            UpgradeStorage __DEPRECATED_upgrades;
            /// @dev A mapping L2 batch number => message number => flag.
            /// @dev The L2 -> L1 log is sent for every withdrawal, so this mapping is serving as
            /// a flag to indicate that the message was already processed.
            /// @dev Used to indicate that eth withdrawal was already processed
            mapping(uint256 l2BatchNumber => mapping(uint256 l2ToL1MessageNumber => bool isFinalized)) isEthWithdrawalFinalized;
            /// @dev The most recent withdrawal time and amount reset
            uint256 __DEPRECATED_lastWithdrawalLimitReset;
            /// @dev The accumulated withdrawn amount during the withdrawal limit window
            uint256 __DEPRECATED_withdrawnAmountInWindow;
            /// @dev A mapping user address => the total deposited amount by the user
            mapping(address => uint256) __DEPRECATED_totalDepositedAmountPerUser;
            /// @dev Stores the protocol version. Note, that the protocol version may not only encompass changes to the
            /// smart contracts, but also to the node behavior.
            uint256 protocolVersion;
            /// @dev Hash of the system contract upgrade transaction. If 0, then no upgrade transaction needs to be done.
            bytes32 l2SystemContractsUpgradeTxHash;
            /// @dev Batch number where the upgrade transaction has happened. If 0, then no upgrade transaction has happened
            /// yet.
            uint256 l2SystemContractsUpgradeBatchNumber;
            /// @dev Address which will exercise non-critical changes to the Diamond Proxy (changing validator set & unfreezing)
            address admin;
            /// @notice Address that the governor or admin proposed as one that will replace admin role
            address pendingAdmin;
            /// @dev Fee params used to derive gasPrice for the L1->L2 transactions. For L2 transactions,
            /// the bootloader gives enough freedom to the operator.
            FeeParams feeParams;
            /// @dev Address of the blob versioned hash getter smart contract used for EIP-4844 versioned hashes.
            address blobVersionedHashRetriever;
        }