// SPDX-License-Identifier: Unlicense
pragma solidity ^0.8.24;

import "./core/DAO.sol";
import "./Availability.sol";

contract Rollup is Availability {
    struct BatchItem {
        address sender;
        address worker;
        uint stakeAmount;
        uint fee;
        string postId;
    }

    mapping(uint => BatchItem) public items;
    uint public itemCount;
    address public batchWorker;
    uint batchWorkerStakeIndex;
    uint public immutable batchInterval;
    uint public batchStart;
    uint lastWorkerReset;
    uint constant minResetInterval = 120;

    event BatchItemAdded(string postId, address sender, uint fee);
    event BatchWorkerAssigned(address batchWorker);

    constructor(DAO dao, uint batchInterval_) Availability(dao) {
        batchInterval = batchInterval_;
    }

    /// Instead of initiating a validation pool, call this method to include
    /// the stakes and fee in the next batch validation pool
    function addItem(
        address author,
        uint stakeAmount,
        string calldata postId
    ) public payable {
        BatchItem storage item = items[itemCount++];
        item.sender = msg.sender;
        item.worker = author;
        item.stakeAmount = stakeAmount;
        item.fee = msg.value;
        item.postId = postId;
        emit BatchItemAdded(postId, item.sender, item.fee);
    }

    /// To be called by the currently assigned batch worker.
    /// If no batch worker has been assigned, this may be called by anybody,
    /// but it will only succeed if it is able to assign a new worker.
    function submitBatch(
        string calldata batchPostId,
        string[] calldata batchItems,
        uint poolDuration
    ) public returns (uint poolIndex) {
        if (batchWorker != address(0)) {
            require(
                msg.sender == batchWorker,
                "Batch result must be submitted by current batch worker"
            );
        }
        require(batchItems.length <= itemCount, "Batch size too large");
        // Make sure all batch items match the queued items, in queue order
        for (uint i = 0; i < batchItems.length; i++) {
            require(
                keccak256(bytes(batchItems[i])) ==
                    keccak256(bytes(items[i].postId)),
                "Batch item mismatch"
            );
        }
        // Initiate a validation pool for this batch, funded by the combined fees
        uint fee;
        for (uint i = 0; i < batchItems.length; i++) {
            fee += items[i].fee;
        }
        poolIndex = dao.initiateValidationPool{value: fee}(
            batchPostId,
            poolDuration,
            [uint256(1), uint256(3)],
            [uint256(1), uint256(2)],
            100,
            true,
            false,
            ""
        );
        // Include all the availability stakes from the batched work
        for (uint i = 0; i < batchItems.length; i++) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                items[i].worker,
                items[i].stakeAmount,
                true
            );
        }
        // Include availability stakes from the batch worker
        if (batchWorker != address(0)) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                batchWorker,
                stakes[batchWorkerStakeIndex].amount,
                true
            );
        }
        if (batchItems.length < itemCount) {
            // Some items were added after this batch was computed.
            // Keep them in the queue to be included in the next batch.
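            // The leftover items are shifted to the front of the queue below.
            // Entries at indices >= the new itemCount become stale but are never
            // read, since itemCount bounds all queue accesses and addItem
            // overwrites every field of a slot when the count grows again.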
            for (uint i = 0; i < itemCount - batchItems.length; i++) {
                items[i] = items[batchItems.length + i];
            }
            itemCount = itemCount - batchItems.length;
        } else {
            // Reset item count so we can start the next batch
            itemCount = 0;
        }
        // Select the next batch worker
        batchWorkerStakeIndex = assignWork();
        batchWorker = stakes[batchWorkerStakeIndex].worker;
        batchStart = block.timestamp;
        emit BatchWorkerAssigned(batchWorker);
    }

    /// If the batch worker fails to submit the batch, a new batch worker may be selected
    function resetBatchWorker() public {
        // TODO: Grace period after the current batch is due and before the worker can be replaced
        require(
            block.timestamp - batchStart > batchInterval,
            "Current batch interval has not yet elapsed"
        );
        require(itemCount > 0, "Current batch is empty");
        require(
            lastWorkerReset == 0 ||
                block.timestamp - lastWorkerReset >= minResetInterval,
            "Minimum reset interval has not elapsed since last batch worker reset"
        );
        // Record this reset so the minimum reset interval can be enforced
        lastWorkerReset = block.timestamp;
        // TODO: Submit a validation pool targeting a null post, and send the worker's availability stake
        // This gives the DAO an opportunity to police the failed work
        // Select a new batch worker
        batchWorkerStakeIndex = assignWork();
        batchWorker = stakes[batchWorkerStakeIndex].worker;
    }
}
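
// Example lifecycle (illustrative sketch only; `rollup`, `author`, `fee`,
// `stakeAmount`, `postIds`, and `poolDuration` are hypothetical values, and a
// deployed DAO with registered availability stakes is assumed):
//
//   // 1. A caller queues work on behalf of an author instead of opening an
//   //    individual validation pool, forwarding the pool fee as msg.value.
//   rollup.addItem{value: fee}(author, stakeAmount, "post-id");
//
//   // 2. The assigned batch worker (or anyone, if no worker has been assigned
//   //    yet) submits the batch post together with the queued post IDs, in
//   //    the same order they were added.
//   uint poolIndex = rollup.submitBatch("batch-post-id", postIds, poolDuration);
//
//   // 3. If the worker fails to submit within batchInterval, anyone may rotate
//   //    in a new worker, throttled by minResetInterval between resets.
//   rollup.resetBatchWorker();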