dgf-prototype/ethereum/contracts/Rollup.sol

// SPDX-License-Identifier: Unlicense
pragma solidity ^0.8.24;

import "./core/DAO.sol";
import "./Availability.sol";
contract Rollup is Availability {
    constructor(DAO dao) Availability(dao) {}
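
    /// A queued work item awaiting inclusion in the next batch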
    struct BatchItem {
        address author;
        uint stakeAmount;
        uint fee;
        string postId;
    }
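
    // Pending items occupy `items[0..itemCount-1]`. `batchWorker` is the
    // worker assigned to compute the current batch (zero address when
    // unassigned), and `batchWorkerStakeIndex` is the index of that worker's
    // stake in `stakes`, inherited from Availability.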
    mapping(uint => BatchItem) items;
    uint itemCount;
    address batchWorker;
    uint batchWorkerStakeIndex;

    /// Instead of initiating a validation pool directly, call this method to
    /// include the author's stake and fee in the next batch validation pool.
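    /// The fee (msg.value) is held by this contract until submitBatch forwards
    /// it to the batch validation pool.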
    function addItem(
        address author,
        uint stakeAmount,
        string calldata postId
    ) public payable {
        BatchItem storage item = items[itemCount++];
        item.author = author;
        item.stakeAmount = stakeAmount;
        item.fee = msg.value;
        item.postId = postId;
    }

    /// To be called by the currently assigned batch worker. If no batch worker
    /// has been assigned, this may be called by anybody, but it will only
    /// succeed if it is able to assign a new worker.
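    /// Only the first `batchSize` queued items are included in this batch;
    /// items added after the batch was computed remain queued for the next one.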
    function submitBatch(
        string calldata batchPostId,
        uint batchSize,
        uint poolDuration
    ) public returns (uint poolIndex) {
        require(batchSize <= itemCount, "Batch size too large");
        if (batchWorker != address(0)) {
            require(
                msg.sender == batchWorker,
                "Batch result must be submitted by current batch worker"
            );
        }

        // Initiate a validation pool for this batch, forwarding the total fee
        // collected for the batched items
        uint fee;
        for (uint i = 0; i < batchSize; i++) {
            fee += items[i].fee;
        }
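        // The remaining literal arguments are pool parameters defined by
        // DAO.initiateValidationPool; see DAO.sol for their meaning.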
        poolIndex = dao.initiateValidationPool{value: fee}(
            batchPostId,
            poolDuration,
            [uint256(1), uint256(3)],
            [uint256(1), uint256(2)],
            100,
            true,
            false,
            ""
        );
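
        // In the delegated stakes below, the final boolean argument is assumed
        // to stake each participant in favor of the batch post.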
        // Include the availability stakes from each item in the batch
        for (uint i = 0; i < batchSize; i++) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                items[i].author,
                items[i].stakeAmount,
                true
            );
        }

        // Include availability stakes from the batch worker
        if (batchWorker != address(0)) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                batchWorker,
                stakes[batchWorkerStakeIndex].amount,
                true
            );
        }

        if (batchSize < itemCount) {
            // Some items were added after this batch was computed.
            // Keep them in the queue to be included in the next batch.
            for (uint i = 0; i < itemCount - batchSize; i++) {
                items[i] = items[batchSize + i];
            }
            itemCount = itemCount - batchSize;
        } else {
            // Reset item count so we can start the next batch
            itemCount = 0;
        }
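
        // assignWork() and stakes are inherited from Availability; assignWork
        // is expected to select the stake of the next available worker.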
        // Select the next worker
        batchWorkerStakeIndex = assignWork();
        batchWorker = stakes[batchWorkerStakeIndex].worker;
    }
}
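
// Illustrative flow (a sketch of how the names above are used together):
//   1. A caller invokes addItem{value: fee}(author, stakeAmount, postId) each
//      time work is recorded, instead of opening a validation pool per item.
//   2. The assigned batch worker calls
//      submitBatch(batchPostId, batchSize, poolDuration), which opens a single
//      validation pool covering the batched items, delegates the queued
//      stakes, and assigns the next batch worker.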