dgf-prototype/ethereum/contracts/Rollup.sol

// SPDX-License-Identifier: Unlicense
pragma solidity ^0.8.24;

import "./core/DAO.sol";
import "./Availability.sol";

contract Rollup is Availability {
    constructor(DAO dao) Availability(dao) {}
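
    // NOTE: the calls below rely on members assumed to come from DAO.sol and
    // Availability.sol (not shown here): the inherited `dao` reference, the
    // `stakes` array, `assignWork()`, `dao.initiateValidationPool(...)`, and
    // `dao.delegatedStakeOnValidationPool(...)`. Their exact signatures are
    // inferred from how they are used in this file.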

    struct BatchItem {
        address sender;
        address worker;
        uint stakeAmount;
        uint fee;
        string postId;
    }

    mapping(uint => BatchItem) public items;
    uint public itemCount;
    address public batchWorker;
    uint batchWorkerStakeIndex;

    event BatchItemAdded(string postId, address sender, uint fee);
    event BatchWorkerAssigned(address batchWorker);

    /// Instead of initiating a validation pool, call this method to include
    /// the stakes and fee in the next batch validation pool.
    function addItem(
        address author,
        uint stakeAmount,
        string calldata postId
    ) public payable {
        BatchItem storage item = items[itemCount++];
        item.sender = msg.sender;
        item.worker = author;
        item.stakeAmount = stakeAmount;
        item.fee = msg.value;
        item.postId = postId;
        emit BatchItemAdded(postId, item.sender, item.fee);
    }
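
    // Example (hypothetical caller such as a work or forum contract that holds
    // the author's availability stake; names are illustrative only):
    //   rollup.addItem{value: fee}(authorAddress, stakeAmount, postId);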

    /// To be called by the currently assigned batch worker. If no batch worker
    /// has been assigned, this may be called by anybody, but it will only
    /// succeed if it is able to assign a new worker.
    function submitBatch(
        string calldata batchPostId,
        uint batchSize,
        uint poolDuration
    ) public returns (uint poolIndex) {
        require(batchSize <= itemCount, "Batch size too large");
        if (batchWorker != address(0)) {
            require(
                msg.sender == batchWorker,
                "Batch result must be submitted by current batch worker"
            );
        }
        // initiate a validation pool for this batch
        uint fee;
        for (uint i = 0; i < batchSize; i++) {
            fee += items[i].fee;
        }
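
        // The hard-coded arguments below are pool parameters defined by
        // DAO.initiateValidationPool (not shown here). Presumably the two
        // fixed-size arrays encode fraction-style parameters such as a quorum
        // of 1/3 and a win ratio of 1/2, but that reading is an assumption;
        // confirm against the DAO contract before relying on it.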
        poolIndex = dao.initiateValidationPool{value: fee}(
            batchPostId,
            poolDuration,
            [uint256(1), uint256(3)],
            [uint256(1), uint256(2)],
            100,
            true,
            false,
            ""
        );

        // Include all the availability stakes from the batched work
        for (uint i = 0; i < batchSize; i++) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                items[i].worker,
                items[i].stakeAmount,
                true
            );
        }

        // Include availability stakes from the batch worker
        if (batchWorker != address(0)) {
            dao.delegatedStakeOnValidationPool(
                poolIndex,
                batchWorker,
                stakes[batchWorkerStakeIndex].amount,
                true
            );
        }

        if (batchSize < itemCount) {
            // Some items were added after this batch was computed.
            // Keep them in the queue to be included in the next batch.
            for (uint i = 0; i < itemCount - batchSize; i++) {
                items[i] = items[batchSize + i];
            }
            itemCount = itemCount - batchSize;
        } else {
            // Reset item count so we can start the next batch
            itemCount = 0;
        }
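        // Note: items carried over above keep their recorded fees; the
        // corresponding ether (already held by this contract from addItem)
        // is forwarded when the next batch's validation pool is initiated.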

        // Select the next worker
        batchWorkerStakeIndex = assignWork();
        batchWorker = stakes[batchWorkerStakeIndex].worker;
        emit BatchWorkerAssigned(batchWorker);
    }
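
    // Example flow (hypothetical; names are illustrative only): after the
    // assigned worker finishes the batched work off-chain and posts the batch,
    // it opens the batch validation pool with:
    //   rollup.submitBatch(batchPostId, batchSize, poolDuration);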
}