notes

commit 8f97517075 (parent e242df5c7a)

README.md: 67 lines changed
@@ -5,13 +5,78 @@
| Peering | Yes | Currently using `RHIZOME_SEED_PEERS`, no gossip / discovery |
| Schemas | Not really | Currently a very thin layer allowing TypedCollections |
| Relationships | No | Supporting relational algebra among domain entities |
| Views | Yes | Lossless: Map the `targetContext`s as properties of domain entities. |
| | | Lossy: Use a delta filter and a resolver function to produce a view. |
| | | Currently using functions rather than JSON-Logic expressions. |
| Functions | No | Arbitrary subscribers to the delta stream (that can also emit deltas?) |
| Tests | Minimal | So far we have a few `ts-jest` tests. Need a plan for multi-node tests. |
| Identity | No | Probably a public/private key system |
| Contexts | No | Each context may involve different lossy functions and delta filters |
| HTTP API | Yes | Basic peering info and entity CRUD |

If we express views and filter rules as JSON-Logic, we can easily include them in records.
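
As a sketch of that idea (nothing like this is in the repo yet; the `json-logic-js` package and the delta shape below are assumptions for illustration):

```ts
// Sketch only: a delta filter expressed as a JSON-Logic rule, so it is plain
// JSON and can travel inside a record, then be evaluated wherever needed.
// The delta shape ({creator, targetContext, value}) is illustrative.
import jsonLogic from "json-logic-js";

// A filter rule that is itself serializable JSON.
const filterRule = {
  "and": [
    {"==": [{"var": "targetContext"}, "name"]},
    {"!=": [{"var": "creator"}, "untrusted-peer"]}
  ]
};

const delta = {creator: "peer-1", targetContext: "name", value: "Alice"};

// Evaluate the stored rule against a delta at view-resolution time.
const included = jsonLogic.apply(filterRule, delta); // true
console.log(included);
```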

## Clocks?

Do we want to involve a time synchronization protocol, e.g. ntpd?

If not, what's the best we could do?

Maybe just expect nodes to record relative times, and patch together a sequence based on the relative times. This adds complexity and still has limited precision.

We could just let the clocks drift and so on, and make inferences at query resolution time.

We could do some extra work and keep track of what time our peers think it is. Then if their clocks drift relative to ours, we can seek consensus among a broader range of peers.
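
A rough sketch of that peer-offset idea (purely hypothetical, not implemented anywhere; peer IDs and the sampling mechanism are illustrative):

```ts
// Hypothetical sketch: track the offset between each peer's clock and ours,
// then use the median offset as a crude consensus correction at query time.

const peerOffsets = new Map<string, number>(); // peerId -> (peerTime - ourTime) in ms

// Call this whenever a peer tells us what time it thinks it is.
function recordPeerTime(peerId: string, peerTimestampMs: number): void {
  peerOffsets.set(peerId, peerTimestampMs - Date.now());
}

// Median of observed offsets; with most peers roughly accurate, this
// dampens the effect of any single badly drifting clock.
function consensusOffsetMs(): number {
  const offsets = [...peerOffsets.values()].sort((a, b) => a - b);
  if (offsets.length === 0) return 0;
  const mid = Math.floor(offsets.length / 2);
  return offsets.length % 2 === 1
    ? offsets[mid]
    : (offsets[mid - 1] + offsets[mid]) / 2;
}

// Adjust a locally recorded timestamp at query resolution time.
function adjustedTimestamp(localTimestampMs: number): number {
  return localTimestampMs + consensusOffsetMs();
}
```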

But at that point, just run ntpd. We can still do consensus to verify, but there's probably no need to implement a custom time synchronization protocol.

Wait, NTP is centralized, isn't it? Not peer-to-peer...

## Peering

### ZeroMQ

Currently we're handling networking with ZeroMQ pub/sub over TCP transport.

* ZeroMQ supports encryption, with public/private key pairs.
* A subscriber needs to know the public key of the publisher in order to connect (see the sketch below).
* We're aiming for symmetry, so we'll need a strategy to establish these reciprocal relationships.
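
A rough sketch of how CURVE-encrypted pub/sub could be wired up with the `zeromq` package (v6-style API). The port, topic name, and in-process key exchange are illustrative assumptions, and option/helper names can differ between versions:

```ts
// Sketch only: CURVE-encrypted pub/sub with zeromq.js (v6-style API).
import * as zmq from "zeromq";

async function demo() {
  // Long-term keypairs; helper name may vary by zeromq.js version.
  const serverKeys = zmq.curveKeyPair(); // publisher's keypair
  const clientKeys = zmq.curveKeyPair(); // subscriber's keypair

  // The publisher plays the CURVE "server" role.
  const pub = new zmq.Publisher();
  pub.curveServer = true;
  pub.curveSecretKey = serverKeys.secretKey;
  await pub.bind("tcp://127.0.0.1:4000"); // port is arbitrary here

  // The subscriber must already know the publisher's public key.
  const sub = new zmq.Subscriber();
  sub.curveServerKey = serverKeys.publicKey;
  sub.curvePublicKey = clientKeys.publicKey;
  sub.curveSecretKey = clientKeys.secretKey;
  sub.connect("tcp://127.0.0.1:4000");
  sub.subscribe("deltas");

  // Give the subscription a moment to propagate (slow-joiner issue).
  await new Promise((resolve) => setTimeout(resolve, 100));
  await pub.send(["deltas", JSON.stringify({op: "hello"})]);

  const [topic, payload] = await sub.receive();
  console.log(topic.toString(), payload.toString());
}

demo();
```

For the symmetry we're aiming at, each node would run both a Publisher and a Subscriber, so the awkward part is distributing every node's public key to its peers ahead of time.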

### GossipSub

One option is to replace ZeroMQ with GossipSub, which may function better in an open network environment.

Considerations with GossipSub may include:

* topics -- namespacing
* peer discovery

### TincVPN

Another layer which is available would be [Tinc VPN](https://tinc-vpn.org).

Tinc...
* is a daemon
* creates a mesh VPN
* uses tap/tun network devices
* network can run in router, switch, or hub mode
* performs UDP hole punching
* forwards packets among peers
* performs spanning tree routing
* participants only see messages if they've added the sender's public key to their configuration

Ideally, at least one node in a given network needs to listen on a public interface address.

The [Tinc configuration docs](https://tinc-vpn.org/documentation/Main-configuration-variables.html) provide some insight into its functioning.

Considerations imposed by Tinc would include:

* IP addressing
* public key management (see the sketch below)
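
To make those two items concrete, a rough sketch of per-node Tinc configuration might look like the following; the network name `rhizome`, host names, and addresses are made up for illustration:

```
# /etc/tinc/rhizome/tinc.conf on node1 (netname "rhizome" is illustrative)
Name = node1
Mode = switch
ConnectTo = node2            # node2 should listen on a public interface

# /etc/tinc/rhizome/hosts/node2 (copied to every peer that should reach node2)
Address = node2.example.com
Port = 655
Subnet = 10.11.0.2/32        # only relevant in router mode
-----BEGIN RSA PUBLIC KEY-----
...node2's public key, e.g. as generated by `tincd -n rhizome -K`...
-----END RSA PUBLIC KEY-----
```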

# Development / Demo

## Setup

```diff
@@ -16,7 +16,7 @@ type CollectionsToServe = {
 
 const docConverter = new Converter({
   completeHTMLDocument: true,
-  simpleLineBreaks: true,
+  // simpleLineBreaks: true,
   tables: true,
   tasklists: true
 });
```
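
For context, a converter like the one above is presumably used along these lines to render markdown as HTML (a guess at usage, not taken from the diff):

```ts
// Illustrative only: typical use of a showdown Converter with the options above.
import { Converter } from "showdown";

const docConverter = new Converter({
  completeHTMLDocument: true,
  tables: true,
  tasklists: true,
});

// Turn a markdown string (e.g. the README) into a standalone HTML document.
const html = docConverter.makeHtml("# Rhizome\n\nSome *markdown* content.");
console.log(html);
```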

```diff
@@ -159,11 +159,18 @@ export function runHttpApi(collections?: CollectionsToServe) {
       res.json({ids: collection.getIds()});
     });
 
+    // Get a single domain entity by ID
+    app.get(`/${name}/:id`, (req: express.Request, res: express.Response) => {
+      const {params: {id}} = req;
+      const ent = collection.get(id);
+      res.json(ent);
+    });
+
     // Add a new domain entity
     // TODO: schema validation
     app.put(`/${name}`, (req: express.Request, res: express.Response) => {
       const {body: properties} = req;
-      const ent = collection.put(undefined, properties);
+      const ent = collection.put(properties.id, properties);
       res.json(ent);
     });
```
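
A hypothetical exercise of the two routes above; the collection name `users`, port, and entity shape are assumptions, and note the PUT body now supplies its own `id`, matching `collection.put(properties.id, properties)`:

```ts
// Hypothetical client-side usage of the routes above (port and collection
// name "users" are assumptions; requires Node 18+ for the global fetch).
const base = "http://localhost:3000";

async function demo() {
  // PUT now honors an id supplied by the caller.
  const created = await fetch(`${base}/users`, {
    method: "PUT",
    headers: {"Content-Type": "application/json"},
    body: JSON.stringify({id: "alice", name: "Alice"}),
  }).then((res) => res.json());

  // The new GET /:id route returns a single entity.
  const fetched = await fetch(`${base}/users/alice`).then((res) => res.json());

  console.log(created, fetched);
}

demo();
```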

```diff
@@ -107,4 +107,6 @@ export class Lossless {
     }
     return view;
   }
+
+  // TODO: point-in-time queries
 }
```

src/lossy.ts: 14 lines changed

```diff
@@ -41,3 +41,17 @@ export class Lossy {
   }
 }
+
+// Generate a rule
+// Apply the rule -- When?
+// - Maybe we shard a set of deltas and map/reduce the results --
+//   We are trying to implement CRDT, so the results
+//   must be composable to preserve that feature.
+//   That also seems to imply we want to stick with
+//   the lossless view until the delta set is chosen
+// - So, in general on a set of deltas
+//   at times which seem opportune
+//   the results of which can be recorded
+//   and indexed such that the results can be reused
+//   i.e. you want to compute the result of a set which
+//   contains a prior one
 
```
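
As an illustration of the "results must be composable" point in that comment, here is a purely hypothetical sketch; the `Delta` shape and the counting summary are made up, not taken from `src/lossy.ts`:

```ts
// Sketch: a lossy summary that forms a commutative monoid, so results computed
// over disjoint shards of the delta set can be merged in any order and still
// agree -- the property that makes map/reduce over shards CRDT-friendly.
type Delta = {targetContext: string; value: unknown};

type Summary = Map<string, number>; // e.g. count of deltas per targetContext

const empty = (): Summary => new Map();

// Map step: summarize one shard of deltas (shards assumed to partition the set).
function fromShard(deltas: Delta[]): Summary {
  const s = empty();
  for (const d of deltas) {
    s.set(d.targetContext, (s.get(d.targetContext) ?? 0) + 1);
  }
  return s;
}

// Reduce step: associative and commutative merge of two summaries.
function merge(a: Summary, b: Summary): Summary {
  const out = new Map(a);
  for (const [k, v] of b) out.set(k, (out.get(k) ?? 0) + v);
  return out;
}

// Because merge is associative, a result recorded for a prior delta set can be
// reused: summarize only the new deltas and merge with the stored summary.
```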