
more work in progress actorizing the remaining parts of the switch

Arceliar 2020-03-29 19:01:50 -05:00
parent 15b850be6e
commit 9834f222db
9 changed files with 111 additions and 101 deletions
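
The theme of the commit: state that used to be guarded by the switch's mutex is converted to the phony actor model, where a structure embeds phony.Inbox and its fields are only touched by closures delivered to that inbox. A minimal, self-contained sketch of the pattern, assuming only the public phony API (Inbox, Actor, Act, Block); the counter type is illustrative, not part of this codebase:

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

// Embedding phony.Inbox makes counter an actor: closures sent via Act
// run one at a time on the actor's own goroutine, so n needs no mutex
// as long as it is only accessed from inside those closures.
type counter struct {
	phony.Inbox
	n int
}

// Increment never blocks the sender; from may be another actor (used
// for backpressure) or nil, as in several of the calls in this diff.
func (c *counter) Increment(from phony.Actor) {
	c.Act(from, func() { c.n++ })
}

func main() {
	c := &counter{}
	c.Increment(nil)
	phony.Block(c, func() { fmt.Println(c.n) }) // synchronous read: prints 1
}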

src/yggdrasil/core.go

@@ -330,8 +330,11 @@ func (c *Core) EncryptionPublicKey() string {
 // connected to any other nodes (effectively making you the root of a
 // single-node network).
 func (c *Core) Coords() []uint64 {
-	loc := c.switchTable.getLocator()
-	return wire_coordsBytestoUint64s(loc.getCoords())
+	var coords []byte
+	phony.Block(&c.router, func() {
+		coords = c.router.table.self.getCoords()
+	})
+	return wire_coordsBytestoUint64s(coords)
 }

 // Address gets the IPv6 address of the Yggdrasil node. This is always a /128
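
Coords() above runs on the caller's goroutine and reads state owned by the router actor, so it uses phony.Block, the synchronous bridge for code that lives outside the actor system. A self-contained sketch of the same bridge under the same assumptions; the state type is illustrative, not from this codebase:

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

type state struct {
	phony.Inbox
	coords []byte // owned by the actor; only touched from its inbox
}

// getCoords can be called from any goroutine: phony.Block enqueues the
// closure and waits for the actor to run it, just as Coords() does.
func (s *state) getCoords() []byte {
	var out []byte
	phony.Block(s, func() {
		out = append(out, s.coords...) // copy, so callers can't alias actor state
	})
	return out
}

func main() {
	s := &state{coords: []byte{1, 2, 3}}
	fmt.Println(s.getCoords()) // [1 2 3]
}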

src/yggdrasil/dht.go

@@ -186,11 +186,9 @@ func dht_ordered(first, second, third *crypto.NodeID) bool {
 // Update info about the node that sent the request.
 func (t *dht) handleReq(req *dhtReq) {
 	// Send them what they asked for
-	loc := t.router.core.switchTable.getLocator()
-	coords := loc.getCoords()
 	res := dhtRes{
 		Key:    t.router.core.boxPub,
-		Coords: coords,
+		Coords: t.router.table.self.getCoords(),
 		Dest:   req.Dest,
 		Infos:  t.lookup(&req.Dest, false),
 	}

@@ -300,11 +298,9 @@ func (t *dht) ping(info *dhtInfo, target *crypto.NodeID) {
 	if target == nil {
 		target = &t.nodeID
 	}
-	loc := t.router.core.switchTable.getLocator()
-	coords := loc.getCoords()
 	req := dhtReq{
 		Key:    t.router.core.boxPub,
-		Coords: coords,
+		Coords: t.router.table.self.getCoords(),
 		Dest:   *target,
 	}
 	t.sendReq(&req, info)

@@ -378,7 +374,7 @@ func (t *dht) getImportant() []*dhtInfo {
 	})
 	// Keep the ones that are no further than the closest seen so far
 	minDist := ^uint64(0)
-	loc := t.router.core.switchTable.getLocator()
+	loc := t.router.table.self
 	important := infos[:0]
 	for _, info := range infos {
 		dist := uint64(loc.dist(info.coords))

@@ -416,7 +412,7 @@ func (t *dht) isImportant(ninfo *dhtInfo) bool {
 	}
 	important := t.getImportant()
 	// Check if ninfo is of equal or greater importance to what we already know
-	loc := t.router.core.switchTable.getLocator()
+	loc := t.router.table.self
 	ndist := uint64(loc.dist(ninfo.coords))
 	minDist := ^uint64(0)
 	for _, info := range important {

src/yggdrasil/link.go

@@ -300,7 +300,7 @@ func (intf *linkInterface) notifyBlockedSend() {
 	intf.Act(nil, func() {
 		if intf.sendTimer != nil {
 			//As far as we know, we're still trying to send, and the timer fired.
-			intf.link.core.switchTable.blockPeer(intf.peer.port)
+			intf.link.core.switchTable.blockPeer(intf, intf.peer.port)
 		}
 	})
 }

@@ -340,7 +340,7 @@ func (intf *linkInterface) notifyStalled() {
 			intf.stallTimer.Stop()
 			intf.stallTimer = nil
 			intf.stalled = true
-			intf.link.core.switchTable.blockPeer(intf.peer.port)
+			intf.link.core.switchTable.blockPeer(intf, intf.peer.port)
 		}
 	})
 }

src/yggdrasil/nodeinfo.go

@@ -18,6 +18,7 @@ type nodeinfo struct {
 	myNodeInfo NodeInfoPayload
 	callbacks  map[crypto.BoxPubKey]nodeinfoCallback
 	cache      map[crypto.BoxPubKey]nodeinfoCached
+	table      *lookupTable
 }

 type nodeinfoCached struct {

@@ -187,7 +188,7 @@ func (m *nodeinfo) sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
 }

 func (m *nodeinfo) _sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
-	loc := m.core.switchTable.getLocator()
+	loc := m.table.self
 	nodeinfo := nodeinfoReqRes{
 		SendCoords: loc.getCoords(),
 		IsResponse: isResponse,

src/yggdrasil/peers.go

@@ -162,7 +162,7 @@ func (ps *peers) _removePeer(p *peer) {
 	if q := ps.ports[p.port]; p.port == 0 || q != p {
 		return
 	} // Can't remove self peer or nonexistant peer
-	ps.core.switchTable.forgetPeer(p.port)
+	ps.core.switchTable.forgetPeer(ps, p.port)
 	oldPorts := ps.ports
 	newPorts := make(map[switchPort]*peer)
 	for k, v := range oldPorts {

@@ -328,7 +328,7 @@ func (p *peer) _handleLinkTraffic(bs []byte) {

 // Gets a switchMsg from the switch, adds signed next-hop info for this peer, and sends it to them.
 func (p *peer) _sendSwitchMsg() {
-	msg := p.core.switchTable.getMsg()
+	msg := p.table.getMsg()
 	if msg == nil {
 		return
 	}

@@ -367,19 +367,26 @@ func (p *peer) _handleSwitchMsg(packet []byte) {
 		}
 		prevKey = hop.Next
 	}
-	p.core.switchTable.handleMsg(&msg, p.port)
-	if !p.core.switchTable.checkRoot(&msg) {
-		// Bad switch message
-		p.dinfo = nil
-		return
-	}
-	// Pass a message to the dht informing it that this peer (still) exists
-	loc.coords = loc.coords[:len(loc.coords)-1]
-	p.dinfo = &dhtInfo{
-		key:    p.box,
-		coords: loc.getCoords(),
-	}
-	p._updateDHT()
+	p.core.switchTable.Act(p, func() {
+		if !p.core.switchTable._checkRoot(&msg) {
+			// Bad switch message
+			p.Act(&p.core.switchTable, func() {
+				p.dinfo = nil
+			})
+		} else {
+			// handle the message
+			p.core.switchTable._handleMsg(&msg, p.port, false)
+			p.Act(&p.core.switchTable, func() {
+				// Pass a message to the dht informing it that this peer (still) exists
+				loc.coords = loc.coords[:len(loc.coords)-1]
+				p.dinfo = &dhtInfo{
+					key:    p.box,
+					coords: loc.getCoords(),
+				}
+				p._updateDHT()
+			})
+		}
+	})
 }

 // This generates the bytes that we sign or check the signature of for a switchMsg.
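
The rewritten _handleSwitchMsg above is the actor equivalent of call-and-return: the peer pushes work into the switch table's inbox, and the switch table pushes the follow-up back into the peer's inbox rather than returning a value, so neither side ever blocks on the other. A self-contained sketch of that round trip under the same phony API; worker and caller are hypothetical names, not from this codebase:

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

type worker struct{ phony.Inbox }

type caller struct {
	phony.Inbox
	result string
}

// process runs on the worker's inbox, then delivers the result to the
// caller's inbox instead of returning it.
func (w *worker) process(from *caller, input string) {
	w.Act(from, func() {
		out := input + " processed"
		from.Act(w, func() { // the reply lands back in the caller's inbox
			from.result = out
		})
	})
}

func main() {
	w, c := &worker{}, &caller{}
	w.process(c, "hello")
	phony.Block(w, func() {})                        // the worker has now queued its reply
	phony.Block(c, func() { fmt.Println(c.result) }) // prints "hello processed"
}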

src/yggdrasil/router.go

@@ -46,6 +46,7 @@ type router struct {
 	nodeinfo nodeinfo
 	searches searches
 	sessions sessions
+	table    *lookupTable // has a copy of our locator
 }

 // Initializes the router struct, which includes setting up channels to/from the adapter.

@@ -77,6 +78,21 @@ func (r *router) init(core *Core) {
 	r.sessions.init(r)
 }

+func (r *router) updateTable(from phony.Actor, table *lookupTable) {
+	r.Act(from, func() {
+		r.table = table
+		r.nodeinfo.Act(r, func() {
+			r.nodeinfo.table = table
+		})
+		for _, ses := range r.sessions.sinfos {
+			sinfo := ses
+			sinfo.Act(r, func() {
+				sinfo.table = table
+			})
+		}
+	})
+}
+
 // Reconfigures the router and any child modules. This should only ever be run
 // by the router actor.
 func (r *router) reconfigure() {

@@ -130,7 +146,7 @@ func (r *router) reset(from phony.Actor) {
 func (r *router) doMaintenance() {
 	phony.Block(r, func() {
 		// Any periodic maintenance stuff goes here
-		r.core.switchTable.doMaintenance()
+		r.core.switchTable.doMaintenance(r)
 		r.dht.doMaintenance()
 		r.sessions.cleanup()
 	})
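
updateTable above fans a fresh, immutable *lookupTable out to every actor that reads it; each reader swaps its pointer from inside its own inbox, so subsequent lookups need no lock. A sketch of the same publish/subscribe shape, with illustrative types (snapshot, reader, producer) not taken from this codebase:

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

type snapshot struct{ version int }

type reader struct {
	phony.Inbox
	snap *snapshot // replaced wholesale, never mutated, so reads need no lock
}

type producer struct {
	phony.Inbox
	readers []*reader
}

// publish pushes the new pointer into each reader's inbox, mirroring
// how _updateTable hands fresh *lookupTable values to router and sessions.
func (p *producer) publish(from phony.Actor, s *snapshot) {
	p.Act(from, func() {
		for _, r := range p.readers {
			r := r // capture per iteration, as updateTable does with sinfo
			r.Act(p, func() { r.snap = s })
		}
	})
}

func main() {
	r := &reader{}
	p := &producer{readers: []*reader{r}}
	p.publish(nil, &snapshot{version: 1})
	phony.Block(p, func() {}) // let the producer finish fanning out
	phony.Block(r, func() { fmt.Println(r.snap.version) }) // prints 1
}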

src/yggdrasil/search.go

@@ -161,11 +161,10 @@ func (sinfo *searchInfo) continueSearch(infos []*dhtInfo) {

 // Initially start a search
 func (sinfo *searchInfo) startSearch() {
-	loc := sinfo.searches.router.core.switchTable.getLocator()
 	var infos []*dhtInfo
 	infos = append(infos, &dhtInfo{
 		key:    sinfo.searches.router.core.boxPub,
-		coords: loc.getCoords(),
+		coords: sinfo.searches.router.table.self.getCoords(),
 	})
 	// Start the search by asking ourself, useful if we're the destination
 	sinfo.continueSearch(infos)

src/yggdrasil/session.go

@@ -52,6 +52,7 @@ type sessionInfo struct {
 	cancel    util.Cancellation // Used to terminate workers
 	conn      *Conn             // The associated Conn object
 	callbacks []chan func()     // Finished work from crypto workers
+	table     *lookupTable      // table.self is a locator where we get our coords
 }

 // Represents a session ping/pong packet, and includes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.

@@ -217,6 +218,7 @@ func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
 	sinfo.myHandle = *crypto.NewHandle()
 	sinfo.theirAddr = *address.AddrForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
 	sinfo.theirSubnet = *address.SubnetForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
+	sinfo.table = ss.router.table
 	ss.sinfos[sinfo.myHandle] = &sinfo
 	ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
 	return &sinfo

@@ -266,8 +268,7 @@ func (ss *sessions) removeSession(sinfo *sessionInfo) {

 // Returns a session ping appropriate for the given session info.
 func (sinfo *sessionInfo) _getPing() sessionPing {
-	loc := sinfo.sessions.router.core.switchTable.getLocator()
-	coords := loc.getCoords()
+	coords := sinfo.table.self.getCoords()
 	ping := sessionPing{
 		SendPermPub: sinfo.sessions.router.core.boxPub,
 		Handle:      sinfo.myHandle,

src/yggdrasil/switch.go

@@ -12,12 +12,9 @@ package yggdrasil
 // A little annoying to do with constant changes from backpressure

 import (
-	//"math/rand"
-	"sync"
 	"time"

 	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
-	//"github.com/yggdrasil-network/yggdrasil-go/src/util"

 	"github.com/Arceliar/phony"
 )

@@ -149,6 +146,7 @@ type tableElem struct {
 type lookupTable struct {
 	self  switchLocator
 	elems map[switchPort]tableElem
+	_msg  switchMsg
 }

 // This is switch information which is mutable and needs to be modified by other goroutines, but is not accessed atomically.

@@ -168,7 +166,6 @@ type switchTable struct {
 	key    crypto.SigPubKey           // Our own key
 	time   time.Time                  // Time when locator.tstamp was last updated
 	drop   map[crypto.SigPubKey]int64 // Tstamp associated with a dropped root
-	mutex  sync.RWMutex               // Lock for reads/writes of switchData
 	parent switchPort                 // Port of whatever peer is our parent, or self if we're root
 	data   switchData                 //
 	phony.Inbox                       // Owns the below

@@ -208,24 +205,17 @@ func (t *switchTable) reconfigure() {
 	t.core.peers.reconfigure()
 }

-// Safely gets a copy of this node's locator.
-func (t *switchTable) getLocator() switchLocator {
-	t.mutex.RLock()
-	defer t.mutex.RUnlock()
-	return t.data.locator.clone()
-}
-
 // Regular maintenance to possibly timeout/reset the root and similar.
-func (t *switchTable) doMaintenance() {
-	// Periodic maintenance work to keep things internally consistent
-	t.mutex.Lock()         // Write lock
-	defer t.mutex.Unlock() // Release lock when we're done
-	t.cleanRoot()
-	t.cleanDropped()
+func (t *switchTable) doMaintenance(from phony.Actor) {
+	t.Act(from, func() {
+		// Periodic maintenance work to keep things internally consistent
+		t._cleanRoot()
+		t._cleanDropped()
+	})
 }

 // Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
-func (t *switchTable) cleanRoot() {
+func (t *switchTable) _cleanRoot() {
 	// TODO rethink how this is done?...
 	// Get rid of the root if it looks like its timed out
 	now := time.Now()

@@ -259,49 +249,49 @@ func (t *switchTable) cleanRoot() {
 }

 // Blocks and, if possible, unparents a peer
-func (t *switchTable) blockPeer(port switchPort) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	peer, isIn := t.data.peers[port]
-	if !isIn {
-		return
-	}
-	peer.blocked = true
-	t.data.peers[port] = peer
-	if port != t.parent {
-		return
-	}
-	t.parent = 0
-	for _, info := range t.data.peers {
-		if info.port == port {
-			continue
-		}
-		t.unlockedHandleMsg(&info.msg, info.port, true)
-	}
-	t.unlockedHandleMsg(&peer.msg, peer.port, true)
+func (t *switchTable) blockPeer(from phony.Actor, port switchPort) {
+	t.Act(from, func() {
+		peer, isIn := t.data.peers[port]
+		if !isIn {
+			return
+		}
+		peer.blocked = true
+		t.data.peers[port] = peer
+		if port != t.parent {
+			return
+		}
+		t.parent = 0
+		for _, info := range t.data.peers {
+			if info.port == port {
+				continue
+			}
+			t._handleMsg(&info.msg, info.port, true)
+		}
+		t._handleMsg(&peer.msg, peer.port, true)
+	})
 }

 // Removes a peer.
 // Must be called by the router actor with a lambda that calls this.
 // If the removed peer was this node's parent, it immediately tries to find a new parent.
-func (t *switchTable) forgetPeer(port switchPort) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	delete(t.data.peers, port)
-	defer t._updateTable()
-	if port != t.parent {
-		return
-	}
-	t.parent = 0
-	for _, info := range t.data.peers {
-		t.unlockedHandleMsg(&info.msg, info.port, true)
-	}
+func (t *switchTable) forgetPeer(from phony.Actor, port switchPort) {
+	t.Act(from, func() {
+		delete(t.data.peers, port)
+		defer t._updateTable()
+		if port != t.parent {
+			return
+		}
+		t.parent = 0
+		for _, info := range t.data.peers {
+			t._handleMsg(&info.msg, info.port, true)
+		}
+	})
 }

 // Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
 // If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
 // This function is called periodically to do that cleanup.
-func (t *switchTable) cleanDropped() {
+func (t *switchTable) _cleanDropped() {
 	// TODO? only call this after root changes, not periodically
 	for root := range t.drop {
 		if !firstIsBetter(&root, &t.data.locator.root) {

@@ -327,9 +317,7 @@ type switchMsgHop struct {
 }

 // This returns a *switchMsg to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
-func (t *switchTable) getMsg() *switchMsg {
-	t.mutex.RLock()
-	defer t.mutex.RUnlock()
+func (t *switchTable) _getMsg() *switchMsg {
 	if t.parent == 0 {
 		return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
 	} else if parent, isIn := t.data.peers[t.parent]; isIn {

@@ -341,14 +329,18 @@ func (t *switchTable) getMsg() *switchMsg {
 	}
 }

+func (t *lookupTable) getMsg() *switchMsg {
+	msg := t._msg
+	msg.Hops = append([]switchMsgHop(nil), t._msg.Hops...)
+	return &msg
+}
+
 // This function checks that the root information in a switchMsg is OK.
 // In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
-func (t *switchTable) checkRoot(msg *switchMsg) bool {
+func (t *switchTable) _checkRoot(msg *switchMsg) bool {
 	// returns false if it's a dropped root, not a better root, or has an older timestamp
 	// returns true otherwise
 	// used elsewhere to keep inserting peers into the dht only if root info is OK
-	t.mutex.RLock()
-	defer t.mutex.RUnlock()
 	dropTstamp, isIn := t.drop[msg.Root]
 	switch {
 	case isIn && dropTstamp >= msg.TStamp:

@@ -364,20 +356,13 @@ func (t *switchTable) checkRoot(msg *switchMsg) bool {
 	}
 }

-// This is a mutexed wrapper to unlockedHandleMsg, and is called by the peer structs in peers.go to pass a switchMsg for that peer into the switch.
-func (t *switchTable) handleMsg(msg *switchMsg, fromPort switchPort) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.unlockedHandleMsg(msg, fromPort, false)
-}
-
 // This updates the switch with information about a peer.
 // Then the tricky part, it decides if it should update our own locator as a result.
 // That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
 // There are a lot of very delicate order sensitive checks here, so its' best to just read the code if you need to understand what it's doing.
 // It's very important to not change the order of the statements in the case function unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
 // Set the third arg to true if you're reprocessing an old message, e.g. to find a new parent after one disconnects, to avoid updating some timing related things.
-func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
+func (t *switchTable) _handleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
 	// TODO directly use a switchMsg instead of switchMessage + sigs
 	now := time.Now()
 	// Set up the sender peerInfo

@@ -500,10 +485,10 @@ func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
 			if peer.port == sender.port {
 				continue
 			}
-			t.unlockedHandleMsg(&peer.msg, peer.port, true)
+			t._handleMsg(&peer.msg, peer.port, true)
 		}
 		// Process the sender last, to avoid keeping them as a parent if at all possible.
-		t.unlockedHandleMsg(&sender.msg, sender.port, true)
+		t._handleMsg(&sender.msg, sender.port, true)
 	case now.Sub(t.time) < switch_throttle:
 		// We've already gotten an update from this root recently, so ignore this one to avoid flooding.
 	case sender.locator.tstamp > t.data.locator.tstamp:

@@ -521,7 +506,7 @@ func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
 		}
 		t.data.locator = sender.locator
 		t.parent = sender.port
-		t.core.peers.sendSwitchMsgs(t)
+		defer t.core.peers.sendSwitchMsgs(t)
 	}
 	if true || doUpdate {
 		defer t._updateTable()

@@ -560,7 +545,9 @@ func (t *switchTable) _updateTable() {
 			time:    pinfo.time,
 		}
 	}
-	t.core.peers.updateTables(nil, &newTable) // TODO not be from nil
+	newTable._msg = *t._getMsg()
+	t.core.peers.updateTables(t, &newTable)
+	t.core.router.updateTable(t, &newTable)
 }

 // Starts the switch worker
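
A detail worth noting in the new lookupTable.getMsg above: it rebuilds Hops with append([]switchMsgHop(nil), t._msg.Hops...) because callers append their own hop to the returned message, and an append into spare capacity would otherwise write through to the shared cached _msg. A minimal demonstration of that aliasing hazard in plain Go:

package main

import "fmt"

func main() {
	cached := make([]int, 2, 4) // a cached slice with spare capacity, like _msg.Hops
	alias := cached
	alias = append(alias, 3) // no copy: writes into cached's backing array
	safe := append([]int(nil), cached...)
	safe = append(safe, 4) // the getMsg pattern: appends land in a private array
	fmt.Println(cached[:3][2], safe[2]) // 3 4 — the aliased append leaked into cached
}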