Mirror of https://github.com/cwinfo/yggdrasil-go.git

more peer migration

Arceliar 2019-08-24 13:15:29 -05:00
parent ecd23ce9fc
commit 034fece33f
2 changed files with 21 additions and 33 deletions
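
This commit continues migrating the peer code away from ad-hoc channels (doSend, a dinfo channel) toward the actor model from the phony package: callers hand work to a peer with EnqueueFrom, or block on it with SyncExec. The toy code below is a minimal, self-contained sketch of that inbox pattern, assuming nothing about phony's real API; the inbox type and the enqueue/syncExec names are illustrative only.

// Minimal sketch of the serialize-everything-through-one-goroutine pattern
// that EnqueueFrom/SyncExec provide in the real code. Names here are
// hypothetical; only the shape of the pattern matches the diff.
package main

import "fmt"

// inbox runs every queued function on a single goroutine, so the state those
// functions touch needs no mutex.
type inbox struct {
    queue chan func()
}

func newInbox() *inbox {
    in := &inbox{queue: make(chan func(), 32)}
    go func() {
        for f := range in.queue {
            f()
        }
    }()
    return in
}

// enqueue schedules work asynchronously, like p.EnqueueFrom(from, ...) in the diff.
func (in *inbox) enqueue(f func()) {
    in.queue <- f
}

// syncExec schedules work and blocks until it has run, like <-p.SyncExec(...).
func (in *inbox) syncExec(f func()) {
    done := make(chan struct{})
    in.queue <- func() { f(); close(done) }
    <-done
}

func main() {
    in := newInbox()
    counter := 0 // only ever touched from the inbox goroutine
    in.enqueue(func() { counter++ })
    in.syncExec(func() { counter++ })
    in.syncExec(func() { fmt.Println("counter:", counter) }) // prints 2
}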

Changed file 1 of 2:

@@ -110,9 +110,9 @@ type peer struct {
     endpoint   string
     firstSeen  time.Time       // To track uptime for getPeers
     linkOut    (chan []byte)   // used for protocol traffic (to bypass queues)
-    doSend     (chan struct{}) // tell the linkLoop to send a switchMsg
-    dinfo      (chan *dhtInfo) // used to keep the DHT working
+    dinfo      *dhtInfo        // used to keep the DHT working
     out        func([][]byte)  // Set up by whatever created the peers struct, used to send packets to other nodes
+    done       (chan struct{}) // closed to exit the linkLoop
     close      func()          // Called when a peer is removed, to close the underlying connection, or via admin api
 }
 
@@ -124,8 +124,7 @@ func (ps *peers) newPeer(box *crypto.BoxPubKey, sig *crypto.SigPubKey, linkShare
         shared:     *crypto.GetSharedKey(&ps.core.boxPriv, box),
         linkShared: *linkShared,
         firstSeen:  now,
-        doSend:     make(chan struct{}, 1),
-        dinfo:      make(chan *dhtInfo, 1),
+        done:       make(chan struct{}),
         close:      closer,
         core:       ps.core,
         intf:       intf,
@@ -170,29 +169,19 @@ func (ps *peers) removePeer(port switchPort) {
         if p.close != nil {
             p.close()
         }
-        close(p.doSend)
+        close(p.done)
     }
 }
 
 // If called, sends a notification to each peer that they should send a new switch message.
 // Mainly called by the switch after an update.
-func (ps *peers) sendSwitchMsgs() {
+func (ps *peers) sendSwitchMsgs(from phony.IActor) {
     ports := ps.getPorts()
     for _, p := range ports {
         if p.port == 0 {
             continue
         }
-        p.doSendSwitchMsgs()
-    }
-}
-
-// If called, sends a notification to the peer's linkLoop to trigger a switchMsg send.
-// Mainly called by sendSwitchMsgs or during linkLoop startup.
-func (p *peer) doSendSwitchMsgs() {
-    defer func() { recover() }() // In case there's a race with close(p.doSend)
-    select {
-    case p.doSend <- struct{}{}:
-    default:
+        p.EnqueueFrom(from, p._sendSwitchMsg)
     }
 }
 
@@ -201,24 +190,23 @@ func (p *peer) doSendSwitchMsgs() {
 func (p *peer) linkLoop() {
     tick := time.NewTicker(time.Second)
     defer tick.Stop()
-    p.doSendSwitchMsgs()
-    var dinfo *dhtInfo
+    <-p.SyncExec(p._sendSwitchMsg) // Startup message
     for {
         select {
-        case _, ok := <-p.doSend:
-            if !ok {
-                return
-            }
-            <-p.SyncExec(p._sendSwitchMsg)
-        case dinfo = <-p.dinfo:
+        case <-p.done:
+            return
         case _ = <-tick.C:
-            if dinfo != nil {
-                <-p.SyncExec(func() { p.core.router.insertPeer(p, dinfo) })
-            }
+            <-p.SyncExec(p._updateDHT)
         }
     }
 }
 
+func (p *peer) _updateDHT() {
+    if p.dinfo != nil {
+        p.core.router.insertPeer(p, p.dinfo)
+    }
+}
+
 func (p *peer) handlePacketFrom(from phony.IActor, packet []byte) {
     p.EnqueueFrom(from, func() {
         p._handlePacket(packet)
@@ -366,16 +354,16 @@ func (p *peer) _handleSwitchMsg(packet []byte) {
     p.core.switchTable.handleMsg(&msg, p.port)
     if !p.core.switchTable.checkRoot(&msg) {
         // Bad switch message
-        p.dinfo <- nil
+        p.dinfo = nil
         return
     }
     // Pass a mesage to the dht informing it that this peer (still) exists
     loc.coords = loc.coords[:len(loc.coords)-1]
-    dinfo := dhtInfo{
+    p.dinfo = &dhtInfo{
         key:    p.box,
         coords: loc.getCoords(),
     }
-    p.dinfo <- &dinfo
+    p._updateDHT()
 }
 
 // This generates the bytes that we sign or check the signature of for a switchMsg.
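
With the channels gone, the peer's background goroutine shrinks to the shape sketched below: a ticker drives periodic DHT maintenance and a single done channel, closed by removePeer, is the only shutdown signal. This is a stripped-down illustration, not the real yggdrasil-go types; toyPeer and the printed messages stand in for the actual switch-message and DHT calls.

package main

import (
    "fmt"
    "time"
)

type toyPeer struct {
    done chan struct{} // closed to stop linkLoop; never sent on
}

func (p *toyPeer) linkLoop() {
    tick := time.NewTicker(100 * time.Millisecond)
    defer tick.Stop()
    fmt.Println("startup: send initial switch message")
    for {
        select {
        case <-p.done:
            return // receiving from a closed channel returns immediately
        case <-tick.C:
            fmt.Println("tick: refresh the DHT entry, if any")
        }
    }
}

func main() {
    p := &toyPeer{done: make(chan struct{})}
    go p.linkLoop()
    time.Sleep(350 * time.Millisecond)
    close(p.done) // what removePeer now does instead of closing doSend
    time.Sleep(50 * time.Millisecond)
}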

Changed file 2 of 2:

@@ -248,7 +248,7 @@ func (t *switchTable) cleanRoot() {
             t.core.router.reset(nil)
         }
         t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
-        t.core.peers.sendSwitchMsgs()
+        t.core.peers.sendSwitchMsgs(nil) // TODO update if/when the switch becomes an actor
     }
 }
 
@@ -515,7 +515,7 @@ func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, rep
         }
         t.data.locator = sender.locator
         t.parent = sender.port
-        t.core.peers.sendSwitchMsgs()
+        t.core.peers.sendSwitchMsgs(nil) // TODO update if/when the switch becomes an actor
     }
     if doUpdate {
         t.updater.Store(&sync.Once{})
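
The switch from close(p.doSend) to close(p.done) also removes the need for the old recover() guard: sending on a closed channel panics, so the sender had to be protected, while receiving from a closed channel returns at once, which is exactly what the new done-channel case in linkLoop relies on. The short sketch below only demonstrates those two built-in channel behaviours.

package main

import "fmt"

func main() {
    done := make(chan struct{})
    close(done)

    // Receiving from a closed channel never blocks and never panics.
    <-done
    fmt.Println("receive from closed channel: returns immediately")

    // Sending on a closed channel panics; the old doSend code papered over
    // this race with a deferred recover().
    defer func() {
        if r := recover(); r != nil {
            fmt.Println("send on closed channel:", r)
        }
    }()
    done <- struct{}{}
}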