package yggdrasil

// This part constructs a spanning tree of the network
// It routes packets based on distance on the spanning tree
//  In general, this is *not* equivalent to routing on the tree
//  It falls back to the tree in the worst case, but it can take shortcuts too
// This is the part that makes routing reasonably efficient on scale-free graphs

// TODO document/comment everything in a lot more detail

// TODO? use a pre-computed lookup table (python version had this)
//  A little annoying to do with constant changes from backpressure

import (
	"math/rand"
	"sync"
	"sync/atomic"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
	"github.com/yggdrasil-network/yggdrasil-go/src/util"

	"github.com/Arceliar/phony"
)

const (
	switch_timeout          = time.Minute
	switch_updateInterval   = switch_timeout / 2
	switch_throttle         = switch_updateInterval / 2
	switch_faster_threshold = 240 // Number of switch updates before switching to a faster parent
)
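// A rough sense of scale, assuming the root pushes out a new timestamp about
// once per switch_updateInterval (~30s): a peer has to look faster than the
// current parent for roughly 240 consecutive updates, i.e. on the order of
// two hours, before we switch to it.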
// The switch locator represents the topology and network state dependent info about a node, minus the signatures that go with it.
// Nodes will pick the best root they see, provided that the root continues to push out updates with new timestamps.
// The coords represent a path from the root to a node.
// This path is generally part of a spanning tree, except possibly the last hop (it can loop when sending coords to your parent, but they see this and know not to use a looping path).
type switchLocator struct {
	root   crypto.SigPubKey
	tstamp int64
	coords []switchPort
}
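// A minimal sketch with hypothetical values: a node reached from the root via
// the root's port 5 and then that child's port 2 would carry
//
//	switchLocator{root: rootKey, tstamp: ts, coords: []switchPort{5, 2}}
//
// where rootKey and ts come from the root's most recent signed update.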
// Returns true if the first sigPubKey has a higher TreeID.
func firstIsBetter(first, second *crypto.SigPubKey) bool {
	// Higher TreeID is better
	ftid := crypto.GetTreeID(first)
	stid := crypto.GetTreeID(second)
	for idx := 0; idx < len(ftid); idx++ {
		if ftid[idx] == stid[idx] {
			continue
		}
		return ftid[idx] > stid[idx]
	}
	// Edge case, when comparing identical IDs
	return false
}
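// For example, TreeIDs that begin 0x80... and 0x7f... differ at their first
// byte, so the first key wins; comparing a TreeID against itself falls
// through the loop and returns false.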
// Returns a copy of the locator which can safely be mutated.
func (l *switchLocator) clone() switchLocator {
	// Used to create a deep copy for use in messages
	// Copy required because we need to mutate coords before sending
	// (By appending the port from us to the destination)
	loc := *l
	loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
	copy(loc.coords, l.coords)
	return loc
}
// Gets the distance a locator is from the provided destination coords, with the coords provided in []byte format (used to compress integers sent over the wire).
func (l *switchLocator) dist(dest []byte) int {
	// Returns distance (on the tree) from these coords
	offset := 0
	fdc := 0
	for {
		if fdc >= len(l.coords) {
			break
		}
		coord, length := wire_decode_uint64(dest[offset:])
		if length == 0 {
			break
		}
		if l.coords[fdc] != switchPort(coord) {
			break
		}
		fdc++
		offset += length
	}
	dist := len(l.coords[fdc:])
	for {
		_, length := wire_decode_uint64(dest[offset:])
		if length == 0 {
			break
		}
		dist++
		offset += length
	}
	return dist
}
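// A worked example with hypothetical coords:
//
//	a := switchLocator{coords: []switchPort{1, 2, 3}}
//	b := switchLocator{coords: []switchPort{1, 2, 5}}
//	d := a.dist(b.getCoords())
//
// The shared prefix is {1, 2}, so the distance is 1 hop up the tree from a's
// position plus 1 hop down to b's, giving d == 2.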
// Gets coords in wire encoded format, with *no* length prefix.
func (l *switchLocator) getCoords() []byte {
	bs := make([]byte, 0, len(l.coords))
	for _, coord := range l.coords {
		c := wire_encode_uint64(uint64(coord))
		bs = append(bs, c...)
	}
	return bs
}
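// For example, assuming the wire varint format is MSB-first with 7 bits per
// byte and a 0x80 continuation bit, coords {1, 130, 3} encode as the bytes
// {0x01, 0x81, 0x02, 0x03}.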
// Returns true if this locator represents an ancestor of the locator given as an argument.
// Ancestor means that it's the parent node, or the parent of parent, and so on...
func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
	if x.root != y.root {
		return false
	}
	if len(x.coords) > len(y.coords) {
		return false
	}
	for idx := range x.coords {
		if x.coords[idx] != y.coords[idx] {
			return false
		}
	}
	return true
}
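// For example, under the same root, a locator with coords {1, 2} is an
// ancestor of one with coords {1, 2, 7} (and of itself), but not of one with
// coords {1, 3, 7}.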
// Information about a peer, used by the switch to build the tree and eventually make routing decisions.
type peerInfo struct {
	key     crypto.SigPubKey      // ID of this peer
	locator switchLocator         // Should be able to respond with signatures upon request
	degree  uint64                // Self-reported degree
	time    time.Time             // Time this node was last seen
	faster  map[switchPort]uint64 // Counter of how often a node is faster than the current parent, penalized extra if slower
	port    switchPort            // Interface number of this peer
	msg     switchMsg             // The wire switchMsg used
	blocked bool                  // True if the link is blocked, used to avoid parenting a blocked link
}

// This is just a uint64 with a named type for clarity reasons.
type switchPort uint64

// This is the subset of the information about a peer needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type tableElem struct {
	port    switchPort
	locator switchLocator
}

// This is the subset of the information about all peers needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type lookupTable struct {
	self  switchLocator
	elems map[switchPort]tableElem
}
// This is switch information which is mutable and needs to be modified by other goroutines, but is not accessed atomically.
// Use the switchTable functions to access it safely using the RWMutex for synchronization.
type switchData struct {
	// All data that's mutable and used by exported Table methods
	// To be read/written with atomic.Value Store/Load calls
	locator switchLocator
	seq     uint64 // Sequence number, reported to peers, so they know about changes
	peers   map[switchPort]peerInfo
	msg     *switchMsg
}

// All the information stored by the switch.
type switchTable struct {
	core        *Core
	key         crypto.SigPubKey           // Our own key
	time        time.Time                  // Time when locator.tstamp was last updated
	drop        map[crypto.SigPubKey]int64 // Tstamp associated with a dropped root
	mutex       sync.RWMutex               // Lock for reads/writes of switchData
	parent      switchPort                 // Port of whatever peer is our parent, or self if we're root
	data        switchData
	updater     atomic.Value             // *sync.Once
	table       atomic.Value             // lookupTable
	phony.Inbox                          // Owns the below
	queues      switch_buffers           // Queues - not atomic so ONLY use through the actor
	idle        map[switchPort]time.Time // Idle peers - not atomic so ONLY use through the actor
}

// Minimum allowed total size of switch queues (4 MiB).
const SwitchQueueTotalMinSize = 4 * 1024 * 1024
// Initializes the switchTable struct.
func (t *switchTable) init(core *Core) {
	now := time.Now()
	t.core = core
	t.key = t.core.sigPub
	locator := switchLocator{root: t.key, tstamp: now.Unix()}
	peers := make(map[switchPort]peerInfo)
	t.data = switchData{locator: locator, peers: peers}
	t.updater.Store(&sync.Once{})
	t.table.Store(lookupTable{})
	t.drop = make(map[crypto.SigPubKey]int64)
	phony.Block(t, func() {
		t.queues.totalMaxSize = SwitchQueueTotalMinSize
		t.queues.bufs = make(map[string]switch_buffer)
		t.idle = make(map[switchPort]time.Time)
	})
}

func (t *switchTable) reconfigure(e chan error) {
	defer close(e)
	// This is where reconfiguration would go, if we had anything useful to do.
}
// Safely gets a copy of this node's locator.
func (t *switchTable) getLocator() switchLocator {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.data.locator.clone()
}

// Regular maintenance to possibly timeout/reset the root and similar.
func (t *switchTable) doMaintenance() {
	// Periodic maintenance work to keep things internally consistent
	t.mutex.Lock()         // Write lock
	defer t.mutex.Unlock() // Release lock when we're done
	t.cleanRoot()
	t.cleanDropped()
}
// Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
func (t *switchTable) cleanRoot() {
	// TODO rethink how this is done?...
	// Get rid of the root if it looks like it's timed out
	now := time.Now()
	doUpdate := false
	if now.Sub(t.time) > switch_timeout {
		dropped := t.data.peers[t.parent]
		dropped.time = t.time
		t.drop[t.data.locator.root] = t.data.locator.tstamp
		doUpdate = true
	}
	// Or, if we're better than our root, root ourself
	if firstIsBetter(&t.key, &t.data.locator.root) {
		doUpdate = true
	}
	// Or, if we are the root, possibly update our timestamp
	if t.data.locator.root == t.key &&
		now.Sub(t.time) > switch_updateInterval {
		doUpdate = true
	}
	if doUpdate {
		t.parent = switchPort(0)
		t.time = now
		if t.data.locator.root != t.key {
			t.data.seq++
			t.updater.Store(&sync.Once{})
			t.core.router.reset(nil)
		}
		t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
		t.core.peers.sendSwitchMsgs(t)
	}
}
// Blocks and, if possible, unparents a peer.
func (t *switchTable) blockPeer(port switchPort) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	peer, isIn := t.data.peers[port]
	if !isIn {
		return
	}
	peer.blocked = true
	t.data.peers[port] = peer
	if port != t.parent {
		return
	}
	t.parent = 0
	for _, info := range t.data.peers {
		if info.port == port {
			continue
		}
		t.unlockedHandleMsg(&info.msg, info.port, true)
	}
	t.unlockedHandleMsg(&peer.msg, peer.port, true)
}
// Removes a peer.
// Must be called by the router mainLoop goroutine, e.g. call router.doAdmin with a lambda that calls this.
// If the removed peer was this node's parent, it immediately tries to find a new parent.
func (t *switchTable) forgetPeer(port switchPort) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	delete(t.data.peers, port)
	t.updater.Store(&sync.Once{})
	if port != t.parent {
		return
	}
	t.parent = 0
	for _, info := range t.data.peers {
		t.unlockedHandleMsg(&info.msg, info.port, true)
	}
}

// Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
// If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
// This function is called periodically to do that cleanup.
func (t *switchTable) cleanDropped() {
	// TODO? only call this after root changes, not periodically
	for root := range t.drop {
		if !firstIsBetter(&root, &t.data.locator.root) {
			delete(t.drop, root)
		}
	}
}
// A switchMsg contains the root node's sig key, timestamp, and signed per-hop information about a path from the root node to some other node in the network.
// This is exchanged with peers to construct the spanning tree.
// A subset of this information, excluding the signatures, is used to construct locators that are used elsewhere in the code.
type switchMsg struct {
	Root   crypto.SigPubKey
	TStamp int64
	Hops   []switchMsgHop
}

// This represents the signed information about the path leading from the root to the Next node, via the Port specified here.
type switchMsgHop struct {
	Port switchPort
	Next crypto.SigPubKey
	Sig  crypto.SigBytes
}
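// A minimal sketch with hypothetical keys and signatures: a message describing
// the path root -> A -> B, where the root reaches A via its port 5 and A
// reaches B via its port 2, would look like
//
//	switchMsg{
//		Root:   rootKey,
//		TStamp: ts,
//		Hops: []switchMsgHop{
//			{Port: 5, Next: keyA, Sig: sigFromRoot},
//			{Port: 2, Next: keyB, Sig: sigFromA},
//		},
//	}
//
// and the locator coords derived from it are {5, 2}.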
// This returns a pointer to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
func (t *switchTable) getMsg() *switchMsg {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	if t.parent == 0 {
		return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
	} else if parent, isIn := t.data.peers[t.parent]; isIn {
		msg := parent.msg
		msg.Hops = append([]switchMsgHop(nil), msg.Hops...)
		return &msg
	} else {
		return nil
	}
}

// This function checks that the root information in a switchMsg is OK.
// In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
func (t *switchTable) checkRoot(msg *switchMsg) bool {
	// Returns false if it's a dropped root, not a better root, or has an older timestamp
	// Returns true otherwise
	// Used elsewhere to keep inserting peers into the dht only if root info is OK
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	dropTstamp, isIn := t.drop[msg.Root]
	switch {
	case isIn && dropTstamp >= msg.TStamp:
		return false
	case firstIsBetter(&msg.Root, &t.data.locator.root):
		return true
	case t.data.locator.root != msg.Root:
		return false
	case t.data.locator.tstamp > msg.TStamp:
		return false
	default:
		return true
	}
}
// This is a mutexed wrapper to unlockedHandleMsg, and is called by the peer structs in peers.go to pass a switchMsg for that peer into the switch.
func (t *switchTable) handleMsg(msg *switchMsg, fromPort switchPort) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.unlockedHandleMsg(msg, fromPort, false)
}
// This updates the switch with information about a peer.
// Then the tricky part, it decides if it should update our own locator as a result.
// That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
// There are a lot of very delicate order sensitive checks here, so it's best to just read the code if you need to understand what it's doing.
// It's very important to not change the order of the statements in the switch cases unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
// Set the third arg to true if you're reprocessing an old message, e.g. to find a new parent after one disconnects, to avoid updating some timing related things.
func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
	// TODO directly use a switchMsg instead of switchMessage + sigs
	now := time.Now()
	// Set up the sender peerInfo
	var sender peerInfo
	sender.locator.root = msg.Root
	sender.locator.tstamp = msg.TStamp
	prevKey := msg.Root
	for _, hop := range msg.Hops {
		// Build locator
		sender.locator.coords = append(sender.locator.coords, hop.Port)
		sender.key = prevKey
		prevKey = hop.Next
	}
	sender.msg = *msg
	sender.port = fromPort
	sender.time = now
	// Decide what to do
	equiv := func(x *switchLocator, y *switchLocator) bool {
		if x.root != y.root {
			return false
		}
		if len(x.coords) != len(y.coords) {
			return false
		}
		for idx := range x.coords {
			if x.coords[idx] != y.coords[idx] {
				return false
			}
		}
		return true
	}
	doUpdate := false
	oldSender := t.data.peers[fromPort]
	if !equiv(&sender.locator, &oldSender.locator) {
		// Reset faster info, we'll start refilling it right after this
		sender.faster = nil
		doUpdate = true
	}
	// Update the matrix of peer "faster" thresholds
	if reprocessing {
		sender.faster = oldSender.faster
		sender.time = oldSender.time
		sender.blocked = oldSender.blocked
	} else {
		sender.faster = make(map[switchPort]uint64, len(oldSender.faster))
		for port, peer := range t.data.peers {
			if port == fromPort {
				continue
			} else if sender.locator.root != peer.locator.root || sender.locator.tstamp > peer.locator.tstamp {
				// We were faster than this node, so increment, as long as we don't overflow because of it
				if oldSender.faster[peer.port] < switch_faster_threshold {
					sender.faster[port] = oldSender.faster[peer.port] + 1
				} else {
					sender.faster[port] = switch_faster_threshold
				}
			} else {
				// Slower than this node, penalize (more than the reward amount)
				if oldSender.faster[port] > 1 {
					sender.faster[port] = oldSender.faster[peer.port] - 2
				} else {
					sender.faster[port] = 0
				}
			}
		}
	}
	// Update sender
	t.data.peers[fromPort] = sender
	// Decide if we should also update our root info to make the sender our parent
	updateRoot := false
	oldParent, isIn := t.data.peers[t.parent]
	noParent := !isIn
	noLoop := func() bool {
		for idx := 0; idx < len(msg.Hops)-1; idx++ {
			if msg.Hops[idx].Next == t.core.sigPub {
				return false
			}
		}
		if sender.locator.root == t.core.sigPub {
			return false
		}
		return true
	}()
	dropTstamp, isIn := t.drop[sender.locator.root]
	// Decide if we need to update info about the root or change parents.
	switch {
	case !noLoop:
		// This route loops, so we can't use the sender as our parent.
	case isIn && dropTstamp >= sender.locator.tstamp:
		// This is a known root with a timestamp older than a known timeout, so we can't trust it to be a new announcement.
	case firstIsBetter(&sender.locator.root, &t.data.locator.root):
		// This is a better root than what we're currently using, so we should update.
		updateRoot = true
	case t.data.locator.root != sender.locator.root:
		// This is not the same root, and it's apparently not better (from the above), so we should ignore it.
	case t.data.locator.tstamp > sender.locator.tstamp:
		// This timestamp is older than the most recently seen one from this root, so we should ignore it.
	case noParent:
		// We currently have no working parent, and at this point in the switch statement, anything is better than nothing.
		updateRoot = true
	case sender.faster[t.parent] >= switch_faster_threshold:
		// The sender is reliably faster than the current parent.
		updateRoot = true
	case !sender.blocked && oldParent.blocked:
		// Replace a blocked parent.
		updateRoot = true
	case reprocessing && sender.blocked && !oldParent.blocked:
		// Don't replace an unblocked parent when reprocessing.
	case reprocessing && sender.faster[t.parent] > oldParent.faster[sender.port]:
		// The sender seems to be reliably faster than the current parent, so switch to them instead.
		updateRoot = true
	case sender.port != t.parent:
		// Ignore further cases if the sender isn't our parent.
	case !reprocessing && !equiv(&sender.locator, &t.data.locator):
		// Special case:
		// If coords changed, then we need to penalize this node somehow, to prevent flapping.
		// First, reset all faster-related info to 0.
		// Then, de-parent the node and reprocess all messages to find a new parent.
		t.parent = 0
		for _, peer := range t.data.peers {
			if peer.port == sender.port {
				continue
			}
			t.unlockedHandleMsg(&peer.msg, peer.port, true)
		}
		// Process the sender last, to avoid keeping them as a parent if at all possible.
		t.unlockedHandleMsg(&sender.msg, sender.port, true)
	case now.Sub(t.time) < switch_throttle:
		// We've already gotten an update from this root recently, so ignore this one to avoid flooding.
	case sender.locator.tstamp > t.data.locator.tstamp:
		// The timestamp was updated, so we need to update locally and send to our peers.
		updateRoot = true
	}
	if updateRoot {
		if !equiv(&sender.locator, &t.data.locator) {
			doUpdate = true
			t.data.seq++
			t.core.router.reset(nil)
		}
		if t.data.locator.tstamp != sender.locator.tstamp {
			t.time = now
		}
		t.data.locator = sender.locator
		t.parent = sender.port
		t.core.peers.sendSwitchMsgs(t)
	}
	if doUpdate {
		t.updater.Store(&sync.Once{})
	}
}
////////////////////////////////////////////////////////////////////////////////

// The rest of these are related to the switch worker

// This is called via a sync.Once to update the atomically readable subset of switch information that gets used for routing decisions.
func (t *switchTable) updateTable() {
	// WARNING this should only be called from within t.data.updater.Do()
	//  It relies on the sync.Once for synchronization with messages and lookups
	// TODO use a pre-computed faster lookup table
	//  Instead of checking distance for every destination every time
	//  Array of structs, indexed by first coord that differs from self
	//  Each struct stores the best port to forward to, and a next coord map
	//  Move to struct, then iterate over coord maps until you dead end
	//  The last port before the dead end should be the closest
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	newTable := lookupTable{
		self:  t.data.locator.clone(),
		elems: make(map[switchPort]tableElem, len(t.data.peers)),
	}
	for _, pinfo := range t.data.peers {
		//if !pinfo.forward { continue }
		if pinfo.locator.root != newTable.self.root {
			continue
		}
		loc := pinfo.locator.clone()
		loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
		newTable.elems[pinfo.port] = tableElem{
			locator: loc,
			port:    pinfo.port,
		}
	}
	t.table.Store(newTable)
}

// Returns a copy of the atomically-updated table used for switch lookups.
func (t *switchTable) getTable() lookupTable {
	t.updater.Load().(*sync.Once).Do(t.updateTable)
	return t.table.Load().(lookupTable)
}
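// The updater/table pair implements a cheap invalidate-and-rebuild pattern:
// writers store a fresh *sync.Once to invalidate, and the next getTable call
// rebuilds the lookupTable exactly once, even if many readers race to do so:
//
//	t.updater.Store(&sync.Once{}) // mark the table stale
//	table := t.getTable()         // first caller rebuilds, the rest reuse it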
// Starts the switch worker.
func (t *switchTable) start() error {
	t.core.log.Infoln("Starting switch")
	// There's actually nothing to do to start it...
	return nil
}

type closerInfo struct {
	port switchPort
	dist int
}
// Returns the ports that are closer to the destination than this node, along with their distances.
// If the returned slice is empty (or nil), then no peer is closer.
func (t *switchTable) getCloser(dest []byte) []closerInfo {
	table := t.getTable()
	myDist := table.self.dist(dest)
	if myDist == 0 {
		// Skip the iteration step if it's impossible to be closer
		return nil
	}
	t.queues.closer = t.queues.closer[:0]
	for _, info := range table.elems {
		dist := info.locator.dist(dest)
		if dist < myDist {
			t.queues.closer = append(t.queues.closer, closerInfo{info.port, dist})
		}
	}
	return t.queues.closer
}

// Returns true if the peer is closer to the destination than ourself.
func (t *switchTable) portIsCloser(dest []byte, port switchPort) bool {
	table := t.getTable()
	if info, isIn := table.elems[port]; isIn {
		theirDist := info.locator.dist(dest)
		myDist := table.self.dist(dest)
		return theirDist < myDist
	} else {
		return false
	}
}
// Get the coords of a packet without decoding.
func switch_getPacketCoords(packet []byte) []byte {
	_, pTypeLen := wire_decode_uint64(packet)
	coords, _ := wire_decode_coords(packet[pTypeLen:])
	return coords
}
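// For reference, this assumes the usual wire layout for traffic packets: a
// packet type varint, then length-prefixed coords, then the payload; only the
// coords bytes are returned.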
// Returns a unique string for each stream of traffic.
// Equal to coords
//  The sender may append arbitrary info to the end of coords (as long as it begins with a 0x00) to designate separate traffic streams
//  Currently, it's the IPv6 next header type and the first 2 uint16 of the next header
//  This is equivalent to the TCP/UDP protocol numbers and the source / dest ports
// TODO figure out if something else would make more sense (other transport protocols?)
func switch_getPacketStreamID(packet []byte) string {
	return string(switch_getPacketCoords(packet))
}
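// A minimal sketch with hypothetical values: for TCP traffic to coords {5, 3}
// from source port 8080 to destination port 54321, the coords field might be
//
//	{5, 3, 0x00, 6, 0x1f, 0x90, 0xd4, 0x31}
//
// (coords, a 0x00 separator, protocol 6, then the two ports), so each TCP
// connection hashes to its own stream and queue.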
// Returns the flow label from a given set of coords.
func switch_getFlowLabelFromCoords(in []byte) []byte {
	for i, v := range in {
		if v == 0 {
			return in[i+1:]
		}
	}
	return []byte{}
}
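// For example, given in = {5, 3, 0x00, 6, 0x1f, 0x90}, the first 0x00 marks
// the end of the real coords, so the returned flow label is {6, 0x1f, 0x90}.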
// Find the best port for a given set of coords.
func (t *switchTable) bestPortForCoords(coords []byte) switchPort {
	table := t.getTable()
	var best switchPort
	bestDist := table.self.dist(coords)
	for to, elem := range table.elems {
		dist := elem.locator.dist(coords)
		if !(dist < bestDist) {
			continue
		}
		best = to
		bestDist = dist
	}
	return best
}
// Handle an incoming packet.
// Either send it to ourself, or to the first idle peer that's free.
// Returns true if the packet has been handled somehow, false if it should be queued.
func (t *switchTable) _handleIn(packet []byte, idle map[switchPort]time.Time) bool {
	coords := switch_getPacketCoords(packet)
	closer := t.getCloser(coords)
	if len(closer) == 0 {
		// TODO? call the router directly, and remove the whole concept of a self peer?
		self := t.core.peers.getPorts()[0]
		self.sendPacketsFrom(t, [][]byte{packet})
		return true
	}
	var best *peer
	var bestDist int
	var bestTime time.Time
	ports := t.core.peers.getPorts()
	for _, cinfo := range closer {
		to := ports[cinfo.port]
		thisTime, isIdle := idle[cinfo.port]
		var update bool
		switch {
		case to == nil:
			// no port was found, ignore it
		case !isIdle:
			// the port is busy, ignore it
		case best == nil:
			// this is the first idle port we've found, so select it until we find a
			// better candidate port to use instead
			update = true
		case cinfo.dist < bestDist:
			// the port takes a shorter path/is more direct than our current
			// candidate, so select that instead
			update = true
		case cinfo.dist > bestDist:
			// the port takes a longer path/is less direct than our current candidate,
			// ignore it
		case thisTime.After(bestTime):
			// all else equal, this port was used more recently than our current
			// candidate, so choose that instead. this should mean that, in low
			// traffic scenarios, we consistently pick the same link which helps with
			// packet ordering
			update = true
		default:
			// the search for a port has finished
		}
		if update {
			best = to
			bestDist = cinfo.dist
			bestTime = thisTime
		}
	}
	if best != nil {
		// Send to the best idle next hop
		delete(idle, best.port)
		best.sendPacketsFrom(t, [][]byte{packet})
		return true
	}
	// Didn't find anyone idle to send it to
	return false
}
// Info about a buffered packet.
type switch_packetInfo struct {
	bytes []byte
	time  time.Time // Timestamp of when the packet arrived
}

// Used to keep track of buffered packets.
type switch_buffer struct {
	packets []switch_packetInfo // Currently buffered packets, which may be dropped if it grows too large
	size    uint64              // Total queue size in bytes
}

type switch_buffers struct {
	totalMaxSize uint64
	bufs         map[string]switch_buffer // Buffers indexed by StreamID
	size         uint64                   // Total size of all buffers, in bytes
	maxbufs      int
	maxsize      uint64
	closer       []closerInfo // Scratch space
}
func (b *switch_buffers) _cleanup(t *switchTable) {
	for streamID, buf := range b.bufs {
		// Remove queues for which we have no next hop
		packet := buf.packets[0]
		coords := switch_getPacketCoords(packet.bytes)
		if len(t.getCloser(coords)) == 0 {
			for _, packet := range buf.packets {
				util.PutBytes(packet.bytes)
			}
			b.size -= buf.size
			delete(b.bufs, streamID)
		}
	}

	for b.size > b.totalMaxSize {
		// Drop a packet from a random queue, weighted by queue size
		target := rand.Uint64() % b.size
		var size uint64 // running total
		for streamID, buf := range b.bufs {
			size += buf.size
			if size < target {
				continue
			}
			var packet switch_packetInfo
			packet, buf.packets = buf.packets[0], buf.packets[1:]
			buf.size -= uint64(len(packet.bytes))
			b.size -= uint64(len(packet.bytes))
			util.PutBytes(packet.bytes)
			if len(buf.packets) == 0 {
				delete(b.bufs, streamID)
			} else {
				// Need to update the map, since buf was retrieved by value
				b.bufs[streamID] = buf
			}
			break
		}
	}
}
// Handles incoming idle notifications.
// Loops over the buffered streams and sends, in a batch of up to ~64 KiB, the highest-priority packets that are OK for this peer to send.
// Returns true if the peer is no longer idle, false if it should be added to the idle list.
func (t *switchTable) _handleIdle(port switchPort) bool {
	to := t.core.peers.getPorts()[port]
	if to == nil {
		return true
	}
	var packets [][]byte
	var psize int
	t.queues._cleanup(t)
	now := time.Now()
	for psize < 65535 {
		var best string
		var bestPriority float64
		for streamID, buf := range t.queues.bufs {
			// Filter over the streams that this node is closer to
			// Keep the stream with the highest priority (oldest head packet relative to queue size)
			packet := buf.packets[0]
			coords := switch_getPacketCoords(packet.bytes)
			priority := float64(now.Sub(packet.time)) / float64(buf.size)
			if priority > bestPriority && t.portIsCloser(coords, port) {
				best = streamID
				bestPriority = priority
			}
		}
		if bestPriority != 0 {
			buf := t.queues.bufs[best]
			var packet switch_packetInfo
			// TODO decide if this should be LIFO or FIFO
			packet, buf.packets = buf.packets[0], buf.packets[1:]
			buf.size -= uint64(len(packet.bytes))
			t.queues.size -= uint64(len(packet.bytes))
			if len(buf.packets) == 0 {
				delete(t.queues.bufs, best)
			} else {
				// Need to update the map, since buf was retrieved by value
				t.queues.bufs[best] = buf
			}
			packets = append(packets, packet.bytes)
			psize += len(packet.bytes)
		} else {
			// Finished finding packets
			break
		}
	}
	if len(packets) > 0 {
		to.sendPacketsFrom(t, packets)
		return true
	}
	return false
}
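// A worked example of the priority formula above, with hypothetical queues: a
// head packet 10ms old in a 5000-byte buffer scores 10e6ns/5000 = 2000, while
// a 2ms-old head packet in a 100-byte buffer scores 2e6ns/100 = 20000, so the
// lighter stream is drained first even though its head packet is newer.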
func (t *switchTable) packetInFrom(from phony.Actor, bytes []byte) {
	t.Act(from, func() {
		t._packetIn(bytes)
	})
}

func (t *switchTable) _packetIn(bytes []byte) {
	// Try to send it somewhere (or drop it if it's corrupt or at a dead end)
	if !t._handleIn(bytes, t.idle) {
		// There's nobody free to take it right now, so queue it for later
		packet := switch_packetInfo{bytes, time.Now()}
		streamID := switch_getPacketStreamID(packet.bytes)
		buf, bufExists := t.queues.bufs[streamID]
		buf.packets = append(buf.packets, packet)
		buf.size += uint64(len(packet.bytes))
		t.queues.size += uint64(len(packet.bytes))
		// Keep track of the max total queue size
		if t.queues.size > t.queues.maxsize {
			t.queues.maxsize = t.queues.size
		}
		t.queues.bufs[streamID] = buf
		if !bufExists {
			// Keep track of the max total queue count. Only recalculate this
			// when the queue is new, because otherwise calling len on the map
			// every time might cause unnecessary processing overhead
			if len(t.queues.bufs) > t.queues.maxbufs {
				t.queues.maxbufs = len(t.queues.bufs)
			}
		}
		t.queues._cleanup(t)
	}
}

func (t *switchTable) _idleIn(port switchPort) {
	// Try to find something to send to this peer
	if !t._handleIdle(port) {
		// Didn't find anything ready to send yet, so stay idle
		t.idle[port] = time.Now()
	}
}

// Passed a function to call.
// This blocks the calling goroutine until the function finishes running in the switch actor.
func (t *switchTable) doAdmin(f func()) {
	phony.Block(t, f)
}