package yggdrasil
// This thing manages search packets
// The basic idea is as follows:
// We may know a NodeID (with a mask) and want to connect
// We begin a search by initializing a list of all nodes in our DHT, sorted by closest to the destination
// We then iteratively ping nodes from the search, marking each pinged node as visited
// We add any unvisited nodes from ping responses to the search, truncating to some maximum search size
// This stops when we either run out of nodes to ping (we hit a dead end where we can't make progress without going back), or we reach the destination
// A new search packet is sent immediately after receiving a response
// A new search packet is sent periodically, once per second, in case a packet was dropped (this slowly causes the search to become parallel if the search doesn't timeout but also doesn't finish within 1 second for whatever reason)
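//
// A rough sketch of how the pieces below fit together (assuming the usual caller
// is whatever code is trying to open a session to the target):
//   sinfo := searches.newIterSearch(&dest, &mask, callback) // seed toVisit from our own DHT
//   sinfo.continueSearch()                                  // ping the closest node, schedule a retry
//   // ...DHT responses then arrive and are fed to sinfo.handleDHTRes, which either
//   // adds closer nodes and pings the next one, or finishes via checkDHTRes and
//   // runs the callback with the resulting session (or an error).
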
// TODO?
// Some kind of max search steps, in case the node is offline, so we don't crawl through too much of the network looking for a destination that isn't there?

import (
	"errors"
	"sort"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

// This defines the maximum number of dhtInfo that we keep track of for nodes to query in an ongoing search.
const search_MAX_SEARCH_SIZE = 16

// This defines the time after which we send a new search packet.
// Search packets are sent automatically immediately after a response is received.
// So this allows for timeouts and for long searches to become increasingly parallel.
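// (continueSearch both sends a ping via doSearchStep and schedules another
// continueSearch, so a search that is slow to finish gradually accumulates
// overlapping outstanding pings.)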
const search_RETRY_TIME = time.Second

// Information about an ongoing search.
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
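// (Only the NodeID bits covered by the mask are compared in checkDHTRes, so a
// partially known NodeID, e.g. one recovered from an IPv6 address, can be searched for.)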
type searchInfo struct {
	core     *Core
	dest     crypto.NodeID
	mask     crypto.NodeID
	time     time.Time
	packet   []byte
	toVisit  []*dhtInfo
	visited  map[crypto.NodeID]bool
	callback func(*sessionInfo, error)
	// TODO context.Context for timeout and cancellation
}

// This stores a map of active searches.
type searches struct {
	core        *Core
	reconfigure chan chan error
	searches    map[crypto.NodeID]*searchInfo
}

// Initializes the searches struct.
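// The reconfigure goroutine simply acknowledges requests, since searches currently
// have no configuration of their own to reload.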
func (s *searches) init(core *Core) {
	s.core = core
	s.reconfigure = make(chan chan error, 1)
	go func() {
		for {
			e := <-s.reconfigure
			e <- nil
		}
	}()
	s.searches = make(map[crypto.NodeID]*searchInfo)
}

// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
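// Note that this overwrites any existing search for the same destination in the
// searches map; searches are normally started via newIterSearch, which wraps this.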
func (s *searches) createSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
	now := time.Now()
	//for dest, sinfo := range s.searches {
	//	if now.Sub(sinfo.time) > time.Minute {
	//		delete(s.searches, dest)
	//	}
	//}
	info := searchInfo{
		core:     s.core,
		dest:     *dest,
		mask:     *mask,
		time:     now.Add(-time.Second),
		callback: callback,
	}
	s.searches[*dest] = &info
	return &info
}

////////////////////////////////////////////////////////////////////////////////

// Checks if there's an ongoing search related to a dhtRes.
// If there is, it adds the response info to the search and triggers a new search step.
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
func (sinfo *searchInfo) handleDHTRes(res *dhtRes) {
	if res == nil || sinfo.checkDHTRes(res) {
		// Either we don't recognize this search, or we just finished it
		return
	}
	// Add to the search and continue
	sinfo.addToSearch(res)
	sinfo.doSearchStep()
}

// Adds the information from a dhtRes to an ongoing search.
// Info about a node that has already been visited is not re-added to the search.
// Duplicate information about nodes toVisit is deduplicated (the newest information is kept).
// The toVisit list is sorted in ascending order of keyspace distance from the destination.
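// (dht_ordered(&dest, a, b) is used below to check that a falls between dest and b
// in keyspace, i.e. that a is strictly closer to the destination than b.)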
func (sinfo *searchInfo) addToSearch(res *dhtRes) {
	// Add responses to toVisit if closer to dest than the res node
	from := dhtInfo{key: res.Key, coords: res.Coords}
	sinfo.visited[*from.getNodeID()] = true
	for _, info := range res.Infos {
		if *info.getNodeID() == sinfo.core.dht.nodeID || sinfo.visited[*info.getNodeID()] {
			continue
		}
		if dht_ordered(&sinfo.dest, info.getNodeID(), from.getNodeID()) {
			// Response is closer to the destination
			sinfo.toVisit = append(sinfo.toVisit, info)
		}
	}
	// Deduplicate
	vMap := make(map[crypto.NodeID]*dhtInfo)
	for _, info := range sinfo.toVisit {
		vMap[*info.getNodeID()] = info
	}
	sinfo.toVisit = sinfo.toVisit[:0]
	for _, info := range vMap {
		sinfo.toVisit = append(sinfo.toVisit, info)
	}
	// Sort
	sort.SliceStable(sinfo.toVisit, func(i, j int) bool {
		// Should return true if i is closer to the destination than j
		return dht_ordered(&res.Dest, sinfo.toVisit[i].getNodeID(), sinfo.toVisit[j].getNodeID())
	})
	// Truncate to some maximum size
	if len(sinfo.toVisit) > search_MAX_SEARCH_SIZE {
		sinfo.toVisit = sinfo.toVisit[:search_MAX_SEARCH_SIZE]
	}
}

// If there are no nodes left toVisit, then this cleans up the search.
// Otherwise, it pops the closest node to the destination (in keyspace) off of the toVisit list and sends a dht ping.
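// The response callback (handleDHTRes) is registered with the dht, keyed by the
// request (dhtReqKey), before the ping is sent.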
func (sinfo *searchInfo) doSearchStep() {
	if len(sinfo.toVisit) == 0 {
		// Dead end, do cleanup
		delete(sinfo.core.searches.searches, sinfo.dest)
		go sinfo.callback(nil, errors.New("search reached dead end"))
		return
	}
	// Send to the next search target
	var next *dhtInfo
	next, sinfo.toVisit = sinfo.toVisit[0], sinfo.toVisit[1:]
	rq := dhtReqKey{next.key, sinfo.dest}
	sinfo.core.dht.addCallback(&rq, sinfo.handleDHTRes)
	sinfo.core.dht.ping(next, &sinfo.dest)
}

// If we've recently sent a ping for this search, do nothing.
// Otherwise, doSearchStep and schedule another continueSearch to happen after search_RETRY_TIME.
func (sinfo *searchInfo) continueSearch() {
	if time.Since(sinfo.time) < search_RETRY_TIME {
		return
	}
	sinfo.time = time.Now()
	sinfo.doSearchStep()
	// In case the search dies, try to spawn another thread later
	// Note that this will spawn multiple parallel searches as time passes
	// Any that die aren't restarted, but a new one will start later
	retryLater := func() {
		// FIXME this keeps the search alive forever if not for the searches map, fix that
		newSearchInfo := sinfo.core.searches.searches[sinfo.dest]
		if newSearchInfo != sinfo {
			return
		}
		sinfo.continueSearch()
	}
	go func() {
		time.Sleep(search_RETRY_TIME)
		sinfo.core.router.admin <- retryLater
	}()
}

// Calls createSearch, and initializes the iterative search parts of the struct before returning it.
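// The initial toVisit list is seeded from a lookup in our own DHT, so the search
// starts from the closest nodes we already know about.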
func (s *searches) newIterSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
	sinfo := s.createSearch(dest, mask, callback)
	sinfo.toVisit = s.core.dht.lookup(dest, true)
	sinfo.visited = make(map[crypto.NodeID]bool)
	return sinfo
}

// Checks if a dhtRes is good (called by handleDHTRes).
// If the response is from the target, get/create a session, trigger a session ping, and return true.
// Otherwise return false.
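// "From the target" means the responder's NodeID matches dest on every bit set in the mask.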
func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
	them := crypto.GetNodeID(&res.Key)
	var destMasked crypto.NodeID
	var themMasked crypto.NodeID
	for idx := 0; idx < crypto.NodeIDLen; idx++ {
		destMasked[idx] = sinfo.dest[idx] & sinfo.mask[idx]
		themMasked[idx] = them[idx] & sinfo.mask[idx]
	}
	if themMasked != destMasked {
		return false
	}
	// They match, so create a session and send a sessionRequest
	sess, isIn := sinfo.core.sessions.getByTheirPerm(&res.Key)
	if !isIn {
		sess = sinfo.core.sessions.createSession(&res.Key)
		if sess == nil {
			// nil if the DHT search finished but the session wasn't allowed
			go sinfo.callback(nil, errors.New("session not allowed"))
			return true
		}
		_, isIn := sinfo.core.sessions.getByTheirPerm(&res.Key)
		if !isIn {
			panic("This should never happen")
		}
	}
	// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
	sess.coords = res.Coords
	sess.packet = sinfo.packet
	sinfo.core.sessions.ping(sess)
	go sinfo.callback(sess, nil)
	// Cleanup
	delete(sinfo.core.searches.searches, res.Dest)
	return true
}