Mirror of https://github.com/cwinfo/yggdrasil-go.git, synced 2024-11-22 18:50:27 +00:00
fix some chord dht bootstrapping bugs, no known cases where it now fails
parent 253861ebd3
commit c0531627bc
@@ -300,6 +300,7 @@ func pingNodes(store map[[32]byte]*Node) {
 			}
 		case <-ch:
 			sendTo(payload, destAddr)
+			//dumpDHTSize(store) // note that this uses racey functions to read things...
 		}
 	}
 	ticker.Stop()
@@ -233,7 +233,6 @@ func (c *Core) DEBUG_getDHTSize() int {
 	c.router.doAdmin(func() {
 		total = len(c.dht.table)
 	})
-	fmt.Println("DEBUG_getDHTSize():", total)
 	return total
 }
 
@@ -5,6 +5,16 @@ package yggdrasil
 // That should encorage them to ping us again sooner, and then we can reply with new info
 // Maybe remember old predecessor and check this during maintenance?
 
+// TODO make sure that, if your peer is your successor or predecessor, you still bother to ping them and ask for better nodes
+// Basically, don't automatically reset the dhtInfo.recv to time.Now() whenever updating them from the outside
+// But *do* set it to something that won't instantly time them out or make them get pingspammed?
+// Could set throttle to 0, but that's imperfect at best... pingspam
+
+// TODO? cache all nodes we ping (from e.g. searches), not just the important ones
+// But only send maintenance pings to the important ones
+
+// TODO reoptimize search stuff (size, timeouts, etc) to play nicer with DHT churn
+
 import (
 	"fmt"
 	"sort"
@@ -77,8 +87,12 @@ func (t *dht) reset() {
 // Does a DHT lookup and returns up to dht_lookup_size results
 func (t *dht) lookup(nodeID *NodeID, everything bool) []*dhtInfo {
 	results := make([]*dhtInfo, 0, len(t.table))
+	//imp := t.getImportant()
 	for _, info := range t.table {
 		results = append(results, info)
+		//if t.isImportant(info, imp) {
+		//	results = append(results, info)
+		//}
 	}
 	sort.SliceStable(results, func(i, j int) bool {
 		return dht_ordered(nodeID, results[i].getNodeID(), results[j].getNodeID())
@@ -165,9 +179,11 @@ func (t *dht) handleReq(req *dhtReq) {
 		key:    req.Key,
 		coords: req.Coords,
 	}
-	// For bootstrapping to work, we need to add these nodes to the table
-	t.insert(&info)
+	imp := t.getImportant()
+	if t.isImportant(&info, imp) {
+		t.insert(&info)
+	}
 }
 
 // Sends a lookup response to the specified node.
 func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
@@ -186,28 +202,6 @@ func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
 	t.core.router.out(packet)
 }
 
-// Returns nodeID + 1
-func (nodeID NodeID) next() NodeID {
-	for idx := len(nodeID) - 1; idx >= 0; idx-- {
-		nodeID[idx] += 1
-		if nodeID[idx] != 0 {
-			break
-		}
-	}
-	return nodeID
-}
-
-// Returns nodeID - 1
-func (nodeID NodeID) prev() NodeID {
-	for idx := len(nodeID) - 1; idx >= 0; idx-- {
-		nodeID[idx] -= 1
-		if nodeID[idx] != 0xff {
-			break
-		}
-	}
-	return nodeID
-}
-
 // Reads a lookup response, checks that we had sent a matching request, and processes the response info.
 // This mainly consists of updating the node we asked in our DHT (they responded, so we know they're still alive), and deciding if we want to do anything with their responses
 func (t *dht) handleRes(res *dhtRes) {
@@ -225,11 +219,14 @@ func (t *dht) handleRes(res *dhtRes) {
 		key:    res.Key,
 		coords: res.Coords,
 	}
-	t.insert(&rinfo) // Or at the end, after checking successor/predecessor?
+	imp := t.getImportant()
+	if t.isImportant(&rinfo, imp) {
+		t.insert(&rinfo)
+	}
+	//t.insert(&rinfo) // Or at the end, after checking successor/predecessor?
 	if len(res.Infos) > dht_lookup_size {
 		//res.Infos = res.Infos[:dht_lookup_size] //FIXME debug
 	}
-	imp := t.getImportant()
 	for _, info := range res.Infos {
 		if *info.getNodeID() == t.nodeID {
 			continue
@@ -313,6 +310,14 @@ func (t *dht) doMaintenance() {
 			fmt.Println("DEBUG self:", t.nodeID[:8], "throttle:", info.throttle, "nodeID:", info.getNodeID()[:8], "coords:", info.coords)
 		}
 	}
+	return // Skip printing debug info
+	var out []interface{}
+	out = append(out, "DEBUG important:")
+	out = append(out, t.nodeID[:8])
+	for _, info := range imp {
+		out = append(out, info.getNodeID()[:8])
+	}
+	fmt.Println(out...)
 }
 
 func (t *dht) getImportant() []*dhtInfo {
@@ -323,9 +328,8 @@ func (t *dht) getImportant() []*dhtInfo {
 	}
 	// Sort them by increasing order in distance along the ring
 	sort.SliceStable(infos, func(i, j int) bool {
-		// Sort in order of predecessors (!), reverse from chord normal, becuase it plays nicer with zero bits for unknown parts of target addresses
+		// Sort in order of predecessors (!), reverse from chord normal, because it plays nicer with zero bits for unknown parts of target addresses
 		return dht_ordered(infos[j].getNodeID(), infos[i].getNodeID(), &t.nodeID)
-		//return dht_ordered(&t.nodeID, infos[i].getNodeID(), infos[j].getNodeID())
 	})
 	// Keep the ones that are no further than the closest seen so far
 	minDist := ^uint64(0)
@@ -338,6 +342,19 @@ func (t *dht) getImportant() []*dhtInfo {
 			important = append(important, info)
 		}
 	}
+	var temp []*dhtInfo
+	minDist = ^uint64(0)
+	for idx := len(infos) - 1; idx >= 0; idx-- {
+		info := infos[idx]
+		dist := uint64(loc.dist(info.coords))
+		if dist < minDist {
+			minDist = dist
+			temp = append(temp, info)
+		}
+	}
+	for idx := len(temp) - 1; idx >= 0; idx-- {
+		important = append(important, temp[idx])
+	}
 	return important
 }
 
@@ -347,15 +364,28 @@ func (t *dht) isImportant(ninfo *dhtInfo, important []*dhtInfo) bool {
 	ndist := uint64(loc.dist(ninfo.coords))
 	minDist := ^uint64(0)
 	for _, info := range important {
+		if (*info.getNodeID() == *ninfo.getNodeID()) ||
+			(ndist < minDist && dht_ordered(info.getNodeID(), ninfo.getNodeID(), &t.nodeID)) {
+			// Either the same node, or a better one
+			return true
+		}
 		dist := uint64(loc.dist(info.coords))
 		if dist < minDist {
 			minDist = dist
 		}
-		//if dht_ordered(&t.nodeID, ninfo.getNodeID(), info.getNodeID()) && ndist <= minDist {
-		if dht_ordered(info.getNodeID(), ninfo.getNodeID(), &t.nodeID) && ndist <= minDist {
-			// This node is at least as close in both key space and tree space
+	}
+	minDist = ^uint64(0)
+	for idx := len(important) - 1; idx >= 0; idx-- {
+		info := important[idx]
+		if (*info.getNodeID() == *ninfo.getNodeID()) ||
+			(ndist < minDist && dht_ordered(&t.nodeID, ninfo.getNodeID(), info.getNodeID())) {
+			// Either the same node, or a better one
 			return true
 		}
+		dist := uint64(loc.dist(info.coords))
+		if dist < minDist {
+			minDist = dist
+		}
 	}
 	// We didn't find any important node that ninfo is better than
 	return false
|
@ -96,7 +96,8 @@ func (s *searches) addToSearch(sinfo *searchInfo, res *dhtRes) {
|
|||||||
if *info.getNodeID() == s.core.dht.nodeID || sinfo.visited[*info.getNodeID()] {
|
if *info.getNodeID() == s.core.dht.nodeID || sinfo.visited[*info.getNodeID()] {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if true || dht_ordered(from.getNodeID(), info.getNodeID(), &res.Dest) {
|
if dht_ordered(&sinfo.dest, info.getNodeID(), from.getNodeID()) {
|
||||||
|
// Response is closer to the destination
|
||||||
sinfo.toVisit = append(sinfo.toVisit, info)
|
sinfo.toVisit = append(sinfo.toVisit, info)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -118,7 +119,7 @@ func (s *searches) addToSearch(sinfo *searchInfo, res *dhtRes) {
 	})
 	// Truncate to some maximum size
 	if len(sinfo.toVisit) > search_MAX_SEARCH_SIZE {
-		sinfo.toVisit = sinfo.toVisit[:search_MAX_SEARCH_SIZE]
+		sinfo.toVisit = sinfo.toVisit[:search_MAX_SEARCH_SIZE] //FIXME debug
 	}
 }
 
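A note on the ordering checks above: handleReq and handleRes now only insert a node when isImportant judges it at least as good as something already tracked, and addToSearch only visits results that dht_ordered places between the search destination and the responding node. dht_ordered itself lives elsewhere in dht.go and is not part of this diff; the sketch below shows how a chord-style "is second on the ring arc from first to third" predicate can be written. The type nodeID, its shortened size, and the helper names are illustrative assumptions for the sketch, not the repo's exact definitions.

package main

import (
	"bytes"
	"fmt"
)

// nodeID stands in for yggdrasil's NodeID type; the length is shortened
// here to keep the example small (the real NodeID is a larger fixed-size
// byte array derived from the node's key).
type nodeID [4]byte

// lessOrEqual compares two IDs as big-endian unsigned integers.
func lessOrEqual(a, b nodeID) bool {
	return bytes.Compare(a[:], b[:]) <= 0
}

// ordered sketches a chord-style dht_ordered(first, second, third): it
// reports whether walking the ring of IDs upward from first (wrapping
// around past the maximum value) reaches second no later than third.
// Equivalently, at least two of the three cyclic comparisons must hold.
func ordered(first, second, third nodeID) bool {
	fs := lessOrEqual(first, second)
	st := lessOrEqual(second, third)
	tf := lessOrEqual(third, first)
	switch {
	case fs && st:
		return true // no wraparound: first <= second <= third
	case st && tf:
		return true // the wrap falls between first and second
	case tf && fs:
		return true // the wrap falls between second and third
	}
	return false
}

func main() {
	a := nodeID{0x10}
	b := nodeID{0x80}
	c := nodeID{0xf0}
	fmt.Println(ordered(a, b, c)) // true: a, b, c in increasing order, no wrap
	fmt.Println(ordered(c, a, b)) // true: the walk from c wraps past zero before reaching a, then b
	fmt.Println(ordered(b, a, c)) // false: starting from b, c is reached before the wrap back to a
}

The new isImportant applies this idea in both directions around the ring (hence the second loop over important in reverse), keeping a candidate only if it sits on the right keyspace arc and is closer in tree distance than the closest node seen so far in that direction.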