mirror of https://github.com/cwinfo/yggdrasil-go.git (synced 2024-11-22 13:00:47 +00:00)
cleanup

parent 02f0611dde
commit 5a85d3515d
@@ -415,10 +415,10 @@ func main() {
 	}
 	fmt.Println("Test")
 	Util_testAddrIDMask()
-	//idxstore := makeStoreSquareGrid(4)
+	idxstore := makeStoreSquareGrid(4)
 	//idxstore := makeStoreStar(256)
 	//idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
-	idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
+	//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
 	//idxstore := loadGraph("skitter")
 	kstore := getKeyedStore(idxstore)
 	//*
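The hunk above only flips which topology the simulator loads: the synthetic 4×4 square grid is re-enabled and the fc00-2017-08-12 graph dump is commented back out before the store is keyed. As a rough, self-contained sketch of what a square-grid topology amounts to (the real makeStoreSquareGrid builds a store of simulated nodes, which this diff does not show), the code below assumes a plain adjacency map:

package main

import "fmt"

// Sketch only: build the adjacency map of an n×n grid, where each node is
// linked to its horizontal and vertical neighbours. This is an assumption
// about what a "square grid" store contains, not yggdrasil's implementation.
func makeSquareGrid(n int) map[int][]int {
    adj := make(map[int][]int)
    link := func(a, b int) {
        adj[a] = append(adj[a], b)
        adj[b] = append(adj[b], a)
    }
    for row := 0; row < n; row++ {
        for col := 0; col < n; col++ {
            id := row*n + col
            if col+1 < n {
                link(id, id+1) // right neighbour
            }
            if row+1 < n {
                link(id, id+n) // neighbour below
            }
        }
    }
    return adj
}

func main() {
    grid := makeSquareGrid(4)
    fmt.Println(len(grid), "nodes,", len(grid[5]), "links from node 5") // 16 nodes, 4 links from node 5
}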
@@ -1,7 +1,6 @@
 package yggdrasil
 
 import (
-	"fmt"
 	"sort"
 	"time"
 )
@@ -16,9 +15,6 @@ type dhtInfo struct {
 	coords []byte
 	send time.Time // When we last sent a message
 	recv time.Time // When we last received a message
-	//pings int // Decide when to drop
-	//throttle time.Duration // Time to wait before pinging a node to bootstrap buckets, increases exponentially from 1 second to 1 minute
-	//bootstrapSend time.Time // The time checked/updated as part of throttle checks
 }
 
 // Returns the *NodeID associated with dhtInfo.key, calculating it on the fly the first time or from a cache all subsequent times.
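The comment kept at the end of the hunk describes lazy caching: the *NodeID is derived from dhtInfo.key once and reused on every later call. A standalone sketch of that pattern, with illustrative types and SHA-512 assumed as the derivation (the diff does not show the real getNodeID body):

package main

import (
    "crypto/sha512"
    "fmt"
)

// Standalone sketch of the memoization idea described above. The types and
// the hash choice are illustrative assumptions, not the project's definitions.
type nodeID [64]byte

type peerInfo struct {
    key          [32]byte // the peer's public box key
    nodeIDCached *nodeID  // nil until first use
}

func (p *peerInfo) getNodeID() *nodeID {
    if p.nodeIDCached == nil {
        sum := nodeID(sha512.Sum512(p.key[:])) // compute once
        p.nodeIDCached = &sum
    }
    return p.nodeIDCached
}

func main() {
    var p peerInfo
    fmt.Println(p.getNodeID() == p.getNodeID()) // true: same cached pointer
}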
@@ -54,7 +50,6 @@ type dht struct {
 	table map[NodeID]*dhtInfo
 	peers chan *dhtInfo // other goroutines put incoming dht updates here
 	reqs map[boxPubKey]map[NodeID]time.Time
-	//rumorMill []dht_rumor
 }
 
 func (t *dht) init(c *Core) {
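The surviving fields spell out the concurrency model: lookups live in table, and the peers channel is how other goroutines hand incoming dht updates to the goroutine that owns that table. A minimal sketch of that owner-goroutine pattern, with made-up update and table types:

package main

import "fmt"

// Sketch of the pattern suggested by the "peers chan *dhtInfo" field above:
// other goroutines send discovered-peer updates over a channel, and a single
// owner goroutine applies them to the table, so no mutex is needed.
type update struct {
    id    string
    coord []byte
}

func runTableOwner(updates <-chan update, done chan<- int) {
    table := make(map[string][]byte)
    for u := range updates {
        table[u.id] = u.coord // only this goroutine touches the map
    }
    done <- len(table)
}

func main() {
    updates := make(chan update, 4)
    done := make(chan int)
    go runTableOwner(updates, done)
    // Any goroutine can safely hand updates to the owner.
    updates <- update{id: "node-a", coord: []byte{1}}
    updates <- update{id: "node-b", coord: []byte{1, 2}}
    close(updates)
    fmt.Println("table size:", <-done) // table size: 2
}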
@@ -66,7 +61,7 @@ func (t *dht) init(c *Core) {
 }
 
 func (t *dht) reset() {
-	fmt.Println("Resetting table:", t.nodeID)
+	//fmt.Println("Resetting table:", t.nodeID)
 	t.reqs = make(map[boxPubKey]map[NodeID]time.Time)
 	t.table = make(map[NodeID]*dhtInfo)
 }
@@ -99,7 +94,7 @@ func (t *dht) lookup(nodeID *NodeID, allowWorse bool) []*dhtInfo {
 // Insert into table, preserving the time we last sent a packet if the node was already in the table, otherwise setting that time to now
 func (t *dht) insert(info *dhtInfo) {
 	if *info.getNodeID() == t.nodeID {
-		// This shouldn't happen, but don't add it in case it does
+		// This shouldn't happen, but don't add it if it does
 		return
 		panic("FIXME")
 	}
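The insert comment above states the rule precisely: re-inserting a known node must keep its existing send time, while a brand-new node starts at time.Now(). A small standalone sketch of that rule, using simplified types rather than dhtInfo:

package main

import (
    "fmt"
    "time"
)

// Sketch of the insert rule spelled out in the comment above: keep the old
// "last sent" timestamp when re-inserting a known node, otherwise start it
// at time.Now(). Types here are simplified stand-ins, not the dht's own.
type entry struct {
    send time.Time // when we last sent to this node
    recv time.Time // when we last heard from it
}

func insert(table map[string]*entry, id string, info *entry) {
    info.recv = time.Now()
    if old, ok := table[id]; ok {
        info.send = old.send // preserve the existing send time
    } else {
        info.send = time.Now()
    }
    table[id] = info
}

func main() {
    table := make(map[string]*entry)
    insert(table, "node-a", &entry{})
    first := table["node-a"].send
    time.Sleep(10 * time.Millisecond)
    insert(table, "node-a", &entry{})
    fmt.Println("send time preserved:", table["node-a"].send.Equal(first)) // true
}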
@@ -236,19 +231,9 @@ func (t *dht) handleRes(res *dhtRes) {
 	// We could try sending to only the best, but then packet loss matters more
 	if successor == nil || dht_ordered(&t.nodeID, info.getNodeID(), successor.getNodeID()) {
 		t.ping(info, &t.nodeID)
-		if successor != nil {
-			fmt.Println("pinging better successor", t.nodeID[:4], info.getNodeID()[:4], successor.getNodeID()[:4], len(t.table))
-		} else {
-			fmt.Println("pinging new successor", t.nodeID[:4], info.getNodeID()[:4], successor)
-		}
 	}
 	if predecessor == nil || dht_ordered(predecessor.getNodeID(), info.getNodeID(), &t.nodeID) {
 		t.ping(info, &t.nodeID)
-		if predecessor != nil {
-			fmt.Println("pinging better predecessor", t.nodeID[:4], info.getNodeID()[:4], predecessor.getNodeID()[:4], len(t.table))
-		} else {
-			fmt.Println("pinging new predecessor", t.nodeID[:4], info.getNodeID()[:4])
-		}
 	}
 	// TODO add everyting else to a rumor mill for later use? (when/how?)
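Both kept branches rely on dht_ordered to decide whether the responding node sits between our own NodeID and the current successor (or predecessor) when walking around the keyspace ring. A hedged sketch of such a ring-order predicate, using uint8 values in place of NodeIDs (the real function's exact semantics may differ):

package main

import "fmt"

// Sketch of the kind of ring-order test that dht_ordered performs in the
// checks above: "does b sit strictly between a and c when walking clockwise
// around the keyspace ring?"
func ordered(a, b, c uint8) bool {
    if a < c {
        return a < b && b < c // no wraparound between a and c
    }
    return b > a || b < c // the a→c arc wraps past the maximum value
}

func main() {
    // With our ID at 200 and the current successor at 10 (wrapping past 255),
    // a node at 250 is a closer successor, while a node at 50 is not.
    fmt.Println(ordered(200, 250, 10)) // true
    fmt.Println(ordered(200, 50, 10))  // false
}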
@@ -300,30 +285,15 @@ func (t *dht) doMaintenance() {
 	// Ping successor, asking for their predecessor, and clean up old/expired info
 	var successor *dhtInfo
 	now := time.Now()
-	size := len(t.table)
 	for infoID, info := range t.table {
-		/*
-			if now.Sub(info.recv) > time.Minute {
-				delete(t.table, infoID)
-			} else if successor == nil || dht_ordered(&t.nodeID, &infoID, successor.getNodeID()) {
-				successor = info
-			}
-		*/
-		if successor == nil || dht_ordered(&t.nodeID, &infoID, successor.getNodeID()) {
-			successor = info
-		}
 		if now.Sub(info.recv) > time.Minute {
 			delete(t.table, infoID)
+		} else if successor == nil || dht_ordered(&t.nodeID, &infoID, successor.getNodeID()) {
+			successor = info
 		}
 	}
 	if successor != nil &&
 		now.Sub(successor.send) > 6*time.Second {
 		t.ping(successor, nil)
 	}
-	if successor != nil && t.table[*successor.getNodeID()] == nil {
-		fmt.Println("DEBUG: successor timed out:", t.nodeID[:4], successor.getNodeID()[:4])
-	}
-	if len(t.table) != size {
-		fmt.Println("DEBUG: timeouts:", t.nodeID[:4], size, len(t.table))
-	}
 }
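After the cleanup, doMaintenance does three things per pass: expire entries not heard from within a minute, track the closest live successor via dht_ordered, and ping that successor if nothing has been sent to it for six seconds. The sketch below mirrors that shape with the same illustrative uint8 ordering as above; everything apart from those two thresholds is a simplified stand-in, not the project's code:

package main

import (
    "fmt"
    "time"
)

// Sketch of the maintenance pass: expire stale entries, pick the remaining
// entry that is the closest successor of our own ID, and ping it if we have
// not sent to it recently.
type peer struct {
    send time.Time
    recv time.Time
}

func ordered(a, b, c uint8) bool {
    if a < c {
        return a < b && b < c
    }
    return b > a || b < c
}

func maintain(self uint8, table map[uint8]*peer, ping func(uint8)) {
    now := time.Now()
    var succID uint8
    var succ *peer
    for id, p := range table {
        if now.Sub(p.recv) > time.Minute {
            delete(table, id) // expired: drop it
        } else if succ == nil || ordered(self, id, succID) {
            succID, succ = id, p // closest live successor so far
        }
    }
    if succ != nil && now.Sub(succ.send) > 6*time.Second {
        ping(succID)
        succ.send = now // record the send in this sketch
    }
}

func main() {
    now := time.Now()
    table := map[uint8]*peer{
        40: {send: now.Add(-10 * time.Second), recv: now},  // fresh, but stale send time
        90: {send: now, recv: now.Add(-2 * time.Minute)},   // expired
    }
    maintain(30, table, func(id uint8) { fmt.Println("ping", id) }) // ping 40
    fmt.Println("table size:", len(table))                          // table size: 1
}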