5
0
mirror of https://github.com/cwinfo/yggdrasil-go.git synced 2024-12-22 23:25:39 +00:00

Run gofmt -s -w .

This commit is contained in:
Neil Alexander 2018-01-04 22:37:51 +00:00
parent ae7b07ae6a
commit b3ebe76b59
45 changed files with 5037 additions and 4288 deletions

View File

@ -20,58 +20,67 @@ import . "yggdrasil"
var doSig = flag.Bool("sig", false, "generate new signing keys instead") var doSig = flag.Bool("sig", false, "generate new signing keys instead")
func main() { func main() {
flag.Parse() flag.Parse()
switch { switch {
case *doSig: doSigKeys() case *doSig:
default: doBoxKeys() doSigKeys()
} default:
doBoxKeys()
}
} }
func isBetter(oldID, newID []byte) bool { func isBetter(oldID, newID []byte) bool {
for idx := range oldID { for idx := range oldID {
if newID[idx] > oldID[idx] { return true } if newID[idx] > oldID[idx] {
if newID[idx] < oldID[idx] { return false } return true
} }
return false if newID[idx] < oldID[idx] {
return false
}
}
return false
} }
func doBoxKeys() { func doBoxKeys() {
c := Core{} c := Core{}
pub, _ := c.DEBUG_newBoxKeys() pub, _ := c.DEBUG_newBoxKeys()
bestID := c.DEBUG_getNodeID(pub) bestID := c.DEBUG_getNodeID(pub)
for idx := range bestID { for idx := range bestID {
bestID[idx] = 0 bestID[idx] = 0
} }
for { for {
pub, priv := c.DEBUG_newBoxKeys() pub, priv := c.DEBUG_newBoxKeys()
id := c.DEBUG_getNodeID(pub) id := c.DEBUG_getNodeID(pub)
if !isBetter(bestID[:], id[:]) { continue } if !isBetter(bestID[:], id[:]) {
bestID = id continue
ip := c.DEBUG_addrForNodeID(id) }
fmt.Println("--------------------------------------------------------------------------------") bestID = id
fmt.Println("boxPriv:", hex.EncodeToString(priv[:])) ip := c.DEBUG_addrForNodeID(id)
fmt.Println("boxPub:", hex.EncodeToString(pub[:])) fmt.Println("--------------------------------------------------------------------------------")
fmt.Println("NodeID:", hex.EncodeToString(id[:])) fmt.Println("boxPriv:", hex.EncodeToString(priv[:]))
fmt.Println("IP:", ip) fmt.Println("boxPub:", hex.EncodeToString(pub[:]))
} fmt.Println("NodeID:", hex.EncodeToString(id[:]))
fmt.Println("IP:", ip)
}
} }
func doSigKeys() { func doSigKeys() {
c := Core{} c := Core{}
pub, _ := c.DEBUG_newSigKeys() pub, _ := c.DEBUG_newSigKeys()
bestID := c.DEBUG_getTreeID(pub) bestID := c.DEBUG_getTreeID(pub)
for idx := range bestID { for idx := range bestID {
bestID[idx] = 0 bestID[idx] = 0
} }
for { for {
pub, priv := c.DEBUG_newSigKeys() pub, priv := c.DEBUG_newSigKeys()
id := c.DEBUG_getTreeID(pub) id := c.DEBUG_getTreeID(pub)
if !isBetter(bestID[:], id[:]) { continue } if !isBetter(bestID[:], id[:]) {
bestID = id continue
fmt.Println("--------------------------------------------------------------------------------") }
fmt.Println("sigPriv:", hex.EncodeToString(priv[:])) bestID = id
fmt.Println("sigPub:", hex.EncodeToString(pub[:])) fmt.Println("--------------------------------------------------------------------------------")
fmt.Println("TreeID:", hex.EncodeToString(id[:])) fmt.Println("sigPriv:", hex.EncodeToString(priv[:]))
} fmt.Println("sigPub:", hex.EncodeToString(pub[:]))
fmt.Println("TreeID:", hex.EncodeToString(id[:]))
}
} }

View File

@ -15,153 +15,157 @@ import "router"
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
type Node struct { type Node struct {
nodeID router.NodeID nodeID router.NodeID
table router.Table table router.Table
links []*Node links []*Node
} }
func (n *Node) init(nodeID router.NodeID) { func (n *Node) init(nodeID router.NodeID) {
n.nodeID = nodeID n.nodeID = nodeID
n.table.Init(nodeID) n.table.Init(nodeID)
n.links = append(n.links, n) n.links = append(n.links, n)
} }
func linkNodes(m, n *Node) { func linkNodes(m, n *Node) {
for _, o := range m.links { for _, o := range m.links {
if o.nodeID == n.nodeID { if o.nodeID == n.nodeID {
// Don't allow duplicates // Don't allow duplicates
return return
} }
} }
m.links = append(m.links, n) m.links = append(m.links, n)
n.links = append(n.links, m) n.links = append(n.links, m)
} }
func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node { func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node {
store := make(map[router.NodeID]*Node) store := make(map[router.NodeID]*Node)
nNodes := sideLength*sideLength nNodes := sideLength * sideLength
nodeIDs := make([]router.NodeID, 0, nNodes) nodeIDs := make([]router.NodeID, 0, nNodes)
// TODO shuffle nodeIDs // TODO shuffle nodeIDs
for nodeID := 1 ; nodeID <= nNodes ; nodeID++ { for nodeID := 1; nodeID <= nNodes; nodeID++ {
nodeIDs = append(nodeIDs, router.NodeID(nodeID)) nodeIDs = append(nodeIDs, router.NodeID(nodeID))
} }
for _, nodeID := range nodeIDs { for _, nodeID := range nodeIDs {
node := &Node{} node := &Node{}
node.init(nodeID) node.init(nodeID)
store[nodeID] = node store[nodeID] = node
} }
for idx := 0 ; idx < nNodes ; idx++ { for idx := 0; idx < nNodes; idx++ {
if (idx % sideLength) != 0 { if (idx % sideLength) != 0 {
linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]]) linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]])
} }
if idx >= sideLength { if idx >= sideLength {
linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]]) linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]])
} }
} }
return store return store
} }
func loadGraph(path string) map[router.NodeID]*Node { func loadGraph(path string) map[router.NodeID]*Node {
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { panic(err) } if err != nil {
defer f.Close() panic(err)
store := make(map[router.NodeID]*Node) }
s := bufio.NewScanner(f) defer f.Close()
for s.Scan() { store := make(map[router.NodeID]*Node)
line := s.Text() s := bufio.NewScanner(f)
nodeIDstrs := strings.Split(line, " ") for s.Scan() {
nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0]) line := s.Text()
nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1]) nodeIDstrs := strings.Split(line, " ")
nodeID0 := router.NodeID(nodeIDi0) nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0])
nodeID1 := router.NodeID(nodeIDi1) nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1])
if store[nodeID0] == nil { nodeID0 := router.NodeID(nodeIDi0)
node := &Node{} nodeID1 := router.NodeID(nodeIDi1)
node.init(nodeID0) if store[nodeID0] == nil {
store[nodeID0] = node node := &Node{}
} node.init(nodeID0)
if store[nodeID1] == nil { store[nodeID0] = node
node := &Node{} }
node.init(nodeID1) if store[nodeID1] == nil {
store[nodeID1] = node node := &Node{}
} node.init(nodeID1)
linkNodes(store[nodeID0], store[nodeID1]) store[nodeID1] = node
} }
return store linkNodes(store[nodeID0], store[nodeID1])
}
return store
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func idleUntilConverged(store map[router.NodeID]*Node) { func idleUntilConverged(store map[router.NodeID]*Node) {
timeOfLastChange := 0 timeOfLastChange := 0
step := 0 step := 0
// Idle untl the network has converged // Idle untl the network has converged
for step - timeOfLastChange < 4*router.TIMEOUT { for step-timeOfLastChange < 4*router.TIMEOUT {
step++ step++
fmt.Println("Step:", step, "--", "last change:", timeOfLastChange) fmt.Println("Step:", step, "--", "last change:", timeOfLastChange)
for _, node := range store { for _, node := range store {
node.table.Tick() node.table.Tick()
for idx, link := range node.links[1:] { for idx, link := range node.links[1:] {
msg := node.table.CreateMessage(router.Iface(idx)) msg := node.table.CreateMessage(router.Iface(idx))
for idx, fromNode := range link.links { for idx, fromNode := range link.links {
if fromNode == node { if fromNode == node {
//fmt.Println("Sending from node", node.nodeID, "to", link.nodeID) //fmt.Println("Sending from node", node.nodeID, "to", link.nodeID)
link.table.HandleMessage(msg, router.Iface(idx)) link.table.HandleMessage(msg, router.Iface(idx))
break break
} }
} }
} }
} }
//for _, node := range store { //for _, node := range store {
// if node.table.DEBUG_isDirty() { timeOfLastChange = step } // if node.table.DEBUG_isDirty() { timeOfLastChange = step }
//} //}
//time.Sleep(10*time.Millisecond) //time.Sleep(10*time.Millisecond)
} }
} }
func testPaths(store map[router.NodeID]*Node) { func testPaths(store map[router.NodeID]*Node) {
nNodes := len(store) nNodes := len(store)
nodeIDs := make([]router.NodeID, 0, nNodes) nodeIDs := make([]router.NodeID, 0, nNodes)
for nodeID := range store { for nodeID := range store {
nodeIDs = append(nodeIDs, nodeID) nodeIDs = append(nodeIDs, nodeID)
} }
lookups := 0 lookups := 0
count := 0 count := 0
start := time.Now() start := time.Now()
for _, source := range store { for _, source := range store {
count++ count++
fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID) fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID)
for _, dest := range store { for _, dest := range store {
//if source == dest { continue } //if source == dest { continue }
destLoc := dest.table.GetLocator() destLoc := dest.table.GetLocator()
temp := 0 temp := 0
for here := source ; here != dest ; { for here := source; here != dest; {
temp++ temp++
if temp > 16 { panic("Loop?") } if temp > 16 {
next := here.links[here.table.Lookup(destLoc)] panic("Loop?")
if next == here { }
//for idx, link := range here.links { next := here.links[here.table.Lookup(destLoc)]
// fmt.Println("DUMP:", idx, link.nodeID) if next == here {
//} //for idx, link := range here.links {
panic(fmt.Sprintln("Routing Loop:", // fmt.Println("DUMP:", idx, link.nodeID)
source.nodeID, //}
here.nodeID, panic(fmt.Sprintln("Routing Loop:",
dest.nodeID)) source.nodeID,
} here.nodeID,
//fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID) dest.nodeID))
here = next }
lookups++ //fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID)
} here = next
} lookups++
} }
timed := time.Since(start) }
fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds()) }
timed := time.Since(start)
fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds())
} }
func dumpStore(store map[router.NodeID]*Node) { func dumpStore(store map[router.NodeID]*Node) {
for _, node := range store { for _, node := range store {
fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator()) fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator())
node.table.DEBUG_dumpTable() node.table.DEBUG_dumpTable()
} }
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -169,25 +173,25 @@ func dumpStore(store map[router.NodeID]*Node) {
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
func main() { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
fmt.Println("Test") fmt.Println("Test")
store := makeStoreSquareGrid(4) store := makeStoreSquareGrid(4)
idleUntilConverged(store) idleUntilConverged(store)
dumpStore(store) dumpStore(store)
testPaths(store) testPaths(store)
//panic("DYING") //panic("DYING")
store = loadGraph("hype-2016-09-19.list") store = loadGraph("hype-2016-09-19.list")
idleUntilConverged(store) idleUntilConverged(store)
dumpStore(store) dumpStore(store)
testPaths(store) testPaths(store)
} }

View File

@ -15,347 +15,366 @@ import . "yggdrasil"
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
type Node struct { type Node struct {
index int index int
core Core core Core
send chan<- []byte send chan<- []byte
recv <-chan []byte recv <-chan []byte
} }
func (n *Node) init(index int) { func (n *Node) init(index int) {
n.index = index n.index = index
n.core.Init() n.core.Init()
n.send = n.core.DEBUG_getSend() n.send = n.core.DEBUG_getSend()
n.recv = n.core.DEBUG_getRecv() n.recv = n.core.DEBUG_getRecv()
} }
func (n *Node) printTraffic() { func (n *Node) printTraffic() {
for { for {
packet := <-n.recv packet := <-n.recv
fmt.Println(n.index, packet) fmt.Println(n.index, packet)
//panic("Got a packet") //panic("Got a packet")
} }
} }
func (n *Node) startPeers() { func (n *Node) startPeers() {
//for _, p := range n.core.Peers.Ports { //for _, p := range n.core.Peers.Ports {
// go p.MainLoop() // go p.MainLoop()
//} //}
//go n.printTraffic() //go n.printTraffic()
//n.core.Peers.DEBUG_startPeers() //n.core.Peers.DEBUG_startPeers()
} }
func linkNodes(m, n *Node) { func linkNodes(m, n *Node) {
// Don't allow duplicates // Don't allow duplicates
if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) { return } if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) {
// Create peers return
// Buffering reduces packet loss in the sim }
// This slightly speeds up testing (fewer delays before retrying a ping) // Create peers
p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getBoxPub(), // Buffering reduces packet loss in the sim
n.core.DEBUG_getSigPub()) // This slightly speeds up testing (fewer delays before retrying a ping)
q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getBoxPub(), p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getBoxPub(),
m.core.DEBUG_getSigPub()) n.core.DEBUG_getSigPub())
DEBUG_simLinkPeers(p, q) q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getBoxPub(),
return m.core.DEBUG_getSigPub())
DEBUG_simLinkPeers(p, q)
return
} }
func makeStoreSquareGrid(sideLength int) map[int]*Node { func makeStoreSquareGrid(sideLength int) map[int]*Node {
store := make(map[int]*Node) store := make(map[int]*Node)
nNodes := sideLength*sideLength nNodes := sideLength * sideLength
idxs := make([]int, 0, nNodes) idxs := make([]int, 0, nNodes)
// TODO shuffle nodeIDs // TODO shuffle nodeIDs
for idx := 1 ; idx <= nNodes ; idx++ { for idx := 1; idx <= nNodes; idx++ {
idxs = append(idxs, idx) idxs = append(idxs, idx)
} }
for _, idx := range idxs { for _, idx := range idxs {
node := &Node{} node := &Node{}
node.init(idx) node.init(idx)
store[idx] = node store[idx] = node
} }
for idx := 0 ; idx < nNodes ; idx++ { for idx := 0; idx < nNodes; idx++ {
if (idx % sideLength) != 0 { if (idx % sideLength) != 0 {
linkNodes(store[idxs[idx]], store[idxs[idx-1]]) linkNodes(store[idxs[idx]], store[idxs[idx-1]])
} }
if idx >= sideLength { if idx >= sideLength {
linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]]) linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
} }
} }
//for _, node := range store { node.initPorts() } //for _, node := range store { node.initPorts() }
return store return store
} }
func makeStoreStar(nNodes int) map[int]*Node { func makeStoreStar(nNodes int) map[int]*Node {
store := make(map[int]*Node) store := make(map[int]*Node)
center := &Node{} center := &Node{}
center.init(0) center.init(0)
store[0] = center store[0] = center
for idx := 1 ; idx < nNodes ; idx++ { for idx := 1; idx < nNodes; idx++ {
node := &Node{} node := &Node{}
node.init(idx) node.init(idx)
store[idx] = node store[idx] = node
linkNodes(center, node) linkNodes(center, node)
} }
return store return store
} }
func loadGraph(path string) map[int]*Node { func loadGraph(path string) map[int]*Node {
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { panic(err) } if err != nil {
defer f.Close() panic(err)
store := make(map[int]*Node) }
s := bufio.NewScanner(f) defer f.Close()
for s.Scan() { store := make(map[int]*Node)
line := s.Text() s := bufio.NewScanner(f)
nodeIdxstrs := strings.Split(line, " ") for s.Scan() {
nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0]) line := s.Text()
nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1]) nodeIdxstrs := strings.Split(line, " ")
if store[nodeIdx0] == nil { nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0])
node := &Node{} nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1])
node.init(nodeIdx0) if store[nodeIdx0] == nil {
store[nodeIdx0] = node node := &Node{}
} node.init(nodeIdx0)
if store[nodeIdx1] == nil { store[nodeIdx0] = node
node := &Node{} }
node.init(nodeIdx1) if store[nodeIdx1] == nil {
store[nodeIdx1] = node node := &Node{}
} node.init(nodeIdx1)
linkNodes(store[nodeIdx0], store[nodeIdx1]) store[nodeIdx1] = node
} }
//for _, node := range store { node.initPorts() } linkNodes(store[nodeIdx0], store[nodeIdx1])
return store }
//for _, node := range store { node.initPorts() }
return store
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func startNetwork(store map[[32]byte]*Node) { func startNetwork(store map[[32]byte]*Node) {
for _, node := range store { for _, node := range store {
node.startPeers() node.startPeers()
} }
} }
func getKeyedStore(store map[int]*Node) map[[32]byte]*Node { func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
newStore := make(map[[32]byte]*Node) newStore := make(map[[32]byte]*Node)
for _, node := range store { for _, node := range store {
newStore[node.core.DEBUG_getSigPub()] = node newStore[node.core.DEBUG_getSigPub()] = node
} }
return newStore return newStore
} }
func testPaths(store map[[32]byte]*Node) bool { func testPaths(store map[[32]byte]*Node) bool {
nNodes := len(store) nNodes := len(store)
count := 0 count := 0
for _, source := range store { for _, source := range store {
count++ count++
fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index) fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index)
for _, dest := range store { for _, dest := range store {
//if source == dest { continue } //if source == dest { continue }
destLoc := dest.core.DEBUG_getLocator() destLoc := dest.core.DEBUG_getLocator()
coords := destLoc.DEBUG_getCoords() coords := destLoc.DEBUG_getCoords()
temp := 0 temp := 0
ttl := ^uint64(0) ttl := ^uint64(0)
oldTTL := ttl oldTTL := ttl
for here := source ; here != dest ; { for here := source; here != dest; {
if ttl == 0 { if ttl == 0 {
fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL) fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
return false return false
} }
temp++ temp++
if temp > 4096 { panic("Loop?") } if temp > 4096 {
oldTTL = ttl panic("Loop?")
nextPort, newTTL := here.core.DEBUG_switchLookup(coords, ttl) }
ttl = newTTL oldTTL = ttl
// First check if "here" is accepting packets from the previous node nextPort, newTTL := here.core.DEBUG_switchLookup(coords, ttl)
// TODO explain how this works ttl = newTTL
ports := here.core.DEBUG_getPeers().DEBUG_getPorts() // First check if "here" is accepting packets from the previous node
nextPeer := ports[nextPort] // TODO explain how this works
if nextPeer == nil { ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
fmt.Println("Peer associated with next port is nil") nextPeer := ports[nextPort]
return false if nextPeer == nil {
} fmt.Println("Peer associated with next port is nil")
next := store[nextPeer.DEBUG_getSigKey()] return false
/* }
if next == here { next := store[nextPeer.DEBUG_getSigKey()]
//for idx, link := range here.links { /*
// fmt.Println("DUMP:", idx, link.nodeID) if next == here {
//} //for idx, link := range here.links {
if nextPort != 0 { panic("This should not be") } // fmt.Println("DUMP:", idx, link.nodeID)
fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl) //}
//here.table.DEBUG_dumpTable() if nextPort != 0 { panic("This should not be") }
//fmt.Println("Ports:", here.nodeID, here.ports) fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl)
return false //here.table.DEBUG_dumpTable()
panic(fmt.Sprintln("Routing Loop:", //fmt.Println("Ports:", here.nodeID, here.ports)
source.index, return false
here.index, panic(fmt.Sprintln("Routing Loop:",
dest.index)) source.index,
} here.index,
*/ dest.index))
if temp > 4090 { }
fmt.Println("DEBUG:", */
source.index, source.core.DEBUG_getLocator(), if temp > 4090 {
here.index, here.core.DEBUG_getLocator(), fmt.Println("DEBUG:",
dest.index, dest.core.DEBUG_getLocator()) source.index, source.core.DEBUG_getLocator(),
here.core.DEBUG_getSwitchTable().DEBUG_dumpTable() here.index, here.core.DEBUG_getLocator(),
} dest.index, dest.core.DEBUG_getLocator())
if (here != source) { here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
// This is sufficient to check for routing loops or blackholes }
//break if here != source {
} // This is sufficient to check for routing loops or blackholes
here = next //break
} }
} here = next
} }
return true }
}
return true
} }
func stressTest(store map[[32]byte]*Node) { func stressTest(store map[[32]byte]*Node) {
fmt.Println("Stress testing network...") fmt.Println("Stress testing network...")
nNodes := len(store) nNodes := len(store)
dests := make([][]byte, 0, nNodes) dests := make([][]byte, 0, nNodes)
for _, dest := range store { for _, dest := range store {
loc := dest.core.DEBUG_getLocator() loc := dest.core.DEBUG_getLocator()
coords := loc.DEBUG_getCoords() coords := loc.DEBUG_getCoords()
dests = append(dests, coords) dests = append(dests, coords)
} }
lookups := 0 lookups := 0
start := time.Now() start := time.Now()
for _, source := range store { for _, source := range store {
for _, coords := range dests { for _, coords := range dests {
source.core.DEBUG_switchLookup(coords, ^uint64(0)) source.core.DEBUG_switchLookup(coords, ^uint64(0))
lookups++ lookups++
} }
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%d lookups in %s (%f lookups per second)\n", fmt.Printf("%d lookups in %s (%f lookups per second)\n",
lookups, lookups,
timed, timed,
float64(lookups)/timed.Seconds()) float64(lookups)/timed.Seconds())
} }
func pingNodes(store map[[32]byte]*Node) { func pingNodes(store map[[32]byte]*Node) {
fmt.Println("Sending pings...") fmt.Println("Sending pings...")
nNodes := len(store) nNodes := len(store)
count := 0 count := 0
equiv := func (a []byte, b []byte) bool { equiv := func(a []byte, b []byte) bool {
if len(a) != len(b) { return false } if len(a) != len(b) {
for idx := 0 ; idx < len(a) ; idx++ { return false
if a[idx] != b[idx] { return false } }
} for idx := 0; idx < len(a); idx++ {
return true if a[idx] != b[idx] {
} return false
for _, source := range store { }
count++ }
//if count > 16 { break } return true
fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index) }
sourceKey := source.core.DEBUG_getBoxPub() for _, source := range store {
payload := sourceKey[:] count++
sourceAddr := source.core.DEBUG_getAddr()[:] //if count > 16 { break }
sendTo := func (bs []byte, destAddr []byte) { fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
packet := make([]byte, 40+len(bs)) sourceKey := source.core.DEBUG_getBoxPub()
copy(packet[8:24], sourceAddr) payload := sourceKey[:]
copy(packet[24:40], destAddr) sourceAddr := source.core.DEBUG_getAddr()[:]
copy(packet[40:], bs) sendTo := func(bs []byte, destAddr []byte) {
source.send<-packet packet := make([]byte, 40+len(bs))
} copy(packet[8:24], sourceAddr)
destCount := 0 copy(packet[24:40], destAddr)
for _, dest := range store { copy(packet[40:], bs)
destCount += 1 source.send <- packet
fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount) }
if dest == source { destCount := 0
fmt.Println("Skipping self") for _, dest := range store {
continue destCount += 1
} fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount)
destAddr := dest.core.DEBUG_getAddr()[:] if dest == source {
ticker := time.NewTicker(150*time.Millisecond) fmt.Println("Skipping self")
ch := make(chan bool, 1) continue
ch<-true }
doTicker := func () { destAddr := dest.core.DEBUG_getAddr()[:]
for _ = range ticker.C { ticker := time.NewTicker(150 * time.Millisecond)
select { ch := make(chan bool, 1)
case ch<-true: ch <- true
default: doTicker := func() {
} for range ticker.C {
} select {
} case ch <- true:
go doTicker() default:
for loop := true ; loop ; { }
select { }
case packet := <-dest.recv: { }
if equiv(payload, packet[len(packet)-len(payload):]) { go doTicker()
loop = false for loop := true; loop; {
} select {
} case packet := <-dest.recv:
case <-ch: sendTo(payload, destAddr) {
} if equiv(payload, packet[len(packet)-len(payload):]) {
} loop = false
ticker.Stop() }
} }
//break // Only try sending pings from 1 node case <-ch:
// This is because, for some reason, stopTun() doesn't always close it sendTo(payload, destAddr)
// And if two tuns are up, bad things happen (sends via wrong interface) }
} }
fmt.Println("Finished pinging nodes") ticker.Stop()
}
//break // Only try sending pings from 1 node
// This is because, for some reason, stopTun() doesn't always close it
// And if two tuns are up, bad things happen (sends via wrong interface)
}
fmt.Println("Finished pinging nodes")
} }
func pingBench(store map[[32]byte]*Node) { func pingBench(store map[[32]byte]*Node) {
fmt.Println("Benchmarking pings...") fmt.Println("Benchmarking pings...")
nPings := 0 nPings := 0
payload := make([]byte, 1280+40) // MTU + ipv6 header payload := make([]byte, 1280+40) // MTU + ipv6 header
var timed time.Duration var timed time.Duration
//nNodes := len(store) //nNodes := len(store)
count := 0 count := 0
for _, source := range store { for _, source := range store {
count++ count++
//fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index) //fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
getPing := func (key [32]byte, decodedCoords []byte) []byte { getPing := func(key [32]byte, decodedCoords []byte) []byte {
// TODO write some function to do this the right way, put... somewhere... // TODO write some function to do this the right way, put... somewhere...
coords := DEBUG_wire_encode_coords(decodedCoords) coords := DEBUG_wire_encode_coords(decodedCoords)
packet := make([]byte, 0, len(key)+len(coords)+len(payload)) packet := make([]byte, 0, len(key)+len(coords)+len(payload))
packet = append(packet, key[:]...) packet = append(packet, key[:]...)
packet = append(packet, coords...) packet = append(packet, coords...)
packet = append(packet, payload[:]...) packet = append(packet, payload[:]...)
return packet return packet
} }
for _, dest := range store { for _, dest := range store {
key := dest.core.DEBUG_getBoxPub() key := dest.core.DEBUG_getBoxPub()
loc := dest.core.DEBUG_getLocator() loc := dest.core.DEBUG_getLocator()
coords := loc.DEBUG_getCoords() coords := loc.DEBUG_getCoords()
ping := getPing(key, coords) ping := getPing(key, coords)
// TODO make sure the session is open first // TODO make sure the session is open first
start := time.Now() start := time.Now()
for i := 0 ; i < 1000000 ; i++{ source.send<-ping ; nPings++ } for i := 0; i < 1000000; i++ {
timed += time.Since(start) source.send <- ping
break nPings++
} }
break timed += time.Since(start)
} break
fmt.Printf("Sent %d pings in %s (%f per second)\n", }
nPings, break
timed, }
float64(nPings)/timed.Seconds()) fmt.Printf("Sent %d pings in %s (%f per second)\n",
nPings,
timed,
float64(nPings)/timed.Seconds())
} }
func dumpStore(store map[NodeID]*Node) { func dumpStore(store map[NodeID]*Node) {
for _, node := range store { for _, node := range store {
fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator()) fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator())
node.core.DEBUG_getSwitchTable().DEBUG_dumpTable() node.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
} }
} }
func dumpDHTSize(store map[[32]byte]*Node) { func dumpDHTSize(store map[[32]byte]*Node) {
var min, max, sum int var min, max, sum int
for _, node := range store { for _, node := range store {
num := node.core.DEBUG_getDHTSize() num := node.core.DEBUG_getDHTSize()
min = num min = num
max = num max = num
break break
} }
for _, node := range store { for _, node := range store {
num := node.core.DEBUG_getDHTSize() num := node.core.DEBUG_getDHTSize()
if num < min { min = num } if num < min {
if num > max { max = num } min = num
sum += num }
} if num > max {
avg := float64(sum)/float64(len(store)) max = num
fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max) }
sum += num
}
avg := float64(sum) / float64(len(store))
fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -364,47 +383,48 @@ var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
fmt.Println("Test") fmt.Println("Test")
Util_testAddrIDMask() Util_testAddrIDMask()
idxstore := makeStoreSquareGrid(4) idxstore := makeStoreSquareGrid(4)
//idxstore := makeStoreStar(256) //idxstore := makeStoreStar(256)
//idxstore := loadGraph("misc/sim/hype-2016-09-19.list") //idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt") //idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
//idxstore := loadGraph("skitter") //idxstore := loadGraph("skitter")
kstore := getKeyedStore(idxstore) kstore := getKeyedStore(idxstore)
/* /*
for _, n := range kstore { for _, n := range kstore {
log := n.core.DEBUG_getLogger() log := n.core.DEBUG_getLogger()
log.SetOutput(os.Stderr) log.SetOutput(os.Stderr)
} }
*/ */
startNetwork(kstore) startNetwork(kstore)
//time.Sleep(10*time.Second) //time.Sleep(10*time.Second)
// Note that testPaths only works if pressure is turend off // Note that testPaths only works if pressure is turend off
// Otherwise congestion can lead to routing loops? // Otherwise congestion can lead to routing loops?
for finished := false; !finished ; { finished = testPaths(kstore) } for finished := false; !finished; {
pingNodes(kstore) finished = testPaths(kstore)
//pingBench(kstore) // Only after disabling debug output }
//stressTest(kstore) pingNodes(kstore)
//time.Sleep(120*time.Second) //pingBench(kstore) // Only after disabling debug output
dumpDHTSize(kstore) // note that this uses racey functions to read things... //stressTest(kstore)
//time.Sleep(120*time.Second)
dumpDHTSize(kstore) // note that this uses racey functions to read things...
} }

View File

@ -7,16 +7,16 @@ import "runtime"
func main() { func main() {
var ops uint64 = 0 var ops uint64 = 0
for i := 0 ; i < 4 ; i++ { for i := 0; i < 4; i++ {
go func () { go func() {
for { for {
atomic.AddUint64(&ops, 1) atomic.AddUint64(&ops, 1)
runtime.Gosched() runtime.Gosched()
} }
}() }()
} }
time.Sleep(1*time.Second) time.Sleep(1 * time.Second)
opsFinal := atomic.LoadUint64(&ops) opsFinal := atomic.LoadUint64(&ops)
fmt.Println("ops:", opsFinal) fmt.Println("ops:", opsFinal)
} }

View File

@ -4,39 +4,50 @@ import "fmt"
import "net" import "net"
import "time" import "time"
func main () { func main() {
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001") addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
listener, err := net.ListenTCP("tcp", addr) panic(err)
if err != nil { panic(err) } }
defer listener.Close() listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
packetSize := 65535 packetSize := 65535
numPackets := 65535 numPackets := 65535
go func () { go func() {
send, err := net.DialTCP("tcp", nil, addr) send, err := net.DialTCP("tcp", nil, addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, packetSize) }
for idx := 0 ; idx < numPackets ; idx++ { send.Write(msg) } defer send.Close()
}() msg := make([]byte, packetSize)
for idx := 0; idx < numPackets; idx++ {
send.Write(msg)
}
}()
start := time.Now() start := time.Now()
//msg := make([]byte, 1280) //msg := make([]byte, 1280)
sock, err := listener.AcceptTCP() sock, err := listener.AcceptTCP()
if err != nil { panic(err) } if err != nil {
defer sock.Close() panic(err)
read := 0 }
buf := make([]byte, packetSize) defer sock.Close()
for { read := 0
n, err := sock.Read(buf) buf := make([]byte, packetSize)
read += n for {
if err != nil { break } n, err := sock.Read(buf)
} read += n
timed := time.Since(start) if err != nil {
break
}
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds()) fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds())
} }

View File

@ -5,32 +5,32 @@ import "fmt"
import "sync" import "sync"
func main() { func main() {
fmt.Println("Testing speed of recv+send loop") fmt.Println("Testing speed of recv+send loop")
const count = 10000000 const count = 10000000
c := make(chan []byte, 1) c := make(chan []byte, 1)
c<-[]byte{} c <- []byte{}
var wg sync.WaitGroup var wg sync.WaitGroup
worker := func () { worker := func() {
for idx := 0 ; idx < count ; idx++ { for idx := 0; idx < count; idx++ {
p := <-c p := <-c
select { select {
case c<-p: case c <- p:
default: default:
} }
} }
wg.Done() wg.Done()
} }
nIter := 0 nIter := 0
start := time.Now() start := time.Now()
for idx := 0 ; idx < 1 ; idx++ { for idx := 0; idx < 1; idx++ {
go worker() go worker()
nIter += count nIter += count
wg.Add(1) wg.Add(1)
} }
wg.Wait() wg.Wait()
stop := time.Now() stop := time.Now()
timed := stop.Sub(start) timed := stop.Sub(start)
fmt.Printf("%d iterations in %s\n", nIter, timed) fmt.Printf("%d iterations in %s\n", nIter, timed)
fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds()) fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds())
fmt.Printf("%s per iteration\n", timed/time.Duration(nIter)) fmt.Printf("%s per iteration\n", timed/time.Duration(nIter))
} }

View File

@ -6,47 +6,51 @@ import "time"
import "fmt" import "fmt"
type testStruct struct { type testStruct struct {
First uint64 First uint64
Second float64 Second float64
Third []byte Third []byte
} }
func testFunc(tickerDuration time.Duration) { func testFunc(tickerDuration time.Duration) {
chn := make(chan []byte) chn := make(chan []byte)
ticker := time.NewTicker(tickerDuration) ticker := time.NewTicker(tickerDuration)
defer ticker.Stop() defer ticker.Stop()
send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}} send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}}
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
enc := gob.NewEncoder(buf) enc := gob.NewEncoder(buf)
dec := gob.NewDecoder(buf) dec := gob.NewDecoder(buf)
sendCall := func () { sendCall := func() {
err := enc.EncodeValue(&send) err := enc.EncodeValue(&send)
if err != nil { panic(err) } if err != nil {
bs := make([]byte, buf.Len()) panic(err)
buf.Read(bs) }
fmt.Println("send:", bs) bs := make([]byte, buf.Len())
go func() { chn<-bs }() buf.Read(bs)
} fmt.Println("send:", bs)
recvCall := func (bs []byte) { go func() { chn <- bs }()
buf.Write(bs) }
recv := testStruct{} recvCall := func(bs []byte) {
err := dec.DecodeValue(&recv) buf.Write(bs)
fmt.Println("recv:", bs) recv := testStruct{}
if err != nil { panic(err) } err := dec.DecodeValue(&recv)
} fmt.Println("recv:", bs)
for { if err != nil {
select { panic(err)
case bs := <-chn : recvCall(bs) }
case <-ticker.C : sendCall() }
} for {
} select {
case bs := <-chn:
recvCall(bs)
case <-ticker.C:
sendCall()
}
}
} }
func main() { func main() {
go testFunc(100*time.Millisecond) // Does not crash go testFunc(100 * time.Millisecond) // Does not crash
time.Sleep(time.Second) time.Sleep(time.Second)
go testFunc(time.Nanosecond) // Does crash go testFunc(time.Nanosecond) // Does crash
time.Sleep(time.Second) time.Sleep(time.Second)
} }

View File

@ -4,19 +4,19 @@ import "sync"
import "time" import "time"
import "fmt" import "fmt"
func main () { func main() {
const reqs = 1000000 const reqs = 1000000
var wg sync.WaitGroup var wg sync.WaitGroup
start := time.Now() start := time.Now()
for idx := 0 ; idx < reqs ; idx++ { for idx := 0; idx < reqs; idx++ {
wg.Add(1) wg.Add(1)
go func () { wg.Done() } () go func() { wg.Done() }()
} }
wg.Wait() wg.Wait()
stop := time.Now() stop := time.Now()
timed := stop.Sub(start) timed := stop.Sub(start)
fmt.Printf("%d goroutines in %s (%f per second)\n", fmt.Printf("%d goroutines in %s (%f per second)\n",
reqs, reqs,
timed, timed,
reqs/timed.Seconds()) reqs/timed.Seconds())
} }

View File

@ -8,42 +8,50 @@ import "time"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
//addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001") //addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001")
addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001") addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenMulticastUDP("udp", nil, addr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenMulticastUDP("udp", nil, addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func () { go func() {
saddr, err := net.ResolveUDPAddr("udp", "[::]:0") saddr, err := net.ResolveUDPAddr("udp", "[::]:0")
if err != nil { panic(err) } if err != nil {
send, err := net.ListenUDP("udp", saddr) panic(err)
if err != nil { panic(err) } }
defer send.Close() send, err := net.ListenUDP("udp", saddr)
msg := make([]byte, 1280) if err != nil {
for { panic(err)
//fmt.Println("Sending...") }
send.WriteTo(msg, addr) defer send.Close()
} msg := make([]byte, 1280)
}() for {
//fmt.Println("Sending...")
send.WriteTo(msg, addr)
}
}()
numPackets := 1000 numPackets := 1000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
//fmt.Println("Reading:", i) //fmt.Println("Reading:", i)
sock.ReadFromUDP(msg) sock.ReadFromUDP(msg)
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
func main () { func main() {
basic_test() basic_test()
} }

View File

@ -4,75 +4,89 @@ import "fmt"
import "net" import "net"
import "time" import "time"
// TODO look into netmap + libpcap to bypass the kernel as much as possible // TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
var ip *net.IP var ip *net.IP
ifaces, err := net.Interfaces() ifaces, err := net.Interfaces()
if err != nil { panic(err) } if err != nil {
var zone string panic(err)
for _, iface := range ifaces { }
addrs, err := iface.Addrs() var zone string
if err != nil { panic(err) } for _, iface := range ifaces {
for _, addr := range addrs { addrs, err := iface.Addrs()
addrIP, _, _ := net.ParseCIDR(addr.String()) if err != nil {
if addrIP.To4() != nil { continue } // IPv6 only panic(err)
if !addrIP.IsLinkLocalUnicast() { continue } }
zone = iface.Name for _, addr := range addrs {
ip = &addrIP addrIP, _, _ := net.ParseCIDR(addr.String())
} if addrIP.To4() != nil {
addrs, err = iface.MulticastAddrs() continue
if err != nil { panic(err) } } // IPv6 only
for _, addr := range addrs { if !addrIP.IsLinkLocalUnicast() {
fmt.Println(addr.String()) continue
} }
} zone = iface.Name
if ip == nil { panic("No link-local IPv6 found") } ip = &addrIP
fmt.Println("Using address:", *ip) }
addrs, err = iface.MulticastAddrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
fmt.Println(addr.String())
}
}
if ip == nil {
panic("No link-local IPv6 found")
}
fmt.Println("Using address:", *ip)
addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone} addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}
saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone} saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone}
send, err := net.ListenUDP("udp", &saddr) send, err := net.ListenUDP("udp", &saddr)
defer send.Close() defer send.Close()
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", &addr) panic(err)
defer sock.Close() }
if err != nil { panic(err) } sock, err := net.ListenUDP("udp", &addr)
defer sock.Close()
if err != nil {
panic(err)
}
const buffSize = 1048576*100 const buffSize = 1048576 * 100
send.SetWriteBuffer(buffSize) send.SetWriteBuffer(buffSize)
sock.SetReadBuffer(buffSize) sock.SetReadBuffer(buffSize)
sock.SetWriteBuffer(buffSize) sock.SetWriteBuffer(buffSize)
go func() {
msg := make([]byte, 1280)
for {
send.WriteTo(msg, &addr)
}
}()
go func () { numPackets := 100000
msg := make([]byte, 1280) start := time.Now()
for { msg := make([]byte, 2000)
send.WriteTo(msg, &addr) for i := 0; i < numPackets; i++ {
} _, addr, _ := sock.ReadFrom(msg)
}() sock.WriteTo(msg, addr)
}
timed := time.Since(start)
numPackets := 100000 fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
start := time.Now()
msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ {
_, addr, _ := sock.ReadFrom(msg)
sock.WriteTo(msg, addr)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
func main () { func main() {
basic_test() basic_test()
} }

View File

@ -1,83 +1,89 @@
package main package main
import "fmt" import "fmt"
//import "net" //import "net"
import "time" import "time"
import "runtime" import "runtime"
import "sync/atomic" import "sync/atomic"
func poolbench() { func poolbench() {
nWorkers := runtime.GOMAXPROCS(0) nWorkers := runtime.GOMAXPROCS(0)
work := make(chan func(), 1) work := make(chan func(), 1)
workers := make(chan chan<- func(), nWorkers) workers := make(chan chan<- func(), nWorkers)
makeWorker := func() chan<- func() { makeWorker := func() chan<- func() {
ch := make(chan func()) ch := make(chan func())
go func() { go func() {
for { for {
f := <-ch f := <-ch
f() f()
select { select {
case workers<-(ch): case workers <- (ch):
default: return default:
} return
} }
}() }
return ch }()
} return ch
getWorker := func() chan<- func() { }
select { getWorker := func() chan<- func() {
case ch := <-workers: return ch select {
default: return makeWorker() case ch := <-workers:
} return ch
} default:
dispatcher := func() { return makeWorker()
for { }
w := <-work }
ch := getWorker() dispatcher := func() {
ch<-w for {
} w := <-work
} ch := getWorker()
go dispatcher() ch <- w
var count uint64 }
const nCounts = 1000000 }
for idx := 0 ; idx < nCounts ; idx++ { go dispatcher()
f := func() { atomic.AddUint64(&count, 1) } var count uint64
work <- f const nCounts = 1000000
} for idx := 0; idx < nCounts; idx++ {
for atomic.LoadUint64(&count) < nCounts {} f := func() { atomic.AddUint64(&count, 1) }
work <- f
}
for atomic.LoadUint64(&count) < nCounts {
}
} }
func normalbench() { func normalbench() {
var count uint64 var count uint64
const nCounts = 1000000 const nCounts = 1000000
ch := make(chan struct{}, 1) ch := make(chan struct{}, 1)
ch<-struct{}{} ch <- struct{}{}
for idx := 0 ; idx < nCounts ; idx++ { for idx := 0; idx < nCounts; idx++ {
f := func() { atomic.AddUint64(&count, 1) } f := func() { atomic.AddUint64(&count, 1) }
f() f()
<-ch <-ch
ch<-struct{}{} ch <- struct{}{}
} }
} }
func gobench() { func gobench() {
var count uint64 var count uint64
const nCounts = 1000000 const nCounts = 1000000
for idx := 0 ; idx < nCounts ; idx++ { for idx := 0; idx < nCounts; idx++ {
f := func() { atomic.AddUint64(&count, 1) } f := func() { atomic.AddUint64(&count, 1) }
go f() go f()
} }
for atomic.LoadUint64(&count) < nCounts {} for atomic.LoadUint64(&count) < nCounts {
}
} }
func main() { func main() {
start := time.Now() start := time.Now()
poolbench() poolbench()
fmt.Println(time.Since(start)) fmt.Println(time.Since(start))
start = time.Now() start = time.Now()
normalbench() normalbench()
fmt.Println(time.Since(start)) fmt.Println(time.Since(start))
start = time.Now() start = time.Now()
gobench() gobench()
fmt.Println(time.Since(start)) fmt.Println(time.Since(start))
} }

View File

@ -1,64 +1,76 @@
package main package main
import ( import (
"fmt" "bytes"
"time" "crypto/rand"
"bytes" "crypto/rsa"
"sync" "crypto/tls"
"crypto/rand" "crypto/x509"
"crypto/rsa" "encoding/pem"
"crypto/tls" "fmt"
"crypto/x509" quic "github.com/lucas-clemente/quic-go"
"encoding/pem" "math/big"
"math/big" "sync"
quic "github.com/lucas-clemente/quic-go" "time"
) )
const addr = "[::1]:9001" const addr = "[::1]:9001"
func main () { func main() {
go run_server() go run_server()
run_client() run_client()
} }
func run_server() { func run_server() {
listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil) listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)
if err != nil { panic(err) } if err != nil {
ses, err := listener.Accept() panic(err)
if err != nil { panic(err) } }
for { ses, err := listener.Accept()
stream, err := ses.AcceptStream() if err != nil {
if err != nil { panic(err) } panic(err)
go func() { }
defer stream.Close() for {
bs := bytes.Buffer{} stream, err := ses.AcceptStream()
_, err := bs.ReadFrom(stream) if err != nil {
if err != nil { panic(err) } //<-- TooManyOpenStreams panic(err)
}() }
} go func() {
defer stream.Close()
bs := bytes.Buffer{}
_, err := bs.ReadFrom(stream)
if err != nil {
panic(err)
} //<-- TooManyOpenStreams
}()
}
} }
func run_client() { func run_client() {
msgSize := 1048576 msgSize := 1048576
msgCount := 128 msgCount := 128
ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil) ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil)
if err != nil { panic(err) } if err != nil {
bs := make([]byte, msgSize) panic(err)
wg := sync.WaitGroup{} }
start := time.Now() bs := make([]byte, msgSize)
for idx := 0 ; idx < msgCount ; idx++ { wg := sync.WaitGroup{}
wg.Add(1) start := time.Now()
go func() { for idx := 0; idx < msgCount; idx++ {
defer wg.Done() wg.Add(1)
stream, err := ses.OpenStreamSync() go func() {
if err != nil { panic(err) } defer wg.Done()
defer stream.Close() stream, err := ses.OpenStreamSync()
stream.Write(bs) if err != nil {
}() // "go" this later panic(err)
} }
wg.Wait() defer stream.Close()
timed := time.Since(start) stream.Write(bs)
fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds())) }() // "go" this later
}
wg.Wait()
timed := time.Since(start)
fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds()))
} }
// Setup a bare-bones TLS config for the server // Setup a bare-bones TLS config for the server
@ -81,4 +93,3 @@ func generateTLSConfig() *tls.Config {
} }
return &tls.Config{Certificates: []tls.Certificate{tlsCert}} return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
} }

View File

@ -11,59 +11,64 @@ import "runtime/pprof"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:9001") addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", addr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func () { go func() {
send, err := net.DialUDP("udp", nil, addr) send, err := net.DialUDP("udp", nil, addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
for { defer send.Close()
send.Write(msg) msg := make([]byte, 1280)
} for {
}() send.Write(msg)
}
}()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
sock.ReadFrom(msg) sock.ReadFrom(msg)
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -11,67 +11,74 @@ import "runtime/pprof"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:9001") addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", addr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func () { go func() {
send, err := net.DialUDP("udp", nil, addr) send, err := net.DialUDP("udp", nil, addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
bss := make(net.Buffers, 0, 1024) defer send.Close()
for { msg := make([]byte, 1280)
for len(bss) < 1024 { bss := make(net.Buffers, 0, 1024)
bss = append(bss, msg) for {
} for len(bss) < 1024 {
bss.WriteTo(send) bss = append(bss, msg)
//bss = bss[:0] }
//send.Write(msg) bss.WriteTo(send)
} //bss = bss[:0]
}() //send.Write(msg)
}
}()
numPackets := 1000 numPackets := 1000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
n, err := sock.Read(msg) n, err := sock.Read(msg)
if err != nil { panic(err) } if err != nil {
fmt.Println(n) panic(err)
} }
timed := time.Since(start) fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -11,89 +11,106 @@ import "time"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
var ip *net.IP var ip *net.IP
ifaces, err := net.Interfaces() ifaces, err := net.Interfaces()
if err != nil { panic(err) } if err != nil {
var zone string panic(err)
for _, iface := range ifaces { }
addrs, err := iface.Addrs() var zone string
if err != nil { panic(err) } for _, iface := range ifaces {
for _, addr := range addrs { addrs, err := iface.Addrs()
addrIP, _, _ := net.ParseCIDR(addr.String()) if err != nil {
if addrIP.To4() != nil { continue } // IPv6 only panic(err)
if !addrIP.IsLinkLocalUnicast() { continue } }
fmt.Println(iface.Name, addrIP) for _, addr := range addrs {
zone = iface.Name addrIP, _, _ := net.ParseCIDR(addr.String())
ip = &addrIP if addrIP.To4() != nil {
} continue
if ip != nil { break } } // IPv6 only
/* if !addrIP.IsLinkLocalUnicast() {
addrs, err = iface.MulticastAddrs() continue
if err != nil { panic(err) } }
for _, addr := range addrs { fmt.Println(iface.Name, addrIP)
fmt.Println(addr.String()) zone = iface.Name
} ip = &addrIP
*/ }
} if ip != nil {
if ip == nil { panic("No link-local IPv6 found") } break
fmt.Println("Using address:", *ip) }
addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone} /*
addrs, err = iface.MulticastAddrs()
if err != nil { panic(err) }
for _, addr := range addrs {
fmt.Println(addr.String())
}
*/
}
if ip == nil {
panic("No link-local IPv6 found")
}
fmt.Println("Using address:", *ip)
addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}
laddr, err := net.ResolveUDPAddr("udp", "[::]:9001") laddr, err := net.ResolveUDPAddr("udp", "[::]:9001")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", laddr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenUDP("udp", laddr)
if err != nil {
panic(err)
}
defer sock.Close()
go func () { go func() {
send, err := net.DialUDP("udp", nil, &addr) send, err := net.DialUDP("udp", nil, &addr)
//send, err := net.ListenUDP("udp", nil) //send, err := net.ListenUDP("udp", nil)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
for { defer send.Close()
send.Write(msg) msg := make([]byte, 1280)
//send.WriteToUDP(msg, &addr) for {
} send.Write(msg)
}() //send.WriteToUDP(msg, &addr)
}
}()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
sock.ReadFromUDP(msg) sock.ReadFromUDP(msg)
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -13,82 +13,91 @@ const buffSize = 32
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001") addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
listener, err := net.ListenTCP("tcp", addr) panic(err)
if err != nil { panic(err) } }
defer listener.Close() listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
go func () { go func() {
send, err := net.DialTCP("tcp", nil, addr) send, err := net.DialTCP("tcp", nil, addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
bss := make(net.Buffers, 0, 1024) defer send.Close()
for { msg := make([]byte, 1280)
for len(bss) < 1 { //buffSize { bss := make(net.Buffers, 0, 1024)
bss = append(bss, msg) for {
} for len(bss) < 1 { //buffSize {
bss := net.Buffers{[]byte{0,1,2,3}, []byte{0,1}, msg} bss = append(bss, msg)
bss.WriteTo(send) }
//send.Write(msg) bss := net.Buffers{[]byte{0, 1, 2, 3}, []byte{0, 1}, msg}
} bss.WriteTo(send)
}() //send.Write(msg)
}
}()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
//msg := make([]byte, 1280) //msg := make([]byte, 1280)
sock, err := listener.AcceptTCP() sock, err := listener.AcceptTCP()
if err != nil { panic(err) } if err != nil {
defer sock.Close() panic(err)
for i := 0 ; i < numPackets ; i++ { }
msg := make([]byte, 1280*buffSize) defer sock.Close()
n, err := sock.Read(msg) for i := 0; i < numPackets; i++ {
if err != nil { panic(err) } msg := make([]byte, 1280*buffSize)
msg = msg[:n] n, err := sock.Read(msg)
for len(msg) > 1286 { if err != nil {
// handle message panic(err)
i++ }
msg = msg[1286:] msg = msg[:n]
} for len(msg) > 1286 {
// handle remaining fragment of message // handle message
//fmt.Println(n) i++
} msg = msg[1286:]
timed := time.Since(start) }
// handle remaining fragment of message
//fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
_ = func (in (chan<- int)) { _ = func(in chan<- int) {
close(in) close(in)
} }
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -11,62 +11,67 @@ import "runtime/pprof"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:0") addr, err := net.ResolveUDPAddr("udp", "[::1]:0")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", addr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func () { go func() {
raddr := sock.LocalAddr().(*net.UDPAddr) raddr := sock.LocalAddr().(*net.UDPAddr)
send, err := net.DialUDP("udp", nil, raddr) send, err := net.DialUDP("udp", nil, raddr)
//send, err := net.ListenUDP("udp", addr) //send, err := net.ListenUDP("udp", addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
for { defer send.Close()
send.Write(msg) msg := make([]byte, 1280)
//send.WriteToUDP(msg, raddr) for {
} send.Write(msg)
}() //send.WriteToUDP(msg, raddr)
}
}()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
sock.ReadFromUDP(msg) sock.ReadFromUDP(msg)
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -11,62 +11,69 @@ import "runtime/pprof"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001") saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002") panic(err)
if err != nil { panic(err) } }
raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002")
if err != nil {
panic(err)
}
send, err := net.DialUDP("udp", saddr, raddr) send, err := net.DialUDP("udp", saddr, raddr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
}
defer send.Close()
recv, err := net.DialUDP("udp", raddr, saddr) recv, err := net.DialUDP("udp", raddr, saddr)
if err != nil { panic(err) } if err != nil {
defer recv.Close() panic(err)
}
defer recv.Close()
go func () { go func() {
msg := make([]byte, 1280) msg := make([]byte, 1280)
for { for {
send.Write(msg) send.Write(msg)
} }
}() }()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
msg := make([]byte, 2000) msg := make([]byte, 2000)
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
recv.Read(msg) recv.Read(msg)
} }
timed := time.Since(start) timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -11,78 +11,82 @@ import "runtime/pprof"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
sock, err := net.ListenUDP("udp", nil) sock, err := net.ListenUDP("udp", nil)
if err != nil { panic(err) } if err != nil {
defer sock.Close() panic(err)
}
defer sock.Close()
ch := make(chan []byte, 1) ch := make(chan []byte, 1)
writer := func () { writer := func() {
raddr := sock.LocalAddr().(*net.UDPAddr) raddr := sock.LocalAddr().(*net.UDPAddr)
//send, err := net.ListenUDP("udp", nil) //send, err := net.ListenUDP("udp", nil)
//if err != nil { panic(err) } //if err != nil { panic(err) }
//defer send.Close() //defer send.Close()
for { for {
select { select {
case <-ch: case <-ch:
default: default:
} }
msg := make([]byte, 1280) msg := make([]byte, 1280)
sock.WriteToUDP(msg, raddr) sock.WriteToUDP(msg, raddr)
//send.WriteToUDP(msg, raddr) //send.WriteToUDP(msg, raddr)
} }
} }
go writer() go writer()
//go writer() //go writer()
//go writer() //go writer()
//go writer() //go writer()
numPackets := 65536 numPackets := 65536
size := 0 size := 0
start := time.Now() start := time.Now()
success := 0 success := 0
for i := 0 ; i < numPackets ; i++ { for i := 0; i < numPackets; i++ {
msg := make([]byte, 2048) msg := make([]byte, 2048)
n, _, err := sock.ReadFromUDP(msg) n, _, err := sock.ReadFromUDP(msg)
if err != nil { panic(err) } if err != nil {
size += n panic(err)
select { }
case ch <- msg: success += 1 size += n
default: select {
} case ch <- msg:
} success += 1
timed := time.Since(start) default:
}
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds()) fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds())
fmt.Println("Success:", success, "/", numPackets) fmt.Println("Success:", success, "/", numPackets)
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -13,105 +13,112 @@ import "golang.org/x/net/ipv6"
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
if err != nil { panic(err) } if err != nil {
sock, err := net.ListenUDP("udp", udpAddr) panic(err)
if err != nil { panic(err) } }
defer sock.Close() sock, err := net.ListenUDP("udp", udpAddr)
if err != nil {
panic(err)
}
defer sock.Close()
writer := func () { writer := func() {
raddr := sock.LocalAddr().(*net.UDPAddr) raddr := sock.LocalAddr().(*net.UDPAddr)
send, err := net.ListenUDP("udp", nil) send, err := net.ListenUDP("udp", nil)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
conn := ipv6.NewPacketConn(send) }
defer conn.Close() defer send.Close()
var msgs []ipv6.Message conn := ipv6.NewPacketConn(send)
for idx := 0 ; idx < 1024 ; idx++ { defer conn.Close()
msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}} var msgs []ipv6.Message
msgs = append(msgs, msg) for idx := 0; idx < 1024; idx++ {
} msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
for { msgs = append(msgs, msg)
/* }
var msgs []ipv6.Message for {
for idx := 0 ; idx < 1024 ; idx++ { /*
msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}} var msgs []ipv6.Message
msgs = append(msgs, msg) for idx := 0 ; idx < 1024 ; idx++ {
} msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
*/ msgs = append(msgs, msg)
conn.WriteBatch(msgs, 0) }
} */
conn.WriteBatch(msgs, 0)
}
} }
go writer() go writer()
//go writer() //go writer()
//go writer() //go writer()
//go writer() //go writer()
numPackets := 65536 numPackets := 65536
size := 0 size := 0
count := 0 count := 0
start := time.Now() start := time.Now()
/* /*
conn := ipv6.NewPacketConn(sock) conn := ipv6.NewPacketConn(sock)
defer conn.Close() defer conn.Close()
for ; count < numPackets ; count++ { for ; count < numPackets ; count++ {
msgs := make([]ipv6.Message, 1024) msgs := make([]ipv6.Message, 1024)
for _, msg := range msgs { for _, msg := range msgs {
msg.Buffers = append(msg.Buffers, make([]byte, 2048)) msg.Buffers = append(msg.Buffers, make([]byte, 2048))
} }
n, err := conn.ReadBatch(msgs, 0) n, err := conn.ReadBatch(msgs, 0)
if err != nil { panic(err) } if err != nil { panic(err) }
fmt.Println("DEBUG: n", n) fmt.Println("DEBUG: n", n)
for _, msg := range msgs[:n] { for _, msg := range msgs[:n] {
fmt.Println("DEBUG: msg", msg) fmt.Println("DEBUG: msg", msg)
size += msg.N size += msg.N
//for _, bs := range msg.Buffers { //for _, bs := range msg.Buffers {
// size += len(bs) // size += len(bs)
//} //}
count++ count++
} }
} }
//*/ //*/
//* //*
for ; count < numPackets ; count++ { for ; count < numPackets; count++ {
msg := make([]byte, 2048) msg := make([]byte, 2048)
n, _, err := sock.ReadFromUDP(msg) n, _, err := sock.ReadFromUDP(msg)
if err != nil { panic(err) } if err != nil {
size += n panic(err)
} }
//*/ size += n
timed := time.Since(start) }
//*/
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds())
fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds()) fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds())
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -13,84 +13,93 @@ const buffSize = 32
func basic_test() { func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface! // TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001") addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil { panic(err) } if err != nil {
listener, err := net.ListenTCP("tcp", addr) panic(err)
if err != nil { panic(err) } }
defer listener.Close() listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
go func () { go func() {
send, err := net.DialTCP("tcp", nil, addr) send, err := net.DialTCP("tcp", nil, addr)
if err != nil { panic(err) } if err != nil {
defer send.Close() panic(err)
msg := make([]byte, 1280) }
bss := make(net.Buffers, 0, 1024) defer send.Close()
count := 0 msg := make([]byte, 1280)
for { bss := make(net.Buffers, 0, 1024)
time.Sleep(100*time.Millisecond) count := 0
for len(bss) < count { for {
bss = append(bss, msg) time.Sleep(100 * time.Millisecond)
} for len(bss) < count {
bss.WriteTo(send) bss = append(bss, msg)
count++ }
//send.Write(msg) bss.WriteTo(send)
} count++
}() //send.Write(msg)
}
}()
numPackets := 1000000 numPackets := 1000000
start := time.Now() start := time.Now()
//msg := make([]byte, 1280) //msg := make([]byte, 1280)
sock, err := listener.AcceptTCP() sock, err := listener.AcceptTCP()
if err != nil { panic(err) } if err != nil {
defer sock.Close() panic(err)
for { }
msg := make([]byte, 1280*buffSize) defer sock.Close()
n, err := sock.Read(msg) for {
if err != nil { panic(err) } msg := make([]byte, 1280*buffSize)
msg = msg[:n] n, err := sock.Read(msg)
fmt.Println("Read:", n) if err != nil {
for len(msg) > 1280 { panic(err)
// handle message }
msg = msg[1280:] msg = msg[:n]
} fmt.Println("Read:", n)
// handle remaining fragment of message for len(msg) > 1280 {
//fmt.Println(n) // handle message
} msg = msg[1280:]
timed := time.Since(start) }
// handle remaining fragment of message
//fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds()) fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
_ = func (in (chan<- int)) { _ = func(in chan<- int) {
close(in) close(in)
} }
} }
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file") var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main () { func main() {
flag.Parse() flag.Parse()
if *cpuprofile != "" { if *cpuprofile != "" {
f, err := os.Create(*cpuprofile) f, err := os.Create(*cpuprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err)) panic(fmt.Sprintf("could not create CPU profile: ", err))
} }
if err := pprof.StartCPUProfile(f); err != nil { if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err)) panic(fmt.Sprintf("could not start CPU profile: ", err))
} }
defer pprof.StopCPUProfile() defer pprof.StopCPUProfile()
} }
if *memprofile != "" { if *memprofile != "" {
f, err := os.Create(*memprofile) f, err := os.Create(*memprofile)
if err != nil { if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err)) panic(fmt.Sprintf("could not create memory profile: ", err))
} }
defer func () { pprof.WriteHeapProfile(f) ; f.Close() }() defer func() { pprof.WriteHeapProfile(f); f.Close() }()
} }
basic_test() basic_test()
} }

View File

@ -1,11 +1,11 @@
package main package main
import ( import (
"fmt" "fmt"
"log" "log"
"net" "net"
"os/exec" "os/exec"
"time" "time"
"github.com/songgao/water" "github.com/songgao/water"
) )
@ -17,54 +17,56 @@ func setup_dev() *water.Interface {
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
return ifce return ifce
} }
func setup_dev1() *water.Interface { func setup_dev1() *water.Interface {
ifce := setup_dev() ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6", cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::2/8", "addr", "add", "fc00::2/8",
"dev", ifce.Name()) "dev", ifce.Name())
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to assign address") panic("Failed to assign address")
} }
cmd = exec.Command("ip", "link", "set", cmd = exec.Command("ip", "link", "set",
"dev", ifce.Name(), "dev", ifce.Name(),
"mtu", fmt.Sprintf("%d", mtu), "mtu", fmt.Sprintf("%d", mtu),
"up") "up")
out, err = cmd.CombinedOutput() out, err = cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to bring up interface") panic("Failed to bring up interface")
} }
return ifce return ifce
} }
func connect(ifce *water.Interface) { func connect(ifce *water.Interface) {
conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second) conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second)
if err != nil { panic(err) } if err != nil {
sock := conn.(*net.TCPConn) panic(err)
// TODO go a worker to move packets to/from the tun }
sock := conn.(*net.TCPConn)
// TODO go a worker to move packets to/from the tun
} }
func bench() { func bench() {
} }
func main() { func main() {
ifce := setup_dev1() ifce := setup_dev1()
connect(ifce) connect(ifce)
bench() bench()
fmt.Println("Done?") fmt.Println("Done?")
return return
ifce, err := water.New(water.Config{ ifce, err := water.New(water.Config{
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
log.Printf("Interface Name: %s\n", ifce.Name()) log.Printf("Interface Name: %s\n", ifce.Name())
@ -73,10 +75,9 @@ func main() {
for { for {
n, err := ifce.Read(packet) n, err := ifce.Read(packet)
if err != nil { if err != nil {
panic(err) panic(err)
log.Fatal(err) log.Fatal(err)
} }
log.Printf("Packet Received: % x\n", packet[:n]) log.Printf("Packet Received: % x\n", packet[:n])
} }
} }

View File

@ -1,10 +1,10 @@
package main package main
import ( import (
"fmt" "fmt"
"log" "log"
"net" "net"
"os/exec" "os/exec"
"github.com/songgao/water" "github.com/songgao/water"
) )
@ -17,84 +17,84 @@ func setup_dev() *water.Interface {
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
return ifce return ifce
} }
func setup_dev1() *water.Interface { func setup_dev1() *water.Interface {
ifce := setup_dev() ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6", cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::1/8", "addr", "add", "fc00::1/8",
"dev", ifce.Name()) "dev", ifce.Name())
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
fmt.Println(string(err)) fmt.Println(string(err))
panic("Failed to assign address") panic("Failed to assign address")
} }
cmd = exec.Command("ip", "link", "set", cmd = exec.Command("ip", "link", "set",
"dev", tun.name, "dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu), "mtu", fmt.Sprintf("%d", mtu),
"up") "up")
out, err = cmd.CombinedOutput() out, err = cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to bring up interface") panic("Failed to bring up interface")
} }
return ifce return ifce
} }
func addNS(name string) { func addNS(name string) {
cmd := exec.COmmand("ip", "netns", "add", name) cmd := exec.COmmand("ip", "netns", "add", name)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to setup netns") panic("Failed to setup netns")
} }
} }
func delNS(name string) { func delNS(name string) {
cmd := exec.COmmand("ip", "netns", "delete", name) cmd := exec.COmmand("ip", "netns", "delete", name)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to setup netns") panic("Failed to setup netns")
} }
} }
func doInNetNS(comm ...string) *exec.Cmd { func doInNetNS(comm ...string) *exec.Cmd {
return exec.Command("ip", "netns", "exec", netnsName, comm...) return exec.Command("ip", "netns", "exec", netnsName, comm...)
} }
func setup_dev2() *water.Interface { func setup_dev2() *water.Interface {
ifce := setup_dev() ifce := setup_dev()
addNS(netnsName) addNS(netnsName)
cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName) cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to move tun to netns") panic("Failed to move tun to netns")
} }
cmd = doInNetNS("ip", "-f", "inet6", cmd = doInNetNS("ip", "-f", "inet6",
"addr", "add", "fc00::2/8", "addr", "add", "fc00::2/8",
"dev", ifce.Name()) "dev", ifce.Name())
out, err = cmd.CombinedOutput() out, err = cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to assign address") panic("Failed to assign address")
} }
cmd = doInNetNS("ip", "link", "set", cmd = doInNetNS("ip", "link", "set",
"dev", tun.name, "dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu), "mtu", fmt.Sprintf("%d", mtu),
"up") "up")
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
fmt.Println(string(err)) fmt.Println(string(err))
panic("Failed to bring up interface") panic("Failed to bring up interface")
} }
return ifce return ifce
} }
func connect() { func connect() {
@ -109,7 +109,7 @@ func main() {
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
log.Printf("Interface Name: %s\n", ifce.Name()) log.Printf("Interface Name: %s\n", ifce.Name())
@ -118,10 +118,9 @@ func main() {
for { for {
n, err := ifce.Read(packet) n, err := ifce.Read(packet)
if err != nil { if err != nil {
panic(err) panic(err)
log.Fatal(err) log.Fatal(err)
} }
log.Printf("Packet Received: % x\n", packet[:n]) log.Printf("Packet Received: % x\n", packet[:n])
} }
} }

View File

@ -1,10 +1,10 @@
package main package main
import ( import (
"fmt" "fmt"
"log" "log"
"net" "net"
"os/exec" "os/exec"
"github.com/songgao/water" "github.com/songgao/water"
) )
@ -17,87 +17,86 @@ func setup_dev() *water.Interface {
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
return ifce return ifce
} }
func setup_dev1() *water.Interface { func setup_dev1() *water.Interface {
ifce := setup_dev() ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6", cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::1/8", "addr", "add", "fc00::1/8",
"dev", ifce.Name()) "dev", ifce.Name())
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
fmt.Println(string(err)) fmt.Println(string(err))
panic("Failed to assign address") panic("Failed to assign address")
} }
cmd = exec.Command("ip", "link", "set", cmd = exec.Command("ip", "link", "set",
"dev", tun.name, "dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu), "mtu", fmt.Sprintf("%d", mtu),
"up") "up")
out, err = cmd.CombinedOutput() out, err = cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to bring up interface") panic("Failed to bring up interface")
} }
return ifce return ifce
} }
func addNS(name string) { func addNS(name string) {
cmd := exec.COmmand("ip", "netns", "add", name) cmd := exec.COmmand("ip", "netns", "add", name)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to setup netns") panic("Failed to setup netns")
} }
} }
func delNS(name string) { func delNS(name string) {
cmd := exec.COmmand("ip", "netns", "delete", name) cmd := exec.COmmand("ip", "netns", "delete", name)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to setup netns") panic("Failed to setup netns")
} }
} }
func doInNetNS(comm ...string) *exec.Cmd { func doInNetNS(comm ...string) *exec.Cmd {
return exec.Command("ip", "netns", "exec", netnsName, comm...) return exec.Command("ip", "netns", "exec", netnsName, comm...)
} }
func setup_dev2() *water.Interface { func setup_dev2() *water.Interface {
ifce := setup_dev() ifce := setup_dev()
addNS(netnsName) addNS(netnsName)
cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName) cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
fmt.Println(string(out)) fmt.Println(string(out))
panic("Failed to move tun to netns") panic("Failed to move tun to netns")
} }
cmd = cmd = exec.Command(
cmd = exec.Command( "ip", "-f", "inet6",
"ip", "-f", "inet6", "addr", "add", "fc00::2/8",
"addr", "add", "fc00::2/8", "dev", ifce.Name())
"dev", ifce.Name()) out, err := cmd.CombinedOutput()
out, err := cmd.CombinedOutput() if err != nil {
if err != nil { fmt.Println(string(out))
fmt.Println(string(out)) panic("Failed to assign address")
panic("Failed to assign address") }
} cmd = exec.Command(
cmd = exec.Command( "ip", "link", "set",
"ip", "link", "set", "dev", tun.name,
"dev", tun.name, "mtu", fmt.Sprintf("%d", mtu),
"mtu", fmt.Sprintf("%d", mtu), "up")
"up") out, err := cmd.CombinedOutput()
out, err := cmd.CombinedOutput() if err != nil {
if err != nil { fmt.Println(string(out))
fmt.Println(string(out)) fmt.Println(string(err))
fmt.Println(string(err)) panic("Failed to bring up interface")
panic("Failed to bring up interface") }
} return ifce
return ifce
} }
func connect() { func connect() {
@ -112,7 +111,7 @@ func main() {
DeviceType: water.TUN, DeviceType: water.TUN,
}) })
if err != nil { if err != nil {
panic(err) panic(err)
} }
log.Printf("Interface Name: %s\n", ifce.Name()) log.Printf("Interface Name: %s\n", ifce.Name())
@ -121,10 +120,9 @@ func main() {
for { for {
n, err := ifce.Read(packet) n, err := ifce.Read(packet)
if err != nil { if err != nil {
panic(err) panic(err)
log.Fatal(err) log.Fatal(err)
} }
log.Printf("Packet Received: % x\n", packet[:n]) log.Printf("Packet Received: % x\n", packet[:n])
} }
} }

View File

@ -27,14 +27,18 @@ func main() {
} }
}() }()
address := net.ParseIP("fc00::1") address := net.ParseIP("fc00::1")
tuntap, err := tun.OpenTun(address) tuntap, err := tun.OpenTun(address)
if err != nil { panic(err) } if err != nil {
panic(err)
}
defer tuntap.Close() defer tuntap.Close()
// read data from tun into rCh channel. // read data from tun into rCh channel.
wg.Add(1) wg.Add(1)
go func() { go func() {
if err := tuntap.Read(rCh); err != nil { panic(err) } if err := tuntap.Read(rCh); err != nil {
panic(err)
}
wg.Done() wg.Done()
}() }()
wg.Wait() wg.Wait()

View File

@ -6,34 +6,35 @@ import "fmt"
import "time" import "time"
func main() { func main() {
for idx := 0 ; idx < 64 ; idx++ { for idx := 0; idx < 64; idx++ {
num := uint64(1) << uint(idx) num := uint64(1) << uint(idx)
encoded := make([]byte, 10) encoded := make([]byte, 10)
length := wire.Encode_uint64(num, encoded) length := wire.Encode_uint64(num, encoded)
decoded, _ := wire.Decode_uint64(encoded[:length]) decoded, _ := wire.Decode_uint64(encoded[:length])
if decoded != num { panic(fmt.Sprintf("%d != %d", decoded, num)) } if decoded != num {
} panic(fmt.Sprintf("%d != %d", decoded, num))
const count = 1000000 }
start := time.Now() }
encoded := make([]byte, 10) const count = 1000000
//num := ^uint64(0) // Longest possible value for full uint64 range start := time.Now()
num := ^uint64(0) >> 1 // Largest positive int64 (real use case) encoded := make([]byte, 10)
//num := uint64(0) // Shortest possible value, most will be of this length //num := ^uint64(0) // Longest possible value for full uint64 range
length := wire.Encode_uint64(num, encoded) num := ^uint64(0) >> 1 // Largest positive int64 (real use case)
for idx := 0 ; idx < count ; idx++ { //num := uint64(0) // Shortest possible value, most will be of this length
wire.Encode_uint64(num, encoded) length := wire.Encode_uint64(num, encoded)
} for idx := 0; idx < count; idx++ {
timed := time.Since(start) wire.Encode_uint64(num, encoded)
fmt.Println("Ops:", count/timed.Seconds()) }
fmt.Println("Time:", timed.Nanoseconds()/count) timed := time.Since(start)
fmt.Println("Ops:", count/timed.Seconds())
fmt.Println("Time:", timed.Nanoseconds()/count)
encoded = encoded[:length] encoded = encoded[:length]
start = time.Now() start = time.Now()
for idx := 0 ; idx < count ; idx++ { for idx := 0; idx < count; idx++ {
wire.Decode_uint64(encoded) wire.Decode_uint64(encoded)
} }
timed = time.Since(start) timed = time.Since(start)
fmt.Println("Ops:", count/timed.Seconds()) fmt.Println("Ops:", count/timed.Seconds())
fmt.Println("Time:", timed.Nanoseconds()/count) fmt.Println("Time:", timed.Nanoseconds()/count)
} }

View File

@ -1,108 +1,120 @@
package yggdrasil package yggdrasil
type address [16]byte // IPv6 address within the network type address [16]byte // IPv6 address within the network
type subnet [8]byte // It's a /64 type subnet [8]byte // It's a /64
var address_prefix = [...]byte{0xfd} // For node addresses + local subnets var address_prefix = [...]byte{0xfd} // For node addresses + local subnets
func (a *address) isValid() bool { func (a *address) isValid() bool {
for idx := range address_prefix { for idx := range address_prefix {
if (*a)[idx] != address_prefix[idx] { return false } if (*a)[idx] != address_prefix[idx] {
} return false
return (*a)[len(address_prefix)] & 0x80 == 0 }
}
return (*a)[len(address_prefix)]&0x80 == 0
} }
func (s *subnet) isValid() bool { func (s *subnet) isValid() bool {
for idx := range address_prefix { for idx := range address_prefix {
if (*s)[idx] != address_prefix[idx] { return false } if (*s)[idx] != address_prefix[idx] {
} return false
return (*s)[len(address_prefix)] & 0x80 != 0 }
}
return (*s)[len(address_prefix)]&0x80 != 0
} }
func address_addrForNodeID(nid *NodeID) *address { func address_addrForNodeID(nid *NodeID) *address {
// 128 bit address // 128 bit address
// Begins with prefix // Begins with prefix
// Next bit is a 0 // Next bit is a 0
// Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID // Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID
// Leading 1s and first leading 0 of the NodeID are truncated off // Leading 1s and first leading 0 of the NodeID are truncated off
// The rest is appended to the IPv6 address (truncated to 128 bits total) // The rest is appended to the IPv6 address (truncated to 128 bits total)
var addr address var addr address
var temp []byte var temp []byte
done := false done := false
ones := byte(0) ones := byte(0)
bits := byte(0) bits := byte(0)
nBits := 0 nBits := 0
for idx := 0 ; idx < 8*len(nid) ; idx++ { for idx := 0; idx < 8*len(nid); idx++ {
bit := (nid[idx/8] & (0x80 >> byte(idx % 8))) >> byte(7 - (idx % 8)) bit := (nid[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
if !done && bit != 0 { if !done && bit != 0 {
ones++ ones++
continue continue
} }
if !done && bit == 0 { if !done && bit == 0 {
done = true done = true
continue // FIXME this assumes that ones <= 127 continue // FIXME this assumes that ones <= 127
} }
bits = (bits << 1) | bit bits = (bits << 1) | bit
nBits++ nBits++
if nBits == 8 { if nBits == 8 {
nBits = 0 nBits = 0
temp = append(temp, bits) temp = append(temp, bits)
} }
} }
copy(addr[:], address_prefix[:]) copy(addr[:], address_prefix[:])
addr[len(address_prefix)] = ones & 0x7f addr[len(address_prefix)] = ones & 0x7f
copy(addr[len(address_prefix)+1:], temp) copy(addr[len(address_prefix)+1:], temp)
return &addr return &addr
} }
func address_subnetForNodeID(nid *NodeID) *subnet { func address_subnetForNodeID(nid *NodeID) *subnet {
// Exactly as the address version, with two exceptions: // Exactly as the address version, with two exceptions:
// 1) The first bit after the fixed prefix is a 1 instead of a 0 // 1) The first bit after the fixed prefix is a 1 instead of a 0
// 2) It's truncated to a subnet prefix length instead of 128 bits // 2) It's truncated to a subnet prefix length instead of 128 bits
addr := *address_addrForNodeID(nid) addr := *address_addrForNodeID(nid)
var snet subnet var snet subnet
copy(snet[:], addr[:]) copy(snet[:], addr[:])
snet[len(address_prefix)] |= 0x80 snet[len(address_prefix)] |= 0x80
return &snet return &snet
} }
func (a *address) getNodeIDandMask() (*NodeID, *NodeID) { func (a *address) getNodeIDandMask() (*NodeID, *NodeID) {
// Mask is a bitmask to mark the bits visible from the address // Mask is a bitmask to mark the bits visible from the address
// This means truncated leading 1s, first leading 0, and visible part of addr // This means truncated leading 1s, first leading 0, and visible part of addr
var nid NodeID var nid NodeID
var mask NodeID var mask NodeID
ones := int(a[len(address_prefix)] & 0x7f) ones := int(a[len(address_prefix)] & 0x7f)
for idx := 0 ; idx < ones ; idx++ { nid[idx/8] |= 0x80 >> byte(idx % 8) } for idx := 0; idx < ones; idx++ {
nidOffset := ones+1 nid[idx/8] |= 0x80 >> byte(idx%8)
addrOffset := 8*len(address_prefix)+8 }
for idx := addrOffset ; idx < 8*len(a) ; idx++ { nidOffset := ones + 1
bits := a[idx/8] & (0x80 >> byte(idx % 8)) addrOffset := 8*len(address_prefix) + 8
bits <<= byte(idx % 8) for idx := addrOffset; idx < 8*len(a); idx++ {
nidIdx := nidOffset + (idx - addrOffset) bits := a[idx/8] & (0x80 >> byte(idx%8))
bits >>= byte(nidIdx % 8) bits <<= byte(idx % 8)
nid[nidIdx/8] |= bits nidIdx := nidOffset + (idx - addrOffset)
} bits >>= byte(nidIdx % 8)
maxMask := 8*(len(a) - len(address_prefix) - 1) + ones + 1 nid[nidIdx/8] |= bits
for idx := 0 ; idx < maxMask ; idx++ { mask[idx/8] |= 0x80 >> byte(idx % 8) } }
return &nid, &mask maxMask := 8*(len(a)-len(address_prefix)-1) + ones + 1
for idx := 0; idx < maxMask; idx++ {
mask[idx/8] |= 0x80 >> byte(idx%8)
}
return &nid, &mask
} }
func (s *subnet) getNodeIDandMask() (*NodeID, *NodeID) { func (s *subnet) getNodeIDandMask() (*NodeID, *NodeID) {
// As witht he address version, but visible parts of the subnet prefix instead // As witht he address version, but visible parts of the subnet prefix instead
var nid NodeID var nid NodeID
var mask NodeID var mask NodeID
ones := int(s[len(address_prefix)] & 0x7f) ones := int(s[len(address_prefix)] & 0x7f)
for idx := 0 ; idx < ones ; idx++ { nid[idx/8] |= 0x80 >> byte(idx % 8) } for idx := 0; idx < ones; idx++ {
nidOffset := ones+1 nid[idx/8] |= 0x80 >> byte(idx%8)
addrOffset := 8*len(address_prefix)+8 }
for idx := addrOffset ; idx < 8*len(s) ; idx++ { nidOffset := ones + 1
bits := s[idx/8] & (0x80 >> byte(idx % 8)) addrOffset := 8*len(address_prefix) + 8
bits <<= byte(idx % 8) for idx := addrOffset; idx < 8*len(s); idx++ {
nidIdx := nidOffset + (idx - addrOffset) bits := s[idx/8] & (0x80 >> byte(idx%8))
bits >>= byte(nidIdx % 8) bits <<= byte(idx % 8)
nid[nidIdx/8] |= bits nidIdx := nidOffset + (idx - addrOffset)
} bits >>= byte(nidIdx % 8)
maxMask := 8*(len(s) - len(address_prefix) - 1) + ones + 1 nid[nidIdx/8] |= bits
for idx := 0 ; idx < maxMask ; idx++ { mask[idx/8] |= 0x80 >> byte(idx % 8) } }
return &nid, &mask maxMask := 8*(len(s)-len(address_prefix)-1) + ones + 1
for idx := 0; idx < maxMask; idx++ {
mask[idx/8] |= 0x80 >> byte(idx%8)
}
return &nid, &mask
} }

View File

@ -4,61 +4,60 @@ import "io/ioutil"
import "log" import "log"
type Core struct { type Core struct {
// This is the main data structure that holds everything else for a node // This is the main data structure that holds everything else for a node
// TODO? move keys out of core and into something more appropriate // TODO? move keys out of core and into something more appropriate
// e.g. box keys live in sessions // e.g. box keys live in sessions
// sig keys live in peers or sigs (or wherever signing/validating logic is) // sig keys live in peers or sigs (or wherever signing/validating logic is)
boxPub boxPubKey boxPub boxPubKey
boxPriv boxPrivKey boxPriv boxPrivKey
sigPub sigPubKey sigPub sigPubKey
sigPriv sigPrivKey sigPriv sigPrivKey
switchTable switchTable switchTable switchTable
peers peers peers peers
sigs sigManager sigs sigManager
sessions sessions sessions sessions
router router router router
dht dht dht dht
tun tunDevice tun tunDevice
searches searches searches searches
tcp *tcpInterface tcp *tcpInterface
udp *udpInterface udp *udpInterface
log *log.Logger log *log.Logger
} }
func (c *Core) Init() { func (c *Core) Init() {
// Only called by the simulator, to set up nodes with random keys // Only called by the simulator, to set up nodes with random keys
bpub, bpriv := newBoxKeys() bpub, bpriv := newBoxKeys()
spub, spriv := newSigKeys() spub, spriv := newSigKeys()
c.init(bpub, bpriv, spub, spriv) c.init(bpub, bpriv, spub, spriv)
} }
func (c *Core) init(bpub *boxPubKey, func (c *Core) init(bpub *boxPubKey,
bpriv *boxPrivKey, bpriv *boxPrivKey,
spub *sigPubKey, spub *sigPubKey,
spriv *sigPrivKey) { spriv *sigPrivKey) {
// TODO separate init and start functions // TODO separate init and start functions
// Init sets up structs // Init sets up structs
// Start launches goroutines that depend on structs being set up // Start launches goroutines that depend on structs being set up
// This is pretty much required to avoid race conditions // This is pretty much required to avoid race conditions
util_initByteStore() util_initByteStore()
c.log = log.New(ioutil.Discard, "", 0) c.log = log.New(ioutil.Discard, "", 0)
c.boxPub, c.boxPriv = *bpub, *bpriv c.boxPub, c.boxPriv = *bpub, *bpriv
c.sigPub, c.sigPriv = *spub, *spriv c.sigPub, c.sigPriv = *spub, *spriv
c.sigs.init() c.sigs.init()
c.searches.init(c) c.searches.init(c)
c.dht.init(c) c.dht.init(c)
c.sessions.init(c) c.sessions.init(c)
c.peers.init(c) c.peers.init(c)
c.router.init(c) c.router.init(c)
c.switchTable.init(c, c.sigPub) // TODO move before peers? before router? c.switchTable.init(c, c.sigPub) // TODO move before peers? before router?
c.tun.init(c) c.tun.init(c)
} }
func (c *Core) GetNodeID() *NodeID { func (c *Core) GetNodeID() *NodeID {
return getNodeID(&c.boxPub) return getNodeID(&c.boxPub)
} }
func (c *Core) GetTreeID() *TreeID { func (c *Core) GetTreeID() *TreeID {
return getTreeID(&c.sigPub) return getTreeID(&c.sigPub)
} }

View File

@ -28,20 +28,22 @@ type TreeID [TreeIDLen]byte
type handle [handleLen]byte type handle [handleLen]byte
func getNodeID(pub *boxPubKey) *NodeID { func getNodeID(pub *boxPubKey) *NodeID {
h := sha512.Sum512(pub[:]) h := sha512.Sum512(pub[:])
return (*NodeID)(&h) return (*NodeID)(&h)
} }
func getTreeID(pub *sigPubKey) *TreeID { func getTreeID(pub *sigPubKey) *TreeID {
h := sha512.Sum512(pub[:]) h := sha512.Sum512(pub[:])
return (*TreeID)(&h) return (*TreeID)(&h)
} }
func newHandle() *handle { func newHandle() *handle {
var h handle var h handle
_, err := rand.Read(h[:]) _, err := rand.Read(h[:])
if err != nil { panic(err) } if err != nil {
return &h panic(err)
}
return &h
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -57,26 +59,28 @@ type sigPrivKey [sigPrivKeyLen]byte
type sigBytes [sigLen]byte type sigBytes [sigLen]byte
func newSigKeys() (*sigPubKey, *sigPrivKey) { func newSigKeys() (*sigPubKey, *sigPrivKey) {
var pub sigPubKey var pub sigPubKey
var priv sigPrivKey var priv sigPrivKey
pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader) pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader)
if err != nil { panic(err) } if err != nil {
copy(pub[:], pubSlice) panic(err)
copy(priv[:], privSlice) }
return &pub, &priv copy(pub[:], pubSlice)
copy(priv[:], privSlice)
return &pub, &priv
} }
func sign(priv *sigPrivKey, msg []byte) *sigBytes { func sign(priv *sigPrivKey, msg []byte) *sigBytes {
var sig sigBytes var sig sigBytes
sigSlice := ed25519.Sign(priv[:], msg) sigSlice := ed25519.Sign(priv[:], msg)
copy(sig[:], sigSlice) copy(sig[:], sigSlice)
return &sig return &sig
} }
func verify(pub *sigPubKey, msg []byte, sig *sigBytes) bool { func verify(pub *sigPubKey, msg []byte, sig *sigBytes) bool {
// Should sig be an array instead of a slice?... // Should sig be an array instead of a slice?...
// It's fixed size, but // It's fixed size, but
return ed25519.Verify(pub[:], msg, sig[:]) return ed25519.Verify(pub[:], msg, sig[:])
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -94,61 +98,68 @@ type boxSharedKey [boxSharedKeyLen]byte
type boxNonce [boxNonceLen]byte type boxNonce [boxNonceLen]byte
func newBoxKeys() (*boxPubKey, *boxPrivKey) { func newBoxKeys() (*boxPubKey, *boxPrivKey) {
pubBytes, privBytes, err := box.GenerateKey(rand.Reader) pubBytes, privBytes, err := box.GenerateKey(rand.Reader)
if err != nil { panic(err) } if err != nil {
pub := (*boxPubKey)(pubBytes) panic(err)
priv := (*boxPrivKey)(privBytes) }
return pub, priv pub := (*boxPubKey)(pubBytes)
priv := (*boxPrivKey)(privBytes)
return pub, priv
} }
func getSharedKey(myPrivKey *boxPrivKey, func getSharedKey(myPrivKey *boxPrivKey,
othersPubKey *boxPubKey) *boxSharedKey { othersPubKey *boxPubKey) *boxSharedKey {
var shared [boxSharedKeyLen]byte var shared [boxSharedKeyLen]byte
priv := (*[boxPrivKeyLen]byte)(myPrivKey) priv := (*[boxPrivKeyLen]byte)(myPrivKey)
pub := (*[boxPubKeyLen]byte)(othersPubKey) pub := (*[boxPubKeyLen]byte)(othersPubKey)
box.Precompute(&shared, pub, priv) box.Precompute(&shared, pub, priv)
return (*boxSharedKey)(&shared) return (*boxSharedKey)(&shared)
} }
func boxOpen(shared *boxSharedKey, func boxOpen(shared *boxSharedKey,
boxed []byte, boxed []byte,
nonce *boxNonce) ([]byte, bool) { nonce *boxNonce) ([]byte, bool) {
out := util_getBytes() out := util_getBytes()
//return append(out, boxed...), true // XXX HACK to test without encryption //return append(out, boxed...), true // XXX HACK to test without encryption
s := (*[boxSharedKeyLen]byte)(shared) s := (*[boxSharedKeyLen]byte)(shared)
n := (*[boxNonceLen]byte)(nonce) n := (*[boxNonceLen]byte)(nonce)
unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s) unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s)
return unboxed, success return unboxed, success
} }
func boxSeal(shared *boxSharedKey, unboxed []byte, nonce *boxNonce) ([]byte, *boxNonce) { func boxSeal(shared *boxSharedKey, unboxed []byte, nonce *boxNonce) ([]byte, *boxNonce) {
if nonce == nil { nonce = newBoxNonce() } if nonce == nil {
nonce.update() nonce = newBoxNonce()
out := util_getBytes() }
//return append(out, unboxed...), nonce // XXX HACK to test without encryption nonce.update()
s := (*[boxSharedKeyLen]byte)(shared) out := util_getBytes()
n := (*[boxNonceLen]byte)(nonce) //return append(out, unboxed...), nonce // XXX HACK to test without encryption
boxed := box.SealAfterPrecomputation(out, unboxed, n, s) s := (*[boxSharedKeyLen]byte)(shared)
return boxed, nonce n := (*[boxNonceLen]byte)(nonce)
boxed := box.SealAfterPrecomputation(out, unboxed, n, s)
return boxed, nonce
} }
func newBoxNonce() *boxNonce { func newBoxNonce() *boxNonce {
var nonce boxNonce var nonce boxNonce
_, err := rand.Read(nonce[:]) _, err := rand.Read(nonce[:])
for ; err == nil && nonce[0] == 0xff ; _, err = rand.Read(nonce[:]){ for ; err == nil && nonce[0] == 0xff; _, err = rand.Read(nonce[:]) {
// Make sure nonce isn't too high // Make sure nonce isn't too high
// This is just to make rollover unlikely to happen // This is just to make rollover unlikely to happen
// Rollover is fine, but it may kill the session and force it to reopen // Rollover is fine, but it may kill the session and force it to reopen
} }
if err != nil { panic(err) } if err != nil {
return &nonce panic(err)
}
return &nonce
} }
func (n *boxNonce) update() { func (n *boxNonce) update() {
oldNonce := *n oldNonce := *n
n[len(n)-1] += 2 n[len(n)-1] += 2
for i := len(n)-2 ; i >= 0 ; i-- { for i := len(n) - 2; i >= 0; i-- {
if n[i+1] < oldNonce[i+1] { n[i] += 1 } if n[i+1] < oldNonce[i+1] {
} n[i] += 1
}
}
} }

View File

@ -15,32 +15,32 @@ import "log"
// Core // Core
func (c *Core) DEBUG_getSigPub() sigPubKey { func (c *Core) DEBUG_getSigPub() sigPubKey {
return (sigPubKey)(c.sigPub) return (sigPubKey)(c.sigPub)
} }
func (c *Core) DEBUG_getBoxPub() boxPubKey { func (c *Core) DEBUG_getBoxPub() boxPubKey {
return (boxPubKey)(c.boxPub) return (boxPubKey)(c.boxPub)
} }
func (c *Core) DEBUG_getSend() (chan<- []byte) { func (c *Core) DEBUG_getSend() chan<- []byte {
return c.tun.send return c.tun.send
} }
func (c *Core) DEBUG_getRecv() (<-chan []byte) { func (c *Core) DEBUG_getRecv() <-chan []byte {
return c.tun.recv return c.tun.recv
} }
// Peer // Peer
func (c *Core) DEBUG_getPeers() *peers { func (c *Core) DEBUG_getPeers() *peers {
return &c.peers return &c.peers
} }
func (ps *peers) DEBUG_newPeer(box boxPubKey, func (ps *peers) DEBUG_newPeer(box boxPubKey,
sig sigPubKey) *peer { sig sigPubKey) *peer {
//in <-chan []byte, //in <-chan []byte,
//out chan<- []byte) *peer { //out chan<- []byte) *peer {
return ps.newPeer(&box, &sig)//, in, out) return ps.newPeer(&box, &sig) //, in, out)
} }
/* /*
@ -55,47 +55,51 @@ func (ps *peers) DEBUG_startPeers() {
*/ */
func (ps *peers) DEBUG_hasPeer(key sigPubKey) bool { func (ps *peers) DEBUG_hasPeer(key sigPubKey) bool {
ports := ps.ports.Load().(map[switchPort]*peer) ports := ps.ports.Load().(map[switchPort]*peer)
for _, p := range ports { for _, p := range ports {
if p == nil { continue } if p == nil {
if p.sig == key { return true } continue
} }
return false if p.sig == key {
return true
}
}
return false
} }
func (ps *peers) DEBUG_getPorts() map[switchPort]*peer { func (ps *peers) DEBUG_getPorts() map[switchPort]*peer {
ports := ps.ports.Load().(map[switchPort]*peer) ports := ps.ports.Load().(map[switchPort]*peer)
newPeers := make(map[switchPort]*peer) newPeers := make(map[switchPort]*peer)
for port, p := range ports{ for port, p := range ports {
newPeers[port] = p newPeers[port] = p
} }
return newPeers return newPeers
} }
func (p *peer) DEBUG_getSigKey() sigPubKey { func (p *peer) DEBUG_getSigKey() sigPubKey {
return p.sig return p.sig
} }
func (p *peer) DEEBUG_getPort() switchPort { func (p *peer) DEEBUG_getPort() switchPort {
return p.port return p.port
} }
// Router // Router
func (c *Core) DEBUG_getSwitchTable() *switchTable { func (c *Core) DEBUG_getSwitchTable() *switchTable {
return &c.switchTable return &c.switchTable
} }
func (c *Core) DEBUG_getLocator() switchLocator { func (c *Core) DEBUG_getLocator() switchLocator {
return c.switchTable.getLocator() return c.switchTable.getLocator()
} }
func (l *switchLocator) DEBUG_getCoords() []byte { func (l *switchLocator) DEBUG_getCoords() []byte {
return l.getCoords() return l.getCoords()
} }
func (c *Core) DEBUG_switchLookup(dest []byte, ttl uint64) (switchPort, uint64) { func (c *Core) DEBUG_switchLookup(dest []byte, ttl uint64) (switchPort, uint64) {
return c.switchTable.lookup(dest, ttl) return c.switchTable.lookup(dest, ttl)
} }
/* /*
@ -109,45 +113,49 @@ func (t *switchTable) DEBUG_isDirty() bool {
*/ */
func (t *switchTable) DEBUG_dumpTable() { func (t *switchTable) DEBUG_dumpTable() {
//data := t.data.Load().(*tabledata) //data := t.data.Load().(*tabledata)
t.mutex.RLock() t.mutex.RLock()
defer t.mutex.RUnlock() defer t.mutex.RUnlock()
data := t.data data := t.data
for _, peer := range data.peers { for _, peer := range data.peers {
//fmt.Println("DUMPTABLE:", t.treeID, peer.treeID, peer.port, //fmt.Println("DUMPTABLE:", t.treeID, peer.treeID, peer.port,
// peer.locator.Root, peer.coords, // peer.locator.Root, peer.coords,
// peer.reverse.Root, peer.reverse.Coords, peer.forward) // peer.reverse.Root, peer.reverse.Coords, peer.forward)
fmt.Println("DUMPTABLE:", t.key, peer.key, peer.locator.coords, peer.port/*, peer.forward*/) fmt.Println("DUMPTABLE:", t.key, peer.key, peer.locator.coords, peer.port /*, peer.forward*/)
} }
} }
func (t *switchTable) DEBUG_getReversePort(port switchPort) switchPort { func (t *switchTable) DEBUG_getReversePort(port switchPort) switchPort {
// Returns Port(0) if it cannot get the reverse peer for any reason // Returns Port(0) if it cannot get the reverse peer for any reason
//data := t.data.Load().(*tabledata) //data := t.data.Load().(*tabledata)
t.mutex.RLock() t.mutex.RLock()
defer t.mutex.RUnlock() defer t.mutex.RUnlock()
data := t.data data := t.data
if port >= switchPort(len(data.peers)) { return switchPort(0) } if port >= switchPort(len(data.peers)) {
pinfo := data.peers[port] return switchPort(0)
if len(pinfo.locator.coords) < 1 { return switchPort(0) } }
return pinfo.locator.coords[len(pinfo.locator.coords)-1] pinfo := data.peers[port]
if len(pinfo.locator.coords) < 1 {
return switchPort(0)
}
return pinfo.locator.coords[len(pinfo.locator.coords)-1]
} }
// Wire // Wire
func DEBUG_wire_encode_coords(coords []byte) []byte { func DEBUG_wire_encode_coords(coords []byte) []byte {
return wire_encode_coords(coords) return wire_encode_coords(coords)
} }
// DHT, via core // DHT, via core
func (c *Core) DEBUG_getDHTSize() int { func (c *Core) DEBUG_getDHTSize() int {
total := 0 total := 0
for bidx := 0 ; bidx < c.dht.nBuckets() ; bidx++ { for bidx := 0; bidx < c.dht.nBuckets(); bidx++ {
b := c.dht.getBucket(bidx) b := c.dht.getBucket(bidx)
total += len(b.infos) total += len(b.infos)
} }
return total return total
} }
// udpInterface // udpInterface
@ -193,99 +201,104 @@ func (c *Core) DEBUG_startLoopbackUDPInterface() {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_getAddr() *address { func (c *Core) DEBUG_getAddr() *address {
return address_addrForNodeID(&c.dht.nodeID) return address_addrForNodeID(&c.dht.nodeID)
} }
func (c *Core) DEBUG_startTun(ifname string) { func (c *Core) DEBUG_startTun(ifname string) {
c.DEBUG_startTunWithMTU(ifname, 1280) c.DEBUG_startTunWithMTU(ifname, 1280)
} }
func (c *Core) DEBUG_startTunWithMTU(ifname string, mtu int) { func (c *Core) DEBUG_startTunWithMTU(ifname string, mtu int) {
addr := c.DEBUG_getAddr() addr := c.DEBUG_getAddr()
straddr := fmt.Sprintf("%s/%v", net.IP(addr[:]).String(), 8*len(address_prefix)) straddr := fmt.Sprintf("%s/%v", net.IP(addr[:]).String(), 8*len(address_prefix))
err := c.tun.setup(ifname, straddr, mtu) err := c.tun.setup(ifname, straddr, mtu)
if err != nil { panic(err) } if err != nil {
go c.tun.read() panic(err)
go c.tun.write() }
go c.tun.read()
go c.tun.write()
} }
func (c *Core) DEBUG_stopTun() { func (c *Core) DEBUG_stopTun() {
c.tun.close() c.tun.close()
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_newBoxKeys() (*boxPubKey, *boxPrivKey) { func (c *Core) DEBUG_newBoxKeys() (*boxPubKey, *boxPrivKey) {
return newBoxKeys() return newBoxKeys()
} }
func (c *Core) DEBUG_newSigKeys() (*sigPubKey, *sigPrivKey) { func (c *Core) DEBUG_newSigKeys() (*sigPubKey, *sigPrivKey) {
return newSigKeys() return newSigKeys()
} }
func (c *Core) DEBUG_getNodeID(pub *boxPubKey) *NodeID { func (c *Core) DEBUG_getNodeID(pub *boxPubKey) *NodeID {
return getNodeID(pub) return getNodeID(pub)
} }
func (c *Core) DEBUG_getTreeID(pub *sigPubKey) *TreeID { func (c *Core) DEBUG_getTreeID(pub *sigPubKey) *TreeID {
return getTreeID(pub) return getTreeID(pub)
} }
func (c *Core) DEBUG_addrForNodeID(nodeID *NodeID) string { func (c *Core) DEBUG_addrForNodeID(nodeID *NodeID) string {
return net.IP(address_addrForNodeID(nodeID)[:]).String() return net.IP(address_addrForNodeID(nodeID)[:]).String()
} }
func (c *Core) DEBUG_init(bpub []byte, func (c *Core) DEBUG_init(bpub []byte,
bpriv []byte, bpriv []byte,
spub []byte, spub []byte,
spriv []byte) { spriv []byte) {
var boxPub boxPubKey var boxPub boxPubKey
var boxPriv boxPrivKey var boxPriv boxPrivKey
var sigPub sigPubKey var sigPub sigPubKey
var sigPriv sigPrivKey var sigPriv sigPrivKey
copy(boxPub[:], bpub) copy(boxPub[:], bpub)
copy(boxPriv[:], bpriv) copy(boxPriv[:], bpriv)
copy(sigPub[:], spub) copy(sigPub[:], spub)
copy(sigPriv[:], spriv) copy(sigPriv[:], spriv)
c.init(&boxPub, &boxPriv, &sigPub, &sigPriv) c.init(&boxPub, &boxPriv, &sigPub, &sigPriv)
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_setupAndStartGlobalUDPInterface(addrport string) { func (c *Core) DEBUG_setupAndStartGlobalUDPInterface(addrport string) {
iface := udpInterface{} iface := udpInterface{}
iface.init(c, addrport) iface.init(c, addrport)
c.udp = &iface c.udp = &iface
} }
func (c *Core) DEBUG_getGlobalUDPAddr() net.Addr { func (c *Core) DEBUG_getGlobalUDPAddr() net.Addr {
return c.udp.sock.LocalAddr() return c.udp.sock.LocalAddr()
} }
func (c *Core) DEBUG_maybeSendUDPKeys(saddr string) { func (c *Core) DEBUG_maybeSendUDPKeys(saddr string) {
addr := connAddr(saddr) addr := connAddr(saddr)
c.udp.mutex.RLock() c.udp.mutex.RLock()
_, isIn := c.udp.conns[connAddr(addr)] _, isIn := c.udp.conns[connAddr(addr)]
c.udp.mutex.RUnlock() c.udp.mutex.RUnlock()
if !isIn { c.udp.sendKeys(addr) } if !isIn {
c.udp.sendKeys(addr)
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
//* //*
func (c *Core) DEBUG_setupAndStartGlobalTCPInterface(addrport string) { func (c *Core) DEBUG_setupAndStartGlobalTCPInterface(addrport string) {
iface := tcpInterface{} iface := tcpInterface{}
iface.init(c, addrport) iface.init(c, addrport)
c.tcp = &iface c.tcp = &iface
} }
func (c *Core) DEBUG_getGlobalTCPAddr() *net.TCPAddr { func (c *Core) DEBUG_getGlobalTCPAddr() *net.TCPAddr {
return c.tcp.serv.Addr().(*net.TCPAddr) return c.tcp.serv.Addr().(*net.TCPAddr)
} }
func (c *Core) DEBUG_addTCPConn(saddr string) { func (c *Core) DEBUG_addTCPConn(saddr string) {
c.tcp.call(saddr) c.tcp.call(saddr)
} }
//*/ //*/
/* /*
@ -318,22 +331,21 @@ func (c *Core) DEBUG_addKCPConn(saddr string) {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_setLogger(log *log.Logger) { func (c *Core) DEBUG_setLogger(log *log.Logger) {
c.log = log c.log = log
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func DEBUG_simLinkPeers(p, q *peer) { func DEBUG_simLinkPeers(p, q *peer) {
// Sets q.out() to point to p and starts p.linkLoop() // Sets q.out() to point to p and starts p.linkLoop()
plinkIn := make(chan []byte, 1) plinkIn := make(chan []byte, 1)
qlinkIn := make(chan []byte, 1) qlinkIn := make(chan []byte, 1)
p.out = func(bs []byte) { p.out = func(bs []byte) {
go q.handlePacket(bs, qlinkIn) go q.handlePacket(bs, qlinkIn)
} }
q.out = func(bs []byte) { q.out = func(bs []byte) {
go p.handlePacket(bs, plinkIn) go p.handlePacket(bs, plinkIn)
} }
go p.linkLoop(plinkIn) go p.linkLoop(plinkIn)
go q.linkLoop(qlinkIn) go q.linkLoop(qlinkIn)
} }

View File

@ -24,360 +24,420 @@ Slight changes *do* make it blackhole hard, bootstrapping isn't an easy problem
import "sort" import "sort"
import "time" import "time"
//import "fmt" //import "fmt"
// Maximum size for buckets and lookups // Maximum size for buckets and lookups
// Exception for buckets if the next one is non-full // Exception for buckets if the next one is non-full
const dht_bucket_size = 2 // This should be at least 2 const dht_bucket_size = 2 // This should be at least 2
const dht_lookup_size = 2 // This should be at least 1, below 2 is impractical const dht_lookup_size = 2 // This should be at least 1, below 2 is impractical
const dht_bucket_number = 8*NodeIDLen // This shouldn't be changed const dht_bucket_number = 8 * NodeIDLen // This shouldn't be changed
type dhtInfo struct { type dhtInfo struct {
// TODO save their nodeID so we don't need to rehash if we need it again // TODO save their nodeID so we don't need to rehash if we need it again
nodeID_hidden *NodeID nodeID_hidden *NodeID
key boxPubKey key boxPubKey
coords []byte coords []byte
send time.Time // When we last sent a message send time.Time // When we last sent a message
recv time.Time // When we last received a message recv time.Time // When we last received a message
pings int // Decide when to drop pings int // Decide when to drop
} }
func (info *dhtInfo) getNodeID() *NodeID { func (info *dhtInfo) getNodeID() *NodeID {
if info.nodeID_hidden == nil { if info.nodeID_hidden == nil {
info.nodeID_hidden = getNodeID(&info.key) info.nodeID_hidden = getNodeID(&info.key)
} }
return info.nodeID_hidden return info.nodeID_hidden
} }
type bucket struct { type bucket struct {
infos []*dhtInfo infos []*dhtInfo
} }
type dhtReq struct { type dhtReq struct {
key boxPubKey // Key of whoever asked key boxPubKey // Key of whoever asked
coords []byte // Coords of whoever asked coords []byte // Coords of whoever asked
dest NodeID // NodeID they're asking about dest NodeID // NodeID they're asking about
} }
type dhtRes struct { type dhtRes struct {
key boxPubKey // key to respond to key boxPubKey // key to respond to
coords []byte // coords to respond to coords []byte // coords to respond to
dest NodeID dest NodeID
infos []*dhtInfo // response infos []*dhtInfo // response
} }
type dht struct { type dht struct {
core *Core core *Core
nodeID NodeID nodeID NodeID
buckets_hidden [dht_bucket_number]bucket // Extra is for the self-bucket buckets_hidden [dht_bucket_number]bucket // Extra is for the self-bucket
peers chan *dhtInfo // other goroutines put incoming dht updates here peers chan *dhtInfo // other goroutines put incoming dht updates here
reqs map[boxPubKey]map[NodeID]time.Time reqs map[boxPubKey]map[NodeID]time.Time
offset int offset int
} }
func (t *dht) init(c *Core) { func (t *dht) init(c *Core) {
t.core = c t.core = c
t.nodeID = *t.core.GetNodeID() t.nodeID = *t.core.GetNodeID()
t.peers = make(chan *dhtInfo, 1) t.peers = make(chan *dhtInfo, 1)
t.reqs = make(map[boxPubKey]map[NodeID]time.Time) t.reqs = make(map[boxPubKey]map[NodeID]time.Time)
} }
func (t *dht) handleReq(req *dhtReq) { func (t *dht) handleReq(req *dhtReq) {
// Send them what they asked for // Send them what they asked for
loc := t.core.switchTable.getLocator() loc := t.core.switchTable.getLocator()
coords := loc.getCoords() coords := loc.getCoords()
res := dhtRes{ res := dhtRes{
key: t.core.boxPub, key: t.core.boxPub,
coords: coords, coords: coords,
dest: req.dest, dest: req.dest,
infos: t.lookup(&req.dest), infos: t.lookup(&req.dest),
} }
t.sendRes(&res, req) t.sendRes(&res, req)
// Also (possibly) add them to our DHT // Also (possibly) add them to our DHT
info := dhtInfo{ info := dhtInfo{
key: req.key, key: req.key,
coords: req.coords, coords: req.coords,
} }
t.insertIfNew(&info) // This seems DoSable (we just trust their coords...) t.insertIfNew(&info) // This seems DoSable (we just trust their coords...)
//if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam... //if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam...
} }
func (t *dht) handleRes(res *dhtRes) { func (t *dht) handleRes(res *dhtRes) {
reqs, isIn := t.reqs[res.key] reqs, isIn := t.reqs[res.key]
if !isIn { return } if !isIn {
_, isIn = reqs[res.dest] return
if !isIn { return } }
rinfo := dhtInfo{ _, isIn = reqs[res.dest]
key: res.key, if !isIn {
coords: res.coords, return
send: time.Now(), // Technically wrong but should be OK... FIXME or not }
recv: time.Now(), rinfo := dhtInfo{
} key: res.key,
// If they're already in the table, then keep the correct send time coords: res.coords,
bidx, isOK := t.getBucketIndex(rinfo.getNodeID()) send: time.Now(), // Technically wrong but should be OK... FIXME or not
if !isOK { return } recv: time.Now(),
b := t.getBucket(bidx) }
for _, oldinfo := range b.infos { // If they're already in the table, then keep the correct send time
if oldinfo.key == rinfo.key {rinfo.send = oldinfo.send } bidx, isOK := t.getBucketIndex(rinfo.getNodeID())
} if !isOK {
// Insert into table return
t.insert(&rinfo) }
if res.dest == *rinfo.getNodeID() { return } // No infinite recursions b := t.getBucket(bidx)
// ping the nodes we were told about for _, oldinfo := range b.infos {
if len(res.infos) > dht_lookup_size { if oldinfo.key == rinfo.key {
// Ignore any "extra" lookup results rinfo.send = oldinfo.send
res.infos = res.infos[:dht_lookup_size] }
} }
for _, info := range res.infos { // Insert into table
bidx, isOK := t.getBucketIndex(info.getNodeID()) t.insert(&rinfo)
if !isOK { continue } if res.dest == *rinfo.getNodeID() {
b := t.getBucket(bidx) return
if b.contains(info) { continue } // wait for maintenance cycle to get them } // No infinite recursions
t.ping(info, info.getNodeID()) // ping the nodes we were told about
} if len(res.infos) > dht_lookup_size {
// Ignore any "extra" lookup results
res.infos = res.infos[:dht_lookup_size]
}
for _, info := range res.infos {
bidx, isOK := t.getBucketIndex(info.getNodeID())
if !isOK {
continue
}
b := t.getBucket(bidx)
if b.contains(info) {
continue
} // wait for maintenance cycle to get them
t.ping(info, info.getNodeID())
}
} }
func (t *dht) lookup(nodeID *NodeID) []*dhtInfo { func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
// FIXME this allocates a bunch, sorts, and keeps the part it likes // FIXME this allocates a bunch, sorts, and keeps the part it likes
// It would be better to only track the part it likes to begin with // It would be better to only track the part it likes to begin with
addInfos := func (res []*dhtInfo, infos []*dhtInfo) ([]*dhtInfo) { addInfos := func(res []*dhtInfo, infos []*dhtInfo) []*dhtInfo {
for _, info := range infos { for _, info := range infos {
if info == nil { panic ("Should never happen!") } if info == nil {
if true || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) { panic("Should never happen!")
res = append(res, info) }
} if true || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) {
} res = append(res, info)
return res }
} }
var res []*dhtInfo return res
for bidx := 0 ; bidx < t.nBuckets() ; bidx++ { }
b := t.getBucket(bidx) var res []*dhtInfo
res = addInfos(res, b.infos) for bidx := 0; bidx < t.nBuckets(); bidx++ {
} b := t.getBucket(bidx)
doSort := func(infos []*dhtInfo) { res = addInfos(res, b.infos)
less := func (i, j int) bool { }
return dht_firstCloserThanThird(infos[i].getNodeID(), doSort := func(infos []*dhtInfo) {
nodeID, less := func(i, j int) bool {
infos[j].getNodeID()) return dht_firstCloserThanThird(infos[i].getNodeID(),
} nodeID,
sort.SliceStable(infos, less) infos[j].getNodeID())
} }
doSort(res) sort.SliceStable(infos, less)
if len(res) > dht_lookup_size { res = res[:dht_lookup_size] } }
return res doSort(res)
if len(res) > dht_lookup_size {
res = res[:dht_lookup_size]
}
return res
} }
func (t *dht) getBucket(bidx int) *bucket { func (t *dht) getBucket(bidx int) *bucket {
return &t.buckets_hidden[bidx] return &t.buckets_hidden[bidx]
} }
func (t *dht) nBuckets() int { func (t *dht) nBuckets() int {
return len(t.buckets_hidden) return len(t.buckets_hidden)
} }
func (t *dht) insertIfNew(info *dhtInfo) { func (t *dht) insertIfNew(info *dhtInfo) {
//fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords) //fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords)
// Insert a peer if and only if the bucket doesn't already contain it // Insert a peer if and only if the bucket doesn't already contain it
nodeID := info.getNodeID() nodeID := info.getNodeID()
bidx, isOK := t.getBucketIndex(nodeID) bidx, isOK := t.getBucketIndex(nodeID)
if !isOK { return } if !isOK {
b := t.getBucket(bidx) return
if !b.contains(info) { }
// We've never heard this node before b := t.getBucket(bidx)
// TODO is there a better time than "now" to set send/recv to? if !b.contains(info) {
// (Is there another "natural" choice that bootstraps faster?) // We've never heard this node before
info.send = time.Now() // TODO is there a better time than "now" to set send/recv to?
info.recv = info.send // (Is there another "natural" choice that bootstraps faster?)
t.insert(info) info.send = time.Now()
} info.recv = info.send
t.insert(info)
}
} }
func (t *dht) insert(info *dhtInfo) { func (t *dht) insert(info *dhtInfo) {
//fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords) //fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords)
// First update the time on this info // First update the time on this info
info.recv = time.Now() info.recv = time.Now()
// Get the bucket for this node // Get the bucket for this node
nodeID := info.getNodeID() nodeID := info.getNodeID()
bidx, isOK := t.getBucketIndex(nodeID) bidx, isOK := t.getBucketIndex(nodeID)
if !isOK { return } if !isOK {
b := t.getBucket(bidx) return
// First drop any existing entry from the bucket }
b.drop(&info.key) b := t.getBucket(bidx)
// Now add to the *end* of the bucket // First drop any existing entry from the bucket
b.infos = append(b.infos, info) b.drop(&info.key)
// Check if the next bucket is non-full and return early if it is // Now add to the *end* of the bucket
if bidx+1 == t.nBuckets() { return } b.infos = append(b.infos, info)
bnext := t.getBucket(bidx+1) // Check if the next bucket is non-full and return early if it is
if len(bnext.infos) < dht_bucket_size { return } if bidx+1 == t.nBuckets() {
// Shrink from the *front* to requied size return
for len(b.infos) > dht_bucket_size { b.infos = b.infos[1:] } }
bnext := t.getBucket(bidx + 1)
if len(bnext.infos) < dht_bucket_size {
return
}
// Shrink from the *front* to requied size
for len(b.infos) > dht_bucket_size {
b.infos = b.infos[1:]
}
} }
func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) { func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
for bidx := 0 ; bidx < t.nBuckets() ; bidx++ { for bidx := 0; bidx < t.nBuckets(); bidx++ {
them := nodeID[bidx/8] & (0x80 >> byte(bidx % 8)) them := nodeID[bidx/8] & (0x80 >> byte(bidx%8))
me := t.nodeID[bidx/8] & (0x80 >> byte(bidx % 8)) me := t.nodeID[bidx/8] & (0x80 >> byte(bidx%8))
if them != me { return bidx, true } if them != me {
} return bidx, true
return t.nBuckets(), false }
}
return t.nBuckets(), false
} }
func (b *bucket) contains(ninfo *dhtInfo) bool { func (b *bucket) contains(ninfo *dhtInfo) bool {
// Compares if key and coords match // Compares if key and coords match
for _, info := range b.infos { for _, info := range b.infos {
if info == nil { panic("Should never happen") } if info == nil {
if info.key == ninfo.key { panic("Should never happen")
if len(info.coords) != len(ninfo.coords) { return false } }
for idx := 0 ; idx < len(info.coords) ; idx++ { if info.key == ninfo.key {
if info.coords[idx] != ninfo.coords[idx] { return false } if len(info.coords) != len(ninfo.coords) {
} return false
return true }
} for idx := 0; idx < len(info.coords); idx++ {
} if info.coords[idx] != ninfo.coords[idx] {
return false return false
}
}
return true
}
}
return false
} }
func (b *bucket) drop(key *boxPubKey) { func (b *bucket) drop(key *boxPubKey) {
clean := func (infos []*dhtInfo) []*dhtInfo { clean := func(infos []*dhtInfo) []*dhtInfo {
cleaned := infos[:0] cleaned := infos[:0]
for _, info := range infos { for _, info := range infos {
if info.key == *key { continue } if info.key == *key {
cleaned = append(cleaned, info) continue
} }
return cleaned cleaned = append(cleaned, info)
} }
b.infos = clean(b.infos) return cleaned
}
b.infos = clean(b.infos)
} }
func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) { func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
// Send a dhtReq to the node in dhtInfo // Send a dhtReq to the node in dhtInfo
bs := req.encode() bs := req.encode()
shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key) shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
payload, nonce := boxSeal(shared, bs, nil) payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{ p := wire_protoTrafficPacket{
ttl: ^uint64(0), ttl: ^uint64(0),
coords: dest.coords, coords: dest.coords,
toKey: dest.key, toKey: dest.key,
fromKey: t.core.boxPub, fromKey: t.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload:payload, payload: payload,
} }
packet := p.encode() packet := p.encode()
t.core.router.out(packet) t.core.router.out(packet)
reqsToDest, isIn := t.reqs[dest.key] reqsToDest, isIn := t.reqs[dest.key]
if !isIn { if !isIn {
t.reqs[dest.key] = make(map[NodeID]time.Time) t.reqs[dest.key] = make(map[NodeID]time.Time)
reqsToDest, isIn = t.reqs[dest.key] reqsToDest, isIn = t.reqs[dest.key]
if !isIn { panic("This should never happen") } if !isIn {
} panic("This should never happen")
reqsToDest[req.dest] = time.Now() }
}
reqsToDest[req.dest] = time.Now()
} }
func (t *dht) sendRes(res *dhtRes, req *dhtReq) { func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
// Send a reply for a dhtReq // Send a reply for a dhtReq
bs := res.encode() bs := res.encode()
shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.key) shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.key)
payload, nonce := boxSeal(shared, bs, nil) payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{ p := wire_protoTrafficPacket{
ttl: ^uint64(0), ttl: ^uint64(0),
coords: req.coords, coords: req.coords,
toKey: req.key, toKey: req.key,
fromKey: t.core.boxPub, fromKey: t.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload: payload, payload: payload,
} }
packet := p.encode() packet := p.encode()
t.core.router.out(packet) t.core.router.out(packet)
} }
func (b *bucket) isEmpty() bool { func (b *bucket) isEmpty() bool {
return len(b.infos) == 0 return len(b.infos) == 0
} }
func (b *bucket) nextToPing() *dhtInfo { func (b *bucket) nextToPing() *dhtInfo {
// Check the nodes in the bucket // Check the nodes in the bucket
// Return whichever one responded least recently // Return whichever one responded least recently
// Delay of 6 seconds between pinging the same node // Delay of 6 seconds between pinging the same node
// Gives them time to respond // Gives them time to respond
// And time between traffic loss from short term congestion in the network // And time between traffic loss from short term congestion in the network
var toPing *dhtInfo var toPing *dhtInfo
for _, next := range b.infos { for _, next := range b.infos {
if time.Since(next.send) < 6*time.Second { continue } if time.Since(next.send) < 6*time.Second {
if toPing == nil || next.recv.Before(toPing.recv) { toPing = next } continue
} }
return toPing if toPing == nil || next.recv.Before(toPing.recv) {
toPing = next
}
}
return toPing
} }
func (t *dht) getTarget(bidx int) *NodeID { func (t *dht) getTarget(bidx int) *NodeID {
targetID := t.nodeID targetID := t.nodeID
targetID[bidx/8] ^= 0x80 >> byte(bidx % 8) targetID[bidx/8] ^= 0x80 >> byte(bidx%8)
return &targetID return &targetID
} }
func (t *dht) ping(info *dhtInfo, target *NodeID) { func (t *dht) ping(info *dhtInfo, target *NodeID) {
if info.pings > 2 { if info.pings > 2 {
bidx, isOK := t.getBucketIndex(info.getNodeID()) bidx, isOK := t.getBucketIndex(info.getNodeID())
if !isOK { panic("This should never happen") } if !isOK {
b := t.getBucket(bidx) panic("This should never happen")
b.drop(&info.key) }
return b := t.getBucket(bidx)
} b.drop(&info.key)
if target == nil { target = &t.nodeID } return
loc := t.core.switchTable.getLocator() }
coords := loc.getCoords() if target == nil {
req := dhtReq{ target = &t.nodeID
key: t.core.boxPub, }
coords: coords, loc := t.core.switchTable.getLocator()
dest: *target, coords := loc.getCoords()
} req := dhtReq{
info.pings++ key: t.core.boxPub,
info.send = time.Now() coords: coords,
t.sendReq(&req, info) dest: *target,
}
info.pings++
info.send = time.Now()
t.sendReq(&req, info)
} }
func (t *dht) doMaintenance() { func (t *dht) doMaintenance() {
// First clean up reqs // First clean up reqs
for key, reqs := range t.reqs { for key, reqs := range t.reqs {
for target, timeout := range reqs { for target, timeout := range reqs {
if time.Since(timeout) > time.Minute { delete(reqs, target) } if time.Since(timeout) > time.Minute {
} delete(reqs, target)
if len(reqs) == 0 { delete(t.reqs, key) } }
} }
// Ping the least recently contacted node if len(reqs) == 0 {
// This is to make sure we eventually notice when someone times out delete(t.reqs, key)
var oldest *dhtInfo }
last := 0 }
for bidx := 0 ; bidx < t.nBuckets() ; bidx++ { // Ping the least recently contacted node
b := t.getBucket(bidx) // This is to make sure we eventually notice when someone times out
if !b.isEmpty() { var oldest *dhtInfo
last = bidx last := 0
toPing := b.nextToPing() for bidx := 0; bidx < t.nBuckets(); bidx++ {
if toPing == nil { continue } // We've recently pinged everyone in b b := t.getBucket(bidx)
if oldest == nil || toPing.recv.Before(oldest.recv) { if !b.isEmpty() {
oldest = toPing last = bidx
} toPing := b.nextToPing()
} if toPing == nil {
} continue
if oldest != nil { t.ping(oldest, nil) } // if the DHT isn't empty } // We've recently pinged everyone in b
// Refresh buckets if oldest == nil || toPing.recv.Before(oldest.recv) {
if t.offset > last { t.offset = 0 } oldest = toPing
target := t.getTarget(t.offset) }
for _, info := range t.lookup(target) { }
t.ping(info, target) }
break if oldest != nil {
} t.ping(oldest, nil)
t.offset++ } // if the DHT isn't empty
// Refresh buckets
if t.offset > last {
t.offset = 0
}
target := t.getTarget(t.offset)
for _, info := range t.lookup(target) {
t.ping(info, target)
break
}
t.offset++
} }
func dht_firstCloserThanThird(first *NodeID, func dht_firstCloserThanThird(first *NodeID,
second *NodeID, second *NodeID,
third *NodeID) bool { third *NodeID) bool {
for idx := 0 ; idx < NodeIDLen ; idx++ { for idx := 0; idx < NodeIDLen; idx++ {
f := first[idx] ^ second[idx] f := first[idx] ^ second[idx]
t := third[idx] ^ second[idx] t := third[idx] ^ second[idx]
if f == t { continue } if f == t {
return f < t continue
} }
return false return f < t
}
return false
} }

View File

@ -11,335 +11,424 @@ import "time"
import "sync" import "sync"
import "sync/atomic" import "sync/atomic"
import "math" import "math"
//import "fmt" //import "fmt"
type peers struct { type peers struct {
core *Core core *Core
mutex sync.Mutex // Synchronize writes to atomic mutex sync.Mutex // Synchronize writes to atomic
ports atomic.Value //map[Port]*peer, use CoW semantics ports atomic.Value //map[Port]*peer, use CoW semantics
//ports map[Port]*peer //ports map[Port]*peer
} }
func (ps *peers) init(c *Core) { func (ps *peers) init(c *Core) {
ps.mutex.Lock() ps.mutex.Lock()
defer ps.mutex.Unlock() defer ps.mutex.Unlock()
ps.putPorts(make(map[switchPort]*peer)) ps.putPorts(make(map[switchPort]*peer))
ps.core = c ps.core = c
} }
func (ps *peers) getPorts() map[switchPort]*peer { func (ps *peers) getPorts() map[switchPort]*peer {
return ps.ports.Load().(map[switchPort]*peer) return ps.ports.Load().(map[switchPort]*peer)
} }
func (ps *peers) putPorts(ports map[switchPort]*peer) { func (ps *peers) putPorts(ports map[switchPort]*peer) {
ps.ports.Store(ports) ps.ports.Store(ports)
} }
type peer struct { type peer struct {
// Rolling approximation of bandwidth, in bps, used by switch, updated by tcp // Rolling approximation of bandwidth, in bps, used by switch, updated by tcp
// use get/update methods only! (atomic accessors as float64) // use get/update methods only! (atomic accessors as float64)
bandwidth uint64 bandwidth uint64
// BUG: sync/atomic, 32 bit platforms need the above to be the first element // BUG: sync/atomic, 32 bit platforms need the above to be the first element
box boxPubKey box boxPubKey
sig sigPubKey sig sigPubKey
shared boxSharedKey shared boxSharedKey
//in <-chan []byte //in <-chan []byte
//out chan<- []byte //out chan<- []byte
//in func([]byte) //in func([]byte)
out func([]byte) out func([]byte)
core *Core core *Core
port switchPort port switchPort
msgAnc *msgAnnounce msgAnc *msgAnnounce
msgHops []*msgHop msgHops []*msgHop
myMsg *switchMessage myMsg *switchMessage
mySigs []sigInfo mySigs []sigInfo
// This is used to limit how often we perform expensive operations // This is used to limit how often we perform expensive operations
// Specifically, processing switch messages, signing, and verifying sigs // Specifically, processing switch messages, signing, and verifying sigs
// Resets at the start of each tick // Resets at the start of each tick
throttle uint8 throttle uint8
} }
const peer_Throttle = 1 const peer_Throttle = 1
func (p *peer) getBandwidth() float64 { func (p *peer) getBandwidth() float64 {
bits := atomic.LoadUint64(&p.bandwidth) bits := atomic.LoadUint64(&p.bandwidth)
return math.Float64frombits(bits) return math.Float64frombits(bits)
} }
func (p *peer) updateBandwidth(bytes int, duration time.Duration) { func (p *peer) updateBandwidth(bytes int, duration time.Duration) {
if p == nil { return } if p == nil {
for ok := false ; !ok ; { return
oldBits := atomic.LoadUint64(&p.bandwidth) }
oldBandwidth := math.Float64frombits(oldBits) for ok := false; !ok; {
bandwidth := oldBandwidth * 7 / 8 + float64(bytes)/duration.Seconds() oldBits := atomic.LoadUint64(&p.bandwidth)
bits := math.Float64bits(bandwidth) oldBandwidth := math.Float64frombits(oldBits)
ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits) bandwidth := oldBandwidth*7/8 + float64(bytes)/duration.Seconds()
} bits := math.Float64bits(bandwidth)
ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
}
} }
func (ps *peers) newPeer(box *boxPubKey, func (ps *peers) newPeer(box *boxPubKey,
sig *sigPubKey) *peer { sig *sigPubKey) *peer {
//in <-chan []byte, //in <-chan []byte,
//out chan<- []byte) *peer { //out chan<- []byte) *peer {
p := peer{box: *box, p := peer{box: *box,
sig: *sig, sig: *sig,
shared: *getSharedKey(&ps.core.boxPriv, box), shared: *getSharedKey(&ps.core.boxPriv, box),
//in: in, //in: in,
//out: out, //out: out,
core: ps.core} core: ps.core}
ps.mutex.Lock() ps.mutex.Lock()
defer ps.mutex.Unlock() defer ps.mutex.Unlock()
oldPorts := ps.getPorts() oldPorts := ps.getPorts()
newPorts := make(map[switchPort]*peer) newPorts := make(map[switchPort]*peer)
for k,v := range oldPorts{ newPorts[k] = v } for k, v := range oldPorts {
for idx := switchPort(0) ; true ; idx++ { newPorts[k] = v
if _, isIn := newPorts[idx]; !isIn { }
p.port = switchPort(idx) for idx := switchPort(0); true; idx++ {
newPorts[p.port] = &p if _, isIn := newPorts[idx]; !isIn {
break p.port = switchPort(idx)
} newPorts[p.port] = &p
} break
ps.putPorts(newPorts) }
return &p }
ps.putPorts(newPorts)
return &p
} }
func (p *peer) linkLoop(in <-chan []byte) { func (p *peer) linkLoop(in <-chan []byte) {
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(time.Second)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case packet, ok := <-in: case packet, ok := <-in:
if !ok { return } if !ok {
p.handleLinkTraffic(packet) return
case <-ticker.C: { }
p.throttle = 0 p.handleLinkTraffic(packet)
if p.port == 0 { continue } // Don't send announces on selfInterface case <-ticker.C:
// Maybe we shouldn't time out, and instead wait for a kill signal? {
p.myMsg, p.mySigs = p.core.switchTable.createMessage(p.port) p.throttle = 0
p.sendSwitchAnnounce() if p.port == 0 {
} continue
} } // Don't send announces on selfInterface
} // Maybe we shouldn't time out, and instead wait for a kill signal?
p.myMsg, p.mySigs = p.core.switchTable.createMessage(p.port)
p.sendSwitchAnnounce()
}
}
}
} }
func (p *peer) handlePacket(packet []byte, linkIn (chan<- []byte)) { func (p *peer) handlePacket(packet []byte, linkIn chan<- []byte) {
pType, pTypeLen := wire_decode_uint64(packet) pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen==0 { return } if pTypeLen == 0 {
switch (pType) { return
case wire_Traffic: p.handleTraffic(packet, pTypeLen) }
case wire_ProtocolTraffic: p.handleTraffic(packet, pTypeLen) switch pType {
case wire_LinkProtocolTraffic: { case wire_Traffic:
select { p.handleTraffic(packet, pTypeLen)
case linkIn<-packet: case wire_ProtocolTraffic:
default: p.handleTraffic(packet, pTypeLen)
} case wire_LinkProtocolTraffic:
} {
default: /*panic(pType) ;*/ return select {
} case linkIn <- packet:
default:
}
}
default: /*panic(pType) ;*/
return
}
} }
func (p *peer) handleTraffic(packet []byte, pTypeLen int) { func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
ttl, ttlLen := wire_decode_uint64(packet[pTypeLen:]) ttl, ttlLen := wire_decode_uint64(packet[pTypeLen:])
ttlBegin := pTypeLen ttlBegin := pTypeLen
ttlEnd := pTypeLen+ttlLen ttlEnd := pTypeLen + ttlLen
coords, coordLen := wire_decode_coords(packet[ttlEnd:]) coords, coordLen := wire_decode_coords(packet[ttlEnd:])
coordEnd := ttlEnd+coordLen coordEnd := ttlEnd + coordLen
if coordEnd == len(packet) { return } // No payload if coordEnd == len(packet) {
toPort, newTTL := p.core.switchTable.lookup(coords, ttl) return
if toPort == p.port { return } // FIXME? shouldn't happen, does it? would loop } // No payload
to := p.core.peers.getPorts()[toPort] toPort, newTTL := p.core.switchTable.lookup(coords, ttl)
if to == nil { return } if toPort == p.port {
newTTLSlice := wire_encode_uint64(newTTL) return
// This mutates the packet in-place if the length of the TTL changes! } // FIXME? shouldn't happen, does it? would loop
shift := ttlLen - len(newTTLSlice) to := p.core.peers.getPorts()[toPort]
copy(packet[ttlBegin+shift:], newTTLSlice) if to == nil {
copy(packet[shift:], packet[:pTypeLen]) return
packet = packet[shift:] }
to.sendPacket(packet) newTTLSlice := wire_encode_uint64(newTTL)
// This mutates the packet in-place if the length of the TTL changes!
shift := ttlLen - len(newTTLSlice)
copy(packet[ttlBegin+shift:], newTTLSlice)
copy(packet[shift:], packet[:pTypeLen])
packet = packet[shift:]
to.sendPacket(packet)
} }
func (p *peer) sendPacket(packet []byte) { func (p *peer) sendPacket(packet []byte) {
// Is there ever a case where something more complicated is needed? // Is there ever a case where something more complicated is needed?
// What if p.out blocks? // What if p.out blocks?
p.out(packet) p.out(packet)
} }
func (p *peer) sendLinkPacket(packet []byte) { func (p *peer) sendLinkPacket(packet []byte) {
bs, nonce := boxSeal(&p.shared, packet, nil) bs, nonce := boxSeal(&p.shared, packet, nil)
linkPacket := wire_linkProtoTrafficPacket{ linkPacket := wire_linkProtoTrafficPacket{
toKey: p.box, toKey: p.box,
fromKey: p.core.boxPub, fromKey: p.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload: bs, payload: bs,
} }
packet = linkPacket.encode() packet = linkPacket.encode()
p.sendPacket(packet) p.sendPacket(packet)
} }
func (p *peer) handleLinkTraffic(bs []byte) { func (p *peer) handleLinkTraffic(bs []byte) {
packet := wire_linkProtoTrafficPacket{} packet := wire_linkProtoTrafficPacket{}
// TODO throttle on returns? // TODO throttle on returns?
if !packet.decode(bs) { return } if !packet.decode(bs) {
if packet.toKey != p.core.boxPub { return } return
if packet.fromKey != p.box { return } }
payload, isOK := boxOpen(&p.shared, packet.payload, &packet.nonce) if packet.toKey != p.core.boxPub {
if !isOK { return } return
pType, pTypeLen := wire_decode_uint64(payload) }
if pTypeLen == 0 { return } if packet.fromKey != p.box {
switch pType { return
case wire_SwitchAnnounce: p.handleSwitchAnnounce(payload) }
case wire_SwitchHopRequest: p.handleSwitchHopRequest(payload) payload, isOK := boxOpen(&p.shared, packet.payload, &packet.nonce)
case wire_SwitchHop: p.handleSwitchHop(payload) if !isOK {
} return
}
pType, pTypeLen := wire_decode_uint64(payload)
if pTypeLen == 0 {
return
}
switch pType {
case wire_SwitchAnnounce:
p.handleSwitchAnnounce(payload)
case wire_SwitchHopRequest:
p.handleSwitchHopRequest(payload)
case wire_SwitchHop:
p.handleSwitchHop(payload)
}
} }
func (p *peer) handleSwitchAnnounce(packet []byte) { func (p *peer) handleSwitchAnnounce(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchAnnounce") //p.core.log.Println("DEBUG: handleSwitchAnnounce")
anc := msgAnnounce{} anc := msgAnnounce{}
//err := wire_decode_struct(packet, &anc) //err := wire_decode_struct(packet, &anc)
//if err != nil { return } //if err != nil { return }
if !anc.decode(packet) { return } if !anc.decode(packet) {
//if p.msgAnc != nil && anc.Seq != p.msgAnc.Seq { p.msgHops = nil } return
if p.msgAnc == nil || }
anc.root != p.msgAnc.root || //if p.msgAnc != nil && anc.Seq != p.msgAnc.Seq { p.msgHops = nil }
anc.tstamp != p.msgAnc.tstamp || if p.msgAnc == nil ||
anc.seq != p.msgAnc.seq { p.msgHops = nil } anc.root != p.msgAnc.root ||
p.msgAnc = &anc anc.tstamp != p.msgAnc.tstamp ||
p.processSwitchMessage() anc.seq != p.msgAnc.seq {
p.msgHops = nil
}
p.msgAnc = &anc
p.processSwitchMessage()
} }
func (p *peer) requestHop(hop uint64) { func (p *peer) requestHop(hop uint64) {
//p.core.log.Println("DEBUG requestHop") //p.core.log.Println("DEBUG requestHop")
req := msgHopReq{} req := msgHopReq{}
req.root = p.msgAnc.root req.root = p.msgAnc.root
req.tstamp = p.msgAnc.tstamp req.tstamp = p.msgAnc.tstamp
req.seq = p.msgAnc.seq req.seq = p.msgAnc.seq
req.hop = hop req.hop = hop
packet := req.encode() packet := req.encode()
p.sendLinkPacket(packet) p.sendLinkPacket(packet)
} }
func (p *peer) handleSwitchHopRequest(packet []byte) { func (p *peer) handleSwitchHopRequest(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchHopRequest") //p.core.log.Println("DEBUG: handleSwitchHopRequest")
if p.throttle > peer_Throttle { return } if p.throttle > peer_Throttle {
if p.myMsg == nil { return } return
req := msgHopReq{} }
if !req.decode(packet) { return } if p.myMsg == nil {
if req.root != p.myMsg.locator.root { return } return
if req.tstamp != p.myMsg.locator.tstamp { return } }
if req.seq != p.myMsg.seq { return } req := msgHopReq{}
if uint64(len(p.myMsg.locator.coords)) <= req.hop { return } if !req.decode(packet) {
res := msgHop{} return
res.root = p.myMsg.locator.root }
res.tstamp = p.myMsg.locator.tstamp if req.root != p.myMsg.locator.root {
res.seq = p.myMsg.seq return
res.hop = req.hop }
res.port = p.myMsg.locator.coords[res.hop] if req.tstamp != p.myMsg.locator.tstamp {
sinfo := p.getSig(res.hop) return
//p.core.log.Println("DEBUG sig:", sinfo) }
res.next = sinfo.next if req.seq != p.myMsg.seq {
res.sig = sinfo.sig return
packet = res.encode() }
p.sendLinkPacket(packet) if uint64(len(p.myMsg.locator.coords)) <= req.hop {
return
}
res := msgHop{}
res.root = p.myMsg.locator.root
res.tstamp = p.myMsg.locator.tstamp
res.seq = p.myMsg.seq
res.hop = req.hop
res.port = p.myMsg.locator.coords[res.hop]
sinfo := p.getSig(res.hop)
//p.core.log.Println("DEBUG sig:", sinfo)
res.next = sinfo.next
res.sig = sinfo.sig
packet = res.encode()
p.sendLinkPacket(packet)
} }
func (p *peer) handleSwitchHop(packet []byte) { func (p *peer) handleSwitchHop(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchHop") //p.core.log.Println("DEBUG: handleSwitchHop")
if p.throttle > peer_Throttle { return } if p.throttle > peer_Throttle {
if p.msgAnc == nil { return } return
res := msgHop{} }
if !res.decode(packet) { return } if p.msgAnc == nil {
if res.root != p.msgAnc.root { return } return
if res.tstamp != p.msgAnc.tstamp { return } }
if res.seq != p.msgAnc.seq { return } res := msgHop{}
if res.hop != uint64(len(p.msgHops)) { return } // always process in order if !res.decode(packet) {
loc := switchLocator{coords: make([]switchPort, 0, len(p.msgHops)+1)} return
loc.root = res.root }
loc.tstamp = res.tstamp if res.root != p.msgAnc.root {
for _, hop := range p.msgHops { loc.coords = append(loc.coords, hop.port) } return
loc.coords = append(loc.coords, res.port) }
thisHopKey := &res.root if res.tstamp != p.msgAnc.tstamp {
if res.hop != 0 { thisHopKey = &p.msgHops[res.hop-1].next } return
bs := getBytesForSig(&res.next, &loc) }
if p.core.sigs.check(thisHopKey, &res.sig, bs) { if res.seq != p.msgAnc.seq {
p.msgHops = append(p.msgHops, &res) return
p.processSwitchMessage() }
} else { if res.hop != uint64(len(p.msgHops)) {
p.throttle++ return
} } // always process in order
loc := switchLocator{coords: make([]switchPort, 0, len(p.msgHops)+1)}
loc.root = res.root
loc.tstamp = res.tstamp
for _, hop := range p.msgHops {
loc.coords = append(loc.coords, hop.port)
}
loc.coords = append(loc.coords, res.port)
thisHopKey := &res.root
if res.hop != 0 {
thisHopKey = &p.msgHops[res.hop-1].next
}
bs := getBytesForSig(&res.next, &loc)
if p.core.sigs.check(thisHopKey, &res.sig, bs) {
p.msgHops = append(p.msgHops, &res)
p.processSwitchMessage()
} else {
p.throttle++
}
} }
func (p *peer) processSwitchMessage() { func (p *peer) processSwitchMessage() {
//p.core.log.Println("DEBUG: processSwitchMessage") //p.core.log.Println("DEBUG: processSwitchMessage")
if p.throttle > peer_Throttle { return } if p.throttle > peer_Throttle {
if p.msgAnc == nil { return } return
if uint64(len(p.msgHops)) < p.msgAnc.len { }
p.requestHop(uint64(len(p.msgHops))) if p.msgAnc == nil {
return return
} }
p.throttle++ if uint64(len(p.msgHops)) < p.msgAnc.len {
if p.msgAnc.len != uint64(len(p.msgHops)) { return } p.requestHop(uint64(len(p.msgHops)))
msg := switchMessage{} return
coords := make([]switchPort, 0, len(p.msgHops)) }
sigs := make([]sigInfo, 0, len(p.msgHops)) p.throttle++
for idx, hop := range p.msgHops { if p.msgAnc.len != uint64(len(p.msgHops)) {
// Consistency checks, should be redundant (already checked these...) return
if hop.root != p.msgAnc.root { return } }
if hop.tstamp != p.msgAnc.tstamp { return } msg := switchMessage{}
if hop.seq != p.msgAnc.seq { return } coords := make([]switchPort, 0, len(p.msgHops))
if hop.hop != uint64(idx) { return } sigs := make([]sigInfo, 0, len(p.msgHops))
coords = append(coords, hop.port) for idx, hop := range p.msgHops {
sigs = append(sigs, sigInfo{next: hop.next, sig: hop.sig}) // Consistency checks, should be redundant (already checked these...)
} if hop.root != p.msgAnc.root {
msg.from = p.sig return
msg.locator.root = p.msgAnc.root }
msg.locator.tstamp = p.msgAnc.tstamp if hop.tstamp != p.msgAnc.tstamp {
msg.locator.coords = coords return
msg.seq = p.msgAnc.seq }
//msg.RSeq = p.msgAnc.RSeq if hop.seq != p.msgAnc.seq {
//msg.Degree = p.msgAnc.Deg return
p.core.switchTable.handleMessage(&msg, p.port, sigs) }
if len(coords) == 0 { return } if hop.hop != uint64(idx) {
// Reuse locator, set the coords to the peer's coords, to use in dht return
msg.locator.coords = coords[:len(coords)-1] }
// Pass a mesage to the dht informing it that this peer (still) exists coords = append(coords, hop.port)
dinfo := dhtInfo{ sigs = append(sigs, sigInfo{next: hop.next, sig: hop.sig})
key: p.box, }
coords: msg.locator.getCoords(), msg.from = p.sig
} msg.locator.root = p.msgAnc.root
p.core.dht.peers<-&dinfo msg.locator.tstamp = p.msgAnc.tstamp
msg.locator.coords = coords
msg.seq = p.msgAnc.seq
//msg.RSeq = p.msgAnc.RSeq
//msg.Degree = p.msgAnc.Deg
p.core.switchTable.handleMessage(&msg, p.port, sigs)
if len(coords) == 0 {
return
}
// Reuse locator, set the coords to the peer's coords, to use in dht
msg.locator.coords = coords[:len(coords)-1]
// Pass a mesage to the dht informing it that this peer (still) exists
dinfo := dhtInfo{
key: p.box,
coords: msg.locator.getCoords(),
}
p.core.dht.peers <- &dinfo
} }
func (p *peer) sendSwitchAnnounce() { func (p *peer) sendSwitchAnnounce() {
anc := msgAnnounce{} anc := msgAnnounce{}
anc.root = p.myMsg.locator.root anc.root = p.myMsg.locator.root
anc.tstamp = p.myMsg.locator.tstamp anc.tstamp = p.myMsg.locator.tstamp
anc.seq = p.myMsg.seq anc.seq = p.myMsg.seq
anc.len = uint64(len(p.myMsg.locator.coords)) anc.len = uint64(len(p.myMsg.locator.coords))
//anc.Deg = p.myMsg.Degree //anc.Deg = p.myMsg.Degree
//anc.RSeq = p.myMsg.RSeq //anc.RSeq = p.myMsg.RSeq
packet := anc.encode() packet := anc.encode()
p.sendLinkPacket(packet) p.sendLinkPacket(packet)
} }
func (p *peer) getSig(hop uint64) sigInfo { func (p *peer) getSig(hop uint64) sigInfo {
//p.core.log.Println("DEBUG getSig:", len(p.mySigs), hop) //p.core.log.Println("DEBUG getSig:", len(p.mySigs), hop)
if hop < uint64(len(p.mySigs)) { return p.mySigs[hop] } if hop < uint64(len(p.mySigs)) {
bs := getBytesForSig(&p.sig, &p.myMsg.locator) return p.mySigs[hop]
sig := sigInfo{} }
sig.next = p.sig bs := getBytesForSig(&p.sig, &p.myMsg.locator)
sig.sig = *sign(&p.core.sigPriv, bs) sig := sigInfo{}
p.mySigs = append(p.mySigs, sig) sig.next = p.sig
//p.core.log.Println("DEBUG sig bs:", bs) sig.sig = *sign(&p.core.sigPriv, bs)
return sig p.mySigs = append(p.mySigs, sig)
//p.core.log.Println("DEBUG sig bs:", bs)
return sig
} }
func getBytesForSig(next *sigPubKey, loc *switchLocator) []byte { func getBytesForSig(next *sigPubKey, loc *switchLocator) []byte {
//bs, err := wire_encode_locator(loc) //bs, err := wire_encode_locator(loc)
//if err != nil { panic(err) } //if err != nil { panic(err) }
bs := append([]byte(nil), next[:]...) bs := append([]byte(nil), next[:]...)
bs = append(bs, wire_encode_locator(loc)...) bs = append(bs, wire_encode_locator(loc)...)
//bs := wire_encode_locator(loc) //bs := wire_encode_locator(loc)
//bs = append(next[:], bs...) //bs = append(next[:], bs...)
return bs return bs
} }

View File

@ -23,198 +23,267 @@ package yggdrasil
// The router then runs some sanity checks before passing it to the tun // The router then runs some sanity checks before passing it to the tun
import "time" import "time"
//import "fmt" //import "fmt"
//import "net" //import "net"
type router struct { type router struct {
core *Core core *Core
addr address addr address
in <-chan []byte // packets we received from the network, link to peer's "out" in <-chan []byte // packets we received from the network, link to peer's "out"
out func([]byte) // packets we're sending to the network, link to peer's "in" out func([]byte) // packets we're sending to the network, link to peer's "in"
recv chan<- []byte // place where the tun pulls received packets from recv chan<- []byte // place where the tun pulls received packets from
send <-chan []byte // place where the tun puts outgoing packets send <-chan []byte // place where the tun puts outgoing packets
reset chan struct{} // signal that coords changed (re-init sessions/dht) reset chan struct{} // signal that coords changed (re-init sessions/dht)
} }
func (r *router) init(core *Core) { func (r *router) init(core *Core) {
r.core = core r.core = core
r.addr = *address_addrForNodeID(&r.core.dht.nodeID) r.addr = *address_addrForNodeID(&r.core.dht.nodeID)
in := make(chan []byte, 1) // TODO something better than this... in := make(chan []byte, 1) // TODO something better than this...
p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub)//, out, in) p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub) //, out, in)
// TODO set in/out functions on the new peer... // TODO set in/out functions on the new peer...
p.out = func(packet []byte) { in<-packet } // FIXME in theory it blocks... p.out = func(packet []byte) { in <- packet } // FIXME in theory it blocks...
r.in = in r.in = in
// TODO? make caller responsible for go-ing if it needs to not block // TODO? make caller responsible for go-ing if it needs to not block
r.out = func(packet []byte) { p.handlePacket(packet, nil) } r.out = func(packet []byte) { p.handlePacket(packet, nil) }
// TODO attach these to the tun // TODO attach these to the tun
// Maybe that's the core's job... // Maybe that's the core's job...
// It creates tun, creates the router, creates channels, sets them? // It creates tun, creates the router, creates channels, sets them?
recv := make(chan []byte, 1) recv := make(chan []byte, 1)
send := make(chan []byte, 1) send := make(chan []byte, 1)
r.recv = recv r.recv = recv
r.send = send r.send = send
r.core.tun.recv = recv r.core.tun.recv = recv
r.core.tun.send = send r.core.tun.send = send
r.reset = make(chan struct{}, 1) r.reset = make(chan struct{}, 1)
go r.mainLoop() go r.mainLoop()
} }
func (r *router) mainLoop() { func (r *router) mainLoop() {
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(time.Second)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case p := <-r.in: r.handleIn(p) case p := <-r.in:
case p := <-r.send: r.sendPacket(p) r.handleIn(p)
case info := <-r.core.dht.peers: r.core.dht.insert(info) //r.core.dht.insertIfNew(info) case p := <-r.send:
case <-r.reset: r.core.sessions.resetInits() r.sendPacket(p)
case <-ticker.C: { case info := <-r.core.dht.peers:
// Any periodic maintenance stuff goes here r.core.dht.insert(info) //r.core.dht.insertIfNew(info)
r.core.dht.doMaintenance() case <-r.reset:
util_getBytes() // To slowly drain things r.core.sessions.resetInits()
} case <-ticker.C:
} {
} // Any periodic maintenance stuff goes here
r.core.dht.doMaintenance()
util_getBytes() // To slowly drain things
}
}
}
} }
func (r *router) sendPacket(bs []byte) { func (r *router) sendPacket(bs []byte) {
if len(bs) < 40 { panic("Tried to send a packet shorter than a header...") } if len(bs) < 40 {
var sourceAddr address panic("Tried to send a packet shorter than a header...")
var sourceSubnet subnet }
copy(sourceAddr[:], bs[8:]) var sourceAddr address
copy(sourceSubnet[:], bs[8:]) var sourceSubnet subnet
if !sourceAddr.isValid() && !sourceSubnet.isValid() { return } copy(sourceAddr[:], bs[8:])
var dest address copy(sourceSubnet[:], bs[8:])
copy(dest[:], bs[24:]) if !sourceAddr.isValid() && !sourceSubnet.isValid() {
var snet subnet return
copy(snet[:], bs[24:]) }
if !dest.isValid() && !snet.isValid() { return } var dest address
doSearch := func (packet []byte) { copy(dest[:], bs[24:])
var nodeID, mask *NodeID var snet subnet
if dest.isValid() { nodeID, mask = dest.getNodeIDandMask() } copy(snet[:], bs[24:])
if snet.isValid() { nodeID, mask = snet.getNodeIDandMask() } if !dest.isValid() && !snet.isValid() {
sinfo, isIn := r.core.searches.searches[*nodeID] return
if !isIn { sinfo = r.core.searches.createSearch(nodeID, mask) } }
if packet != nil { sinfo.packet = packet } doSearch := func(packet []byte) {
r.core.searches.sendSearch(sinfo) var nodeID, mask *NodeID
} if dest.isValid() {
var sinfo *sessionInfo nodeID, mask = dest.getNodeIDandMask()
var isIn bool }
if dest.isValid() { sinfo, isIn = r.core.sessions.getByTheirAddr(&dest) } if snet.isValid() {
if snet.isValid() { sinfo, isIn = r.core.sessions.getByTheirSubnet(&snet) } nodeID, mask = snet.getNodeIDandMask()
switch { }
case !isIn || !sinfo.init: sinfo, isIn := r.core.searches.searches[*nodeID]
// No or unintiialized session, so we need to search first if !isIn {
doSearch(bs) sinfo = r.core.searches.createSearch(nodeID, mask)
case time.Since(sinfo.time) > 6*time.Second: }
// We haven't heard from the dest in a while; they may have changed coords if packet != nil {
// Maybe the connection is idle, or maybe one of us changed coords sinfo.packet = packet
// Try searching to either ping them (a little overhead) or fix the coords }
doSearch(nil) r.core.searches.sendSearch(sinfo)
fallthrough }
//default: go func() { sinfo.send<-bs }() var sinfo *sessionInfo
default: sinfo.send<-bs var isIn bool
} if dest.isValid() {
sinfo, isIn = r.core.sessions.getByTheirAddr(&dest)
}
if snet.isValid() {
sinfo, isIn = r.core.sessions.getByTheirSubnet(&snet)
}
switch {
case !isIn || !sinfo.init:
// No or unintiialized session, so we need to search first
doSearch(bs)
case time.Since(sinfo.time) > 6*time.Second:
// We haven't heard from the dest in a while; they may have changed coords
// Maybe the connection is idle, or maybe one of us changed coords
// Try searching to either ping them (a little overhead) or fix the coords
doSearch(nil)
fallthrough
//default: go func() { sinfo.send<-bs }()
default:
sinfo.send <- bs
}
} }
func (r *router) recvPacket(bs []byte, theirAddr *address) { func (r *router) recvPacket(bs []byte, theirAddr *address) {
// TODO pass their NodeID, check *that* instead // TODO pass their NodeID, check *that* instead
// Or store their address in the session?... // Or store their address in the session?...
//fmt.Println("Recv packet") //fmt.Println("Recv packet")
if theirAddr == nil { panic("Should not happen ever") } if theirAddr == nil {
if len(bs) < 24 { return } panic("Should not happen ever")
var source address }
copy(source[:], bs[8:]) if len(bs) < 24 {
var snet subnet return
copy(snet[:], bs[8:]) }
if !source.isValid() && !snet.isValid() { return } var source address
//go func() { r.recv<-bs }() copy(source[:], bs[8:])
r.recv<-bs var snet subnet
copy(snet[:], bs[8:])
if !source.isValid() && !snet.isValid() {
return
}
//go func() { r.recv<-bs }()
r.recv <- bs
} }
func (r *router) handleIn(packet []byte) { func (r *router) handleIn(packet []byte) {
pType, pTypeLen := wire_decode_uint64(packet) pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen == 0 { return } if pTypeLen == 0 {
switch pType { return
case wire_Traffic: r.handleTraffic(packet) }
case wire_ProtocolTraffic: r.handleProto(packet) switch pType {
default: /*panic("Should not happen in testing") ;*/ return case wire_Traffic:
} r.handleTraffic(packet)
case wire_ProtocolTraffic:
r.handleProto(packet)
default: /*panic("Should not happen in testing") ;*/
return
}
} }
func (r *router) handleTraffic(packet []byte) { func (r *router) handleTraffic(packet []byte) {
defer util_putBytes(packet) defer util_putBytes(packet)
p := wire_trafficPacket{} p := wire_trafficPacket{}
if !p.decode(packet) { return } if !p.decode(packet) {
sinfo, isIn := r.core.sessions.getSessionForHandle(&p.handle) return
if !isIn { return } }
//go func () { sinfo.recv<-&p }() sinfo, isIn := r.core.sessions.getSessionForHandle(&p.handle)
sinfo.recv<-&p if !isIn {
return
}
//go func () { sinfo.recv<-&p }()
sinfo.recv <- &p
} }
func (r *router) handleProto(packet []byte) { func (r *router) handleProto(packet []byte) {
// First parse the packet // First parse the packet
p := wire_protoTrafficPacket{} p := wire_protoTrafficPacket{}
if !p.decode(packet) { return } if !p.decode(packet) {
// Now try to open the payload return
var sharedKey *boxSharedKey }
//var theirPermPub *boxPubKey // Now try to open the payload
if p.toKey == r.core.boxPub { var sharedKey *boxSharedKey
// Try to open using our permanent key //var theirPermPub *boxPubKey
sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.fromKey) if p.toKey == r.core.boxPub {
} else { return } // Try to open using our permanent key
bs, isOK := boxOpen(sharedKey, p.payload, &p.nonce) sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.fromKey)
if !isOK { return } } else {
// Now do something with the bytes in bs... return
// send dht messages to dht, sessionRefresh to sessions, data to tun... }
// For data, should check that key and IP match... bs, isOK := boxOpen(sharedKey, p.payload, &p.nonce)
bsType, bsTypeLen := wire_decode_uint64(bs) if !isOK {
if bsTypeLen == 0 { return } return
//fmt.Println("RECV bytes:", bs) }
switch bsType { // Now do something with the bytes in bs...
case wire_SessionPing: r.handlePing(bs, &p.fromKey) // send dht messages to dht, sessionRefresh to sessions, data to tun...
case wire_SessionPong: r.handlePong(bs, &p.fromKey) // For data, should check that key and IP match...
case wire_DHTLookupRequest: r.handleDHTReq(bs, &p.fromKey) bsType, bsTypeLen := wire_decode_uint64(bs)
case wire_DHTLookupResponse: r.handleDHTRes(bs, &p.fromKey) if bsTypeLen == 0 {
case wire_SearchRequest: r.handleSearchReq(bs) return
case wire_SearchResponse: r.handleSearchRes(bs) }
default: /*panic("Should not happen in testing") ;*/ return //fmt.Println("RECV bytes:", bs)
} switch bsType {
case wire_SessionPing:
r.handlePing(bs, &p.fromKey)
case wire_SessionPong:
r.handlePong(bs, &p.fromKey)
case wire_DHTLookupRequest:
r.handleDHTReq(bs, &p.fromKey)
case wire_DHTLookupResponse:
r.handleDHTRes(bs, &p.fromKey)
case wire_SearchRequest:
r.handleSearchReq(bs)
case wire_SearchResponse:
r.handleSearchRes(bs)
default: /*panic("Should not happen in testing") ;*/
return
}
} }
func (r *router) handlePing(bs []byte, fromKey *boxPubKey) { func (r *router) handlePing(bs []byte, fromKey *boxPubKey) {
ping := sessionPing{} ping := sessionPing{}
if !ping.decode(bs) { return } if !ping.decode(bs) {
ping.sendPermPub = *fromKey return
r.core.sessions.handlePing(&ping) }
ping.sendPermPub = *fromKey
r.core.sessions.handlePing(&ping)
} }
func (r *router) handlePong(bs []byte, fromKey *boxPubKey) { func (r *router) handlePong(bs []byte, fromKey *boxPubKey) {
r.handlePing(bs, fromKey) r.handlePing(bs, fromKey)
} }
func (r *router) handleDHTReq(bs []byte, fromKey *boxPubKey) { func (r *router) handleDHTReq(bs []byte, fromKey *boxPubKey) {
req := dhtReq{} req := dhtReq{}
if !req.decode(bs) { return } if !req.decode(bs) {
if req.key != *fromKey { return } return
r.core.dht.handleReq(&req) }
if req.key != *fromKey {
return
}
r.core.dht.handleReq(&req)
} }
func (r *router) handleDHTRes(bs []byte, fromKey *boxPubKey) { func (r *router) handleDHTRes(bs []byte, fromKey *boxPubKey) {
res := dhtRes{} res := dhtRes{}
if !res.decode(bs) { return } if !res.decode(bs) {
if res.key != *fromKey { return } return
r.core.dht.handleRes(&res) }
if res.key != *fromKey {
return
}
r.core.dht.handleRes(&res)
} }
func (r *router) handleSearchReq(bs []byte) { func (r *router) handleSearchReq(bs []byte) {
req := searchReq{} req := searchReq{}
if !req.decode(bs) { return } if !req.decode(bs) {
r.core.searches.handleSearchReq(&req) return
}
r.core.searches.handleSearchReq(&req)
} }
func (r *router) handleSearchRes(bs []byte) { func (r *router) handleSearchRes(bs []byte) {
res := searchRes{} res := searchRes{}
if !res.decode(bs) { return } if !res.decode(bs) {
r.core.searches.handleSearchRes(&res) return
}
r.core.searches.handleSearchRes(&res)
} }

View File

@ -17,152 +17,162 @@ package yggdrasil
// This hides bugs, which I don't want to do right now // This hides bugs, which I don't want to do right now
import "time" import "time"
//import "fmt" //import "fmt"
type searchInfo struct { type searchInfo struct {
dest *NodeID dest *NodeID
mask *NodeID mask *NodeID
time time.Time time time.Time
packet []byte packet []byte
} }
type searches struct { type searches struct {
core *Core core *Core
searches map[NodeID]*searchInfo searches map[NodeID]*searchInfo
} }
func (s *searches) init(core *Core) { func (s *searches) init(core *Core) {
s.core = core s.core = core
s.searches = make(map[NodeID]*searchInfo) s.searches = make(map[NodeID]*searchInfo)
} }
func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo { func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
now := time.Now() now := time.Now()
for dest, sinfo := range s.searches { for dest, sinfo := range s.searches {
if now.Sub(sinfo.time) > time.Minute { if now.Sub(sinfo.time) > time.Minute {
delete(s.searches, dest) delete(s.searches, dest)
} }
} }
info := searchInfo{ info := searchInfo{
dest: dest, dest: dest,
mask: mask, mask: mask,
time: now.Add(-time.Second), time: now.Add(-time.Second),
} }
s.searches[*dest] = &info s.searches[*dest] = &info
return &info return &info
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
type searchReq struct { type searchReq struct {
key boxPubKey // Who I am key boxPubKey // Who I am
coords []byte // Where I am coords []byte // Where I am
dest NodeID // Who I'm trying to connect to dest NodeID // Who I'm trying to connect to
} }
type searchRes struct { type searchRes struct {
key boxPubKey // Who I am key boxPubKey // Who I am
coords []byte // Where I am coords []byte // Where I am
dest NodeID // Who I was asked about dest NodeID // Who I was asked about
} }
func (s *searches) sendSearch(info *searchInfo) { func (s *searches) sendSearch(info *searchInfo) {
now := time.Now() now := time.Now()
if now.Sub(info.time) < time.Second { return } if now.Sub(info.time) < time.Second {
loc := s.core.switchTable.getLocator() return
coords := loc.getCoords() }
req := searchReq{ loc := s.core.switchTable.getLocator()
key: s.core.boxPub, coords := loc.getCoords()
coords: coords, req := searchReq{
dest: *info.dest, key: s.core.boxPub,
} coords: coords,
info.time = time.Now() dest: *info.dest,
s.handleSearchReq(&req) }
info.time = time.Now()
s.handleSearchReq(&req)
} }
func (s *searches) handleSearchReq(req *searchReq) { func (s *searches) handleSearchReq(req *searchReq) {
lookup := s.core.dht.lookup(&req.dest) lookup := s.core.dht.lookup(&req.dest)
sent := false sent := false
//fmt.Println("DEBUG len:", len(lookup)) //fmt.Println("DEBUG len:", len(lookup))
for _, info := range lookup { for _, info := range lookup {
//fmt.Println("DEBUG lup:", info.getNodeID()) //fmt.Println("DEBUG lup:", info.getNodeID())
if dht_firstCloserThanThird(info.getNodeID(), if dht_firstCloserThanThird(info.getNodeID(),
&req.dest, &req.dest,
&s.core.dht.nodeID) { &s.core.dht.nodeID) {
s.forwardSearch(req, info) s.forwardSearch(req, info)
sent = true sent = true
break break
} }
} }
if !sent { s.sendSearchRes(req) } if !sent {
s.sendSearchRes(req)
}
} }
func (s *searches) forwardSearch(req *searchReq, next *dhtInfo) { func (s *searches) forwardSearch(req *searchReq, next *dhtInfo) {
//fmt.Println("DEBUG fwd:", req.dest, next.getNodeID()) //fmt.Println("DEBUG fwd:", req.dest, next.getNodeID())
bs := req.encode() bs := req.encode()
shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &next.key) shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &next.key)
payload, nonce := boxSeal(shared, bs, nil) payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{ p := wire_protoTrafficPacket{
ttl: ^uint64(0), ttl: ^uint64(0),
coords: next.coords, coords: next.coords,
toKey: next.key, toKey: next.key,
fromKey: s.core.boxPub, fromKey: s.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload: payload, payload: payload,
} }
packet := p.encode() packet := p.encode()
s.core.router.out(packet) s.core.router.out(packet)
} }
func (s *searches) sendSearchRes(req *searchReq) { func (s *searches) sendSearchRes(req *searchReq) {
//fmt.Println("DEBUG res:", req.dest, s.core.dht.nodeID) //fmt.Println("DEBUG res:", req.dest, s.core.dht.nodeID)
loc := s.core.switchTable.getLocator() loc := s.core.switchTable.getLocator()
coords := loc.getCoords() coords := loc.getCoords()
res := searchRes{ res := searchRes{
key: s.core.boxPub, key: s.core.boxPub,
coords: coords, coords: coords,
dest: req.dest, dest: req.dest,
} }
bs := res.encode() bs := res.encode()
shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &req.key) shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &req.key)
payload, nonce := boxSeal(shared, bs, nil) payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{ p := wire_protoTrafficPacket{
ttl: ^uint64(0), ttl: ^uint64(0),
coords: req.coords, coords: req.coords,
toKey: req.key, toKey: req.key,
fromKey: s.core.boxPub, fromKey: s.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload: payload, payload: payload,
} }
packet := p.encode() packet := p.encode()
s.core.router.out(packet) s.core.router.out(packet)
} }
func (s *searches) handleSearchRes(res *searchRes) { func (s *searches) handleSearchRes(res *searchRes) {
info, isIn := s.searches[res.dest] info, isIn := s.searches[res.dest]
if !isIn { return } if !isIn {
them := getNodeID(&res.key) return
var destMasked NodeID }
var themMasked NodeID them := getNodeID(&res.key)
for idx := 0 ; idx < NodeIDLen ; idx++ { var destMasked NodeID
destMasked[idx] = info.dest[idx] & info.mask[idx] var themMasked NodeID
themMasked[idx] = them[idx] & info.mask[idx] for idx := 0; idx < NodeIDLen; idx++ {
} destMasked[idx] = info.dest[idx] & info.mask[idx]
//fmt.Println("DEBUG search res1:", themMasked, destMasked) themMasked[idx] = them[idx] & info.mask[idx]
//fmt.Println("DEBUG search res2:", *them, *info.dest, *info.mask) }
if themMasked != destMasked { return } //fmt.Println("DEBUG search res1:", themMasked, destMasked)
// They match, so create a session and send a sessionRequest //fmt.Println("DEBUG search res2:", *them, *info.dest, *info.mask)
sinfo, isIn := s.core.sessions.getByTheirPerm(&res.key) if themMasked != destMasked {
if !isIn { return
sinfo = s.core.sessions.createSession(&res.key) }
_, isIn := s.core.sessions.getByTheirPerm(&res.key) // They match, so create a session and send a sessionRequest
if !isIn { panic("This should never happen") } sinfo, isIn := s.core.sessions.getByTheirPerm(&res.key)
} if !isIn {
// FIXME replay attacks could mess with coords? sinfo = s.core.sessions.createSession(&res.key)
sinfo.coords = res.coords _, isIn := s.core.sessions.getByTheirPerm(&res.key)
sinfo.packet = info.packet if !isIn {
s.core.sessions.ping(sinfo) panic("This should never happen")
// Cleanup }
delete(s.searches, res.dest) }
// FIXME replay attacks could mess with coords?
sinfo.coords = res.coords
sinfo.packet = info.packet
s.core.sessions.ping(sinfo)
// Cleanup
delete(s.searches, res.dest)
} }

View File

@ -7,281 +7,315 @@ package yggdrasil
import "time" import "time"
type sessionInfo struct { type sessionInfo struct {
core *Core core *Core
theirAddr address theirAddr address
theirSubnet subnet theirSubnet subnet
theirPermPub boxPubKey theirPermPub boxPubKey
theirSesPub boxPubKey theirSesPub boxPubKey
mySesPub boxPubKey mySesPub boxPubKey
mySesPriv boxPrivKey mySesPriv boxPrivKey
sharedSesKey boxSharedKey // derived from session keys sharedSesKey boxSharedKey // derived from session keys
theirHandle handle theirHandle handle
myHandle handle myHandle handle
theirNonce boxNonce theirNonce boxNonce
myNonce boxNonce myNonce boxNonce
time time.Time // Time we last received a packet time time.Time // Time we last received a packet
coords []byte // coords of destination coords []byte // coords of destination
packet []byte // a buffered packet, sent immediately on ping/pong packet []byte // a buffered packet, sent immediately on ping/pong
init bool // Reset if coords change init bool // Reset if coords change
send chan []byte send chan []byte
recv chan *wire_trafficPacket recv chan *wire_trafficPacket
nonceMask uint64 nonceMask uint64
tstamp int64 // tstamp from their last session ping, replay attack mitigation tstamp int64 // tstamp from their last session ping, replay attack mitigation
} }
// FIXME replay attacks (include nonce or some sequence number) // FIXME replay attacks (include nonce or some sequence number)
type sessionPing struct { type sessionPing struct {
sendPermPub boxPubKey // Sender's permanent key sendPermPub boxPubKey // Sender's permanent key
handle handle // Random number to ID session handle handle // Random number to ID session
sendSesPub boxPubKey // Session key to use sendSesPub boxPubKey // Session key to use
coords []byte coords []byte
tstamp int64 // unix time, but the only real requirement is that it increases tstamp int64 // unix time, but the only real requirement is that it increases
isPong bool isPong bool
} }
// Returns true if the session was updated, false otherwise // Returns true if the session was updated, false otherwise
func (s *sessionInfo) update(p *sessionPing) bool { func (s *sessionInfo) update(p *sessionPing) bool {
if !(p.tstamp > s.tstamp) { return false } if !(p.tstamp > s.tstamp) {
if p.sendPermPub != s.theirPermPub { return false } // Shouldn't happen return false
if p.sendSesPub != s.theirSesPub { }
// FIXME need to protect against replay attacks if p.sendPermPub != s.theirPermPub {
// Put a sequence number or a timestamp or something in the pings? return false
// Or just return false, make the session time out? } // Shouldn't happen
s.theirSesPub = p.sendSesPub if p.sendSesPub != s.theirSesPub {
s.theirHandle = p.handle // FIXME need to protect against replay attacks
s.sharedSesKey = *getSharedKey(&s.mySesPriv, &s.theirSesPub) // Put a sequence number or a timestamp or something in the pings?
s.theirNonce = boxNonce{} // Or just return false, make the session time out?
s.nonceMask = 0 s.theirSesPub = p.sendSesPub
} s.theirHandle = p.handle
s.coords = append([]byte{}, p.coords...) s.sharedSesKey = *getSharedKey(&s.mySesPriv, &s.theirSesPub)
s.time = time.Now() s.theirNonce = boxNonce{}
s.tstamp = p.tstamp s.nonceMask = 0
s.init = true }
return true s.coords = append([]byte{}, p.coords...)
s.time = time.Now()
s.tstamp = p.tstamp
s.init = true
return true
} }
func (s *sessionInfo) timedout() bool { func (s *sessionInfo) timedout() bool {
return time.Since(s.time) > time.Minute return time.Since(s.time) > time.Minute
} }
type sessions struct { type sessions struct {
core *Core core *Core
// Maps known permanent keys to their shared key, used by DHT a lot // Maps known permanent keys to their shared key, used by DHT a lot
permShared map[boxPubKey]*boxSharedKey permShared map[boxPubKey]*boxSharedKey
// Maps (secret) handle onto session info // Maps (secret) handle onto session info
sinfos map[handle]*sessionInfo sinfos map[handle]*sessionInfo
// Maps mySesPub onto handle // Maps mySesPub onto handle
byMySes map[boxPubKey]*handle byMySes map[boxPubKey]*handle
// Maps theirPermPub onto handle // Maps theirPermPub onto handle
byTheirPerm map[boxPubKey]*handle byTheirPerm map[boxPubKey]*handle
addrToPerm map[address]*boxPubKey addrToPerm map[address]*boxPubKey
subnetToPerm map[subnet]*boxPubKey subnetToPerm map[subnet]*boxPubKey
} }
func (ss *sessions) init(core *Core) { func (ss *sessions) init(core *Core) {
ss.core = core ss.core = core
ss.permShared = make(map[boxPubKey]*boxSharedKey) ss.permShared = make(map[boxPubKey]*boxSharedKey)
ss.sinfos = make(map[handle]*sessionInfo) ss.sinfos = make(map[handle]*sessionInfo)
ss.byMySes = make(map[boxPubKey]*handle) ss.byMySes = make(map[boxPubKey]*handle)
ss.byTheirPerm = make(map[boxPubKey]*handle) ss.byTheirPerm = make(map[boxPubKey]*handle)
ss.addrToPerm = make(map[address]*boxPubKey) ss.addrToPerm = make(map[address]*boxPubKey)
ss.subnetToPerm = make(map[subnet]*boxPubKey) ss.subnetToPerm = make(map[subnet]*boxPubKey)
} }
func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) { func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) {
sinfo, isIn := ss.sinfos[*handle] sinfo, isIn := ss.sinfos[*handle]
if isIn && sinfo.timedout() { if isIn && sinfo.timedout() {
// We have a session, but it has timed out // We have a session, but it has timed out
return nil, false return nil, false
} }
return sinfo, isIn return sinfo, isIn
} }
func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) { func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) {
h, isIn := ss.byMySes[*key] h, isIn := ss.byMySes[*key]
if !isIn { return nil, false } if !isIn {
sinfo, isIn := ss.getSessionForHandle(h) return nil, false
return sinfo, isIn }
sinfo, isIn := ss.getSessionForHandle(h)
return sinfo, isIn
} }
func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) { func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) {
h, isIn := ss.byTheirPerm[*key] h, isIn := ss.byTheirPerm[*key]
if !isIn { return nil, false } if !isIn {
sinfo, isIn := ss.getSessionForHandle(h) return nil, false
return sinfo, isIn }
sinfo, isIn := ss.getSessionForHandle(h)
return sinfo, isIn
} }
func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) { func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) {
p, isIn := ss.addrToPerm[*addr] p, isIn := ss.addrToPerm[*addr]
if !isIn { return nil, false } if !isIn {
sinfo, isIn := ss.getByTheirPerm(p) return nil, false
return sinfo, isIn }
sinfo, isIn := ss.getByTheirPerm(p)
return sinfo, isIn
} }
func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) { func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) {
p, isIn := ss.subnetToPerm[*snet] p, isIn := ss.subnetToPerm[*snet]
if !isIn { return nil, false } if !isIn {
sinfo, isIn := ss.getByTheirPerm(p) return nil, false
return sinfo, isIn }
sinfo, isIn := ss.getByTheirPerm(p)
return sinfo, isIn
} }
func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo { func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
sinfo := sessionInfo{} sinfo := sessionInfo{}
sinfo.core = ss.core sinfo.core = ss.core
sinfo.theirPermPub = *theirPermKey sinfo.theirPermPub = *theirPermKey
pub, priv := newBoxKeys() pub, priv := newBoxKeys()
sinfo.mySesPub = *pub sinfo.mySesPub = *pub
sinfo.mySesPriv = *priv sinfo.mySesPriv = *priv
sinfo.myNonce = *newBoxNonce() // TODO make sure nonceIsOK tolerates this sinfo.myNonce = *newBoxNonce() // TODO make sure nonceIsOK tolerates this
higher := false higher := false
for idx := range ss.core.boxPub { for idx := range ss.core.boxPub {
if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] { if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] {
higher = true higher = true
break break
} else if ss.core.boxPub[idx] < sinfo.theirPermPub[idx] { } else if ss.core.boxPub[idx] < sinfo.theirPermPub[idx] {
break break
} }
} }
if higher { if higher {
// higher => odd nonce // higher => odd nonce
sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01 sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
} else { } else {
// lower => even nonce // lower => even nonce
sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
} }
sinfo.myHandle = *newHandle() sinfo.myHandle = *newHandle()
sinfo.theirAddr = *address_addrForNodeID(getNodeID(&sinfo.theirPermPub)) sinfo.theirAddr = *address_addrForNodeID(getNodeID(&sinfo.theirPermPub))
sinfo.theirSubnet = *address_subnetForNodeID(getNodeID(&sinfo.theirPermPub)) sinfo.theirSubnet = *address_subnetForNodeID(getNodeID(&sinfo.theirPermPub))
sinfo.send = make(chan []byte, 1) sinfo.send = make(chan []byte, 1)
sinfo.recv = make(chan *wire_trafficPacket, 1) sinfo.recv = make(chan *wire_trafficPacket, 1)
go sinfo.doWorker() go sinfo.doWorker()
sinfo.time = time.Now() sinfo.time = time.Now()
// Do some cleanup // Do some cleanup
// Time thresholds almost certainly could use some adjusting // Time thresholds almost certainly could use some adjusting
for _, s := range ss.sinfos { for _, s := range ss.sinfos {
if s.timedout() { s.close() } if s.timedout() {
} s.close()
ss.sinfos[sinfo.myHandle] = &sinfo }
ss.byMySes[sinfo.mySesPub] = &sinfo.myHandle }
ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle ss.sinfos[sinfo.myHandle] = &sinfo
ss.addrToPerm[sinfo.theirAddr] = &sinfo.theirPermPub ss.byMySes[sinfo.mySesPub] = &sinfo.myHandle
ss.subnetToPerm[sinfo.theirSubnet] = &sinfo.theirPermPub ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
return &sinfo ss.addrToPerm[sinfo.theirAddr] = &sinfo.theirPermPub
ss.subnetToPerm[sinfo.theirSubnet] = &sinfo.theirPermPub
return &sinfo
} }
func (sinfo *sessionInfo) close() { func (sinfo *sessionInfo) close() {
delete(sinfo.core.sessions.sinfos, sinfo.myHandle) delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
delete(sinfo.core.sessions.byMySes, sinfo.mySesPub) delete(sinfo.core.sessions.byMySes, sinfo.mySesPub)
delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub) delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub)
delete(sinfo.core.sessions.addrToPerm, sinfo.theirAddr) delete(sinfo.core.sessions.addrToPerm, sinfo.theirAddr)
delete(sinfo.core.sessions.subnetToPerm, sinfo.theirSubnet) delete(sinfo.core.sessions.subnetToPerm, sinfo.theirSubnet)
close(sinfo.send) close(sinfo.send)
close(sinfo.recv) close(sinfo.recv)
} }
func (ss *sessions) getPing(sinfo *sessionInfo) sessionPing { func (ss *sessions) getPing(sinfo *sessionInfo) sessionPing {
loc := ss.core.switchTable.getLocator() loc := ss.core.switchTable.getLocator()
coords := loc.getCoords() coords := loc.getCoords()
ref := sessionPing{ ref := sessionPing{
sendPermPub: ss.core.boxPub, sendPermPub: ss.core.boxPub,
handle: sinfo.myHandle, handle: sinfo.myHandle,
sendSesPub: sinfo.mySesPub, sendSesPub: sinfo.mySesPub,
tstamp: time.Now().Unix(), tstamp: time.Now().Unix(),
coords: coords, coords: coords,
} }
sinfo.myNonce.update() sinfo.myNonce.update()
return ref return ref
} }
func (ss *sessions) getSharedKey(myPriv *boxPrivKey, func (ss *sessions) getSharedKey(myPriv *boxPrivKey,
theirPub *boxPubKey) *boxSharedKey { theirPub *boxPubKey) *boxSharedKey {
if skey, isIn := ss.permShared[*theirPub] ; isIn { return skey } if skey, isIn := ss.permShared[*theirPub]; isIn {
// First do some cleanup return skey
const maxKeys = dht_bucket_number*dht_bucket_size }
for key := range ss.permShared { // First do some cleanup
// Remove a random key until the store is small enough const maxKeys = dht_bucket_number * dht_bucket_size
if len(ss.permShared) < maxKeys { break } for key := range ss.permShared {
delete(ss.permShared, key) // Remove a random key until the store is small enough
} if len(ss.permShared) < maxKeys {
ss.permShared[*theirPub] = getSharedKey(myPriv, theirPub) break
return ss.permShared[*theirPub] }
delete(ss.permShared, key)
}
ss.permShared[*theirPub] = getSharedKey(myPriv, theirPub)
return ss.permShared[*theirPub]
} }
func (ss *sessions) ping(sinfo *sessionInfo) { func (ss *sessions) ping(sinfo *sessionInfo) {
ss.sendPingPong(sinfo, false) ss.sendPingPong(sinfo, false)
} }
func (ss *sessions) sendPingPong(sinfo *sessionInfo, isPong bool) { func (ss *sessions) sendPingPong(sinfo *sessionInfo, isPong bool) {
ping := ss.getPing(sinfo) ping := ss.getPing(sinfo)
ping.isPong = isPong ping.isPong = isPong
bs := ping.encode() bs := ping.encode()
shared := ss.getSharedKey(&ss.core.boxPriv, &sinfo.theirPermPub) shared := ss.getSharedKey(&ss.core.boxPriv, &sinfo.theirPermPub)
payload, nonce := boxSeal(shared, bs, nil) payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{ p := wire_protoTrafficPacket{
ttl: ^uint64(0), ttl: ^uint64(0),
coords: sinfo.coords, coords: sinfo.coords,
toKey: sinfo.theirPermPub, toKey: sinfo.theirPermPub,
fromKey: ss.core.boxPub, fromKey: ss.core.boxPub,
nonce: *nonce, nonce: *nonce,
payload: payload, payload: payload,
} }
packet := p.encode() packet := p.encode()
ss.core.router.out(packet) ss.core.router.out(packet)
} }
func (ss *sessions) handlePing(ping *sessionPing) { func (ss *sessions) handlePing(ping *sessionPing) {
// Get the corresponding session (or create a new session) // Get the corresponding session (or create a new session)
sinfo, isIn := ss.getByTheirPerm(&ping.sendPermPub) sinfo, isIn := ss.getByTheirPerm(&ping.sendPermPub)
if !isIn || sinfo.timedout() { if !isIn || sinfo.timedout() {
if isIn { sinfo.close() } if isIn {
ss.createSession(&ping.sendPermPub) sinfo.close()
sinfo, isIn = ss.getByTheirPerm(&ping.sendPermPub) }
if !isIn { panic("This should not happen") } ss.createSession(&ping.sendPermPub)
} sinfo, isIn = ss.getByTheirPerm(&ping.sendPermPub)
// Update the session if !isIn {
if !sinfo.update(ping) { /*panic("Should not happen in testing")*/ ; return } panic("This should not happen")
if !ping.isPong{ ss.sendPingPong(sinfo, true) } }
if sinfo.packet != nil { }
// send // Update the session
var bs []byte if !sinfo.update(ping) { /*panic("Should not happen in testing")*/
bs, sinfo.packet = sinfo.packet, nil return
go func() { sinfo.send<-bs }() }
} if !ping.isPong {
ss.sendPingPong(sinfo, true)
}
if sinfo.packet != nil {
// send
var bs []byte
bs, sinfo.packet = sinfo.packet, nil
go func() { sinfo.send <- bs }()
}
} }
func (n *boxNonce) minus(m *boxNonce) int64 { func (n *boxNonce) minus(m *boxNonce) int64 {
diff := int64(0) diff := int64(0)
for idx := range n { for idx := range n {
diff *= 256 diff *= 256
diff += int64(n[idx]) - int64(m[idx]) diff += int64(n[idx]) - int64(m[idx])
if diff > 64 { diff = 64 } if diff > 64 {
if diff < -64 { diff = -64 } diff = 64
} }
return diff if diff < -64 {
diff = -64
}
}
return diff
} }
func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool { func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool {
// The bitmask is to allow for some non-duplicate out-of-order packets // The bitmask is to allow for some non-duplicate out-of-order packets
diff := theirNonce.minus(&sinfo.theirNonce) diff := theirNonce.minus(&sinfo.theirNonce)
if diff > 0 { return true } if diff > 0 {
return ^sinfo.nonceMask & (0x01 << uint64(-diff)) != 0 return true
}
return ^sinfo.nonceMask&(0x01<<uint64(-diff)) != 0
} }
func (sinfo *sessionInfo) updateNonce(theirNonce *boxNonce) { func (sinfo *sessionInfo) updateNonce(theirNonce *boxNonce) {
// Shift nonce mask if needed // Shift nonce mask if needed
// Set bit // Set bit
diff := theirNonce.minus(&sinfo.theirNonce) diff := theirNonce.minus(&sinfo.theirNonce)
if diff > 0 { if diff > 0 {
sinfo.nonceMask <<= uint64(diff) sinfo.nonceMask <<= uint64(diff)
sinfo.nonceMask &= 0x01 sinfo.nonceMask &= 0x01
} else { } else {
sinfo.nonceMask &= 0x01 << uint64(-diff) sinfo.nonceMask &= 0x01 << uint64(-diff)
} }
sinfo.theirNonce = *theirNonce sinfo.theirNonce = *theirNonce
} }
func (ss *sessions) resetInits() { func (ss *sessions) resetInits() {
for _, sinfo := range ss.sinfos { sinfo.init = false } for _, sinfo := range ss.sinfos {
sinfo.init = false
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -291,37 +325,53 @@ func (ss *sessions) resetInits() {
// It's also responsible for keeping nonces consistent // It's also responsible for keeping nonces consistent
func (sinfo *sessionInfo) doWorker() { func (sinfo *sessionInfo) doWorker() {
for { for {
select { select {
case p, ok := <-sinfo.recv: if ok { sinfo.doRecv(p) } else { return } case p, ok := <-sinfo.recv:
case bs, ok := <-sinfo.send: if ok { sinfo.doSend(bs) } else { return } if ok {
} sinfo.doRecv(p)
} } else {
return
}
case bs, ok := <-sinfo.send:
if ok {
sinfo.doSend(bs)
} else {
return
}
}
}
} }
func (sinfo *sessionInfo) doSend(bs []byte) { func (sinfo *sessionInfo) doSend(bs []byte) {
defer util_putBytes(bs) defer util_putBytes(bs)
if !sinfo.init { return } // To prevent using empty session keys if !sinfo.init {
payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce) return
defer util_putBytes(payload) } // To prevent using empty session keys
p := wire_trafficPacket{ payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce)
ttl: ^uint64(0), defer util_putBytes(payload)
coords: sinfo.coords, p := wire_trafficPacket{
handle: sinfo.theirHandle, ttl: ^uint64(0),
nonce: *nonce, coords: sinfo.coords,
payload: payload, handle: sinfo.theirHandle,
} nonce: *nonce,
packet := p.encode() payload: payload,
sinfo.core.router.out(packet) }
packet := p.encode()
sinfo.core.router.out(packet)
} }
func (sinfo *sessionInfo) doRecv(p *wire_trafficPacket) { func (sinfo *sessionInfo) doRecv(p *wire_trafficPacket) {
defer util_putBytes(p.payload) defer util_putBytes(p.payload)
if !sinfo.nonceIsOK(&p.nonce) { return } if !sinfo.nonceIsOK(&p.nonce) {
bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce) return
if !isOK { util_putBytes(bs) ; return } }
sinfo.updateNonce(&p.nonce) bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce)
sinfo.time = time.Now() if !isOK {
sinfo.core.router.recvPacket(bs, &sinfo.theirAddr) util_putBytes(bs)
return
}
sinfo.updateNonce(&p.nonce)
sinfo.time = time.Now()
sinfo.core.router.recvPacket(bs, &sinfo.theirAddr)
} }

View File

@ -7,52 +7,63 @@ import "sync"
import "time" import "time"
type sigManager struct { type sigManager struct {
mutex sync.RWMutex mutex sync.RWMutex
checked map[sigBytes]knownSig checked map[sigBytes]knownSig
lastCleaned time.Time lastCleaned time.Time
} }
type knownSig struct { type knownSig struct {
bs []byte bs []byte
time time.Time time time.Time
} }
func (m *sigManager) init() { func (m *sigManager) init() {
m.checked = make(map[sigBytes]knownSig) m.checked = make(map[sigBytes]knownSig)
} }
func (m *sigManager) check(key *sigPubKey, sig *sigBytes, bs []byte) bool { func (m *sigManager) check(key *sigPubKey, sig *sigBytes, bs []byte) bool {
if m.isChecked(sig, bs) { return true } if m.isChecked(sig, bs) {
verified := verify(key, bs, sig) return true
if verified { m.putChecked(sig, bs) } }
return verified verified := verify(key, bs, sig)
if verified {
m.putChecked(sig, bs)
}
return verified
} }
func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool { func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool {
m.mutex.RLock() m.mutex.RLock()
defer m.mutex.RUnlock() defer m.mutex.RUnlock()
k, isIn := m.checked[*sig] k, isIn := m.checked[*sig]
if !isIn { return false } if !isIn {
if len(bs) != len(k.bs) { return false } return false
for idx := 0 ; idx < len(bs) ; idx++ { }
if bs[idx] != k.bs[idx] { return false } if len(bs) != len(k.bs) {
} return false
k.time = time.Now() }
return true for idx := 0; idx < len(bs); idx++ {
if bs[idx] != k.bs[idx] {
return false
}
}
k.time = time.Now()
return true
} }
func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) { func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) {
m.mutex.Lock() m.mutex.Lock()
defer m.mutex.Unlock() defer m.mutex.Unlock()
now := time.Now() now := time.Now()
if time.Since(m.lastCleaned) > 60*time.Second { if time.Since(m.lastCleaned) > 60*time.Second {
// Since we have the write lock anyway, do some cleanup // Since we have the write lock anyway, do some cleanup
for s, k := range m.checked { for s, k := range m.checked {
if time.Since(k.time) > 60*time.Second { delete(m.checked, s) } if time.Since(k.time) > 60*time.Second {
} delete(m.checked, s)
m.lastCleaned = now }
} }
k := knownSig{bs: bs, time: now} m.lastCleaned = now
m.checked[*newsig] = k }
k := knownSig{bs: bs, time: now}
m.checked[*newsig] = k
} }

View File

@ -23,366 +23,414 @@ const switch_timeout = time.Minute
// 1 signature per coord, from the *sender* to that coord // 1 signature per coord, from the *sender* to that coord
// E.g. A->B->C has sigA(A->B) and sigB(A->B->C) // E.g. A->B->C has sigA(A->B) and sigB(A->B->C)
type switchLocator struct { type switchLocator struct {
root sigPubKey root sigPubKey
tstamp int64 tstamp int64
coords []switchPort coords []switchPort
} }
func firstIsBetter(first, second *sigPubKey) bool { func firstIsBetter(first, second *sigPubKey) bool {
// Higher TreeID is better // Higher TreeID is better
ftid := getTreeID(first) ftid := getTreeID(first)
stid := getTreeID(second) stid := getTreeID(second)
for idx := 0 ; idx < len(ftid) ; idx++ { for idx := 0; idx < len(ftid); idx++ {
if ftid[idx] == stid[idx] { continue } if ftid[idx] == stid[idx] {
return ftid[idx] > stid[idx] continue
} }
// Edge case, when comparing identical IDs return ftid[idx] > stid[idx]
return false }
// Edge case, when comparing identical IDs
return false
} }
func (l *switchLocator) clone() switchLocator { func (l *switchLocator) clone() switchLocator {
// Used to create a deep copy for use in messages // Used to create a deep copy for use in messages
// Copy required because we need to mutate coords before sending // Copy required because we need to mutate coords before sending
// (By appending the port from us to the destination) // (By appending the port from us to the destination)
loc := *l loc := *l
loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1) loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
copy(loc.coords, l.coords) copy(loc.coords, l.coords)
return loc return loc
} }
func (l *switchLocator) dist(dest []byte) int { func (l *switchLocator) dist(dest []byte) int {
// Returns distance (on the tree) from these coords // Returns distance (on the tree) from these coords
offset := 0 offset := 0
fdc := 0 fdc := 0
for { for {
if fdc >= len(l.coords) { break } if fdc >= len(l.coords) {
coord, length := wire_decode_uint64(dest[offset:]) break
if length == 0 { break } }
if l.coords[fdc] != switchPort(coord) { break } coord, length := wire_decode_uint64(dest[offset:])
fdc++ if length == 0 {
offset += length break
} }
dist := len(l.coords[fdc:]) if l.coords[fdc] != switchPort(coord) {
for { break
_, length := wire_decode_uint64(dest[offset:]) }
if length == 0 { break } fdc++
dist++ offset += length
offset += length }
} dist := len(l.coords[fdc:])
return dist for {
_, length := wire_decode_uint64(dest[offset:])
if length == 0 {
break
}
dist++
offset += length
}
return dist
} }
func (l *switchLocator) getCoords() []byte { func (l *switchLocator) getCoords() []byte {
bs := make([]byte, 0, len(l.coords)) bs := make([]byte, 0, len(l.coords))
for _, coord := range l.coords { for _, coord := range l.coords {
c := wire_encode_uint64(uint64(coord)) c := wire_encode_uint64(uint64(coord))
bs = append(bs, c...) bs = append(bs, c...)
} }
return bs return bs
} }
func (x *switchLocator) isAncestorOf(y *switchLocator) bool { func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
if x.root != y.root { return false } if x.root != y.root {
if len(x.coords) > len(y.coords) { return false } return false
for idx := range x.coords { }
if x.coords[idx] != y.coords[idx] { return false } if len(x.coords) > len(y.coords) {
} return false
return true }
for idx := range x.coords {
if x.coords[idx] != y.coords[idx] {
return false
}
}
return true
} }
type peerInfo struct { type peerInfo struct {
key sigPubKey // ID of this peer key sigPubKey // ID of this peer
locator switchLocator // Should be able to respond with signatures upon request locator switchLocator // Should be able to respond with signatures upon request
degree uint64 // Self-reported degree degree uint64 // Self-reported degree
coords []switchPort // Coords of this peer (taken from coords of the sent locator) coords []switchPort // Coords of this peer (taken from coords of the sent locator)
time time.Time // Time this node was last seen time time.Time // Time this node was last seen
firstSeen time.Time firstSeen time.Time
port switchPort // Interface number of this peer port switchPort // Interface number of this peer
seq uint64 // Seq number we last saw this peer advertise seq uint64 // Seq number we last saw this peer advertise
} }
type switchMessage struct { type switchMessage struct {
from sigPubKey // key of the sender from sigPubKey // key of the sender
locator switchLocator // Locator advertised for the receiver, not the sender's loc! locator switchLocator // Locator advertised for the receiver, not the sender's loc!
seq uint64 seq uint64
} }
type switchPort uint64 type switchPort uint64
type tableElem struct { type tableElem struct {
locator switchLocator locator switchLocator
firstSeen time.Time firstSeen time.Time
} }
type lookupTable struct { type lookupTable struct {
self switchLocator self switchLocator
elems map[switchPort]tableElem elems map[switchPort]tableElem
} }
type switchData struct { type switchData struct {
// All data that's mutable and used by exported Table methods // All data that's mutable and used by exported Table methods
// To be read/written with atomic.Value Store/Load calls // To be read/written with atomic.Value Store/Load calls
locator switchLocator locator switchLocator
seq uint64 // Sequence number, reported to peers, so they know about changes seq uint64 // Sequence number, reported to peers, so they know about changes
peers map[switchPort]peerInfo peers map[switchPort]peerInfo
sigs []sigInfo sigs []sigInfo
} }
type switchTable struct { type switchTable struct {
core *Core core *Core
key sigPubKey // Our own key key sigPubKey // Our own key
time time.Time // Time when locator.tstamp was last updated time time.Time // Time when locator.tstamp was last updated
parent switchPort // Port of whatever peer is our parent, or self if we're root parent switchPort // Port of whatever peer is our parent, or self if we're root
drop map[sigPubKey]int64 // Tstamp associated with a dropped root drop map[sigPubKey]int64 // Tstamp associated with a dropped root
mutex sync.RWMutex // Lock for reads/writes of switchData mutex sync.RWMutex // Lock for reads/writes of switchData
data switchData data switchData
updater atomic.Value //*sync.Once updater atomic.Value //*sync.Once
table atomic.Value //lookupTable table atomic.Value //lookupTable
} }
func (t *switchTable) init(core *Core, key sigPubKey) { func (t *switchTable) init(core *Core, key sigPubKey) {
now := time.Now() now := time.Now()
t.core = core t.core = core
t.key = key t.key = key
locator := switchLocator{root: key, tstamp: now.Unix()} locator := switchLocator{root: key, tstamp: now.Unix()}
peers := make(map[switchPort]peerInfo) peers := make(map[switchPort]peerInfo)
t.data = switchData{locator: locator, peers: peers} t.data = switchData{locator: locator, peers: peers}
t.updater.Store(&sync.Once{}) t.updater.Store(&sync.Once{})
t.table.Store(lookupTable{elems: make(map[switchPort]tableElem)}) t.table.Store(lookupTable{elems: make(map[switchPort]tableElem)})
t.drop = make(map[sigPubKey]int64) t.drop = make(map[sigPubKey]int64)
doTicker := func () { doTicker := func() {
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(time.Second)
defer ticker.Stop() defer ticker.Stop()
for { for {
<-ticker.C <-ticker.C
t.Tick() t.Tick()
} }
} }
go doTicker() go doTicker()
} }
func (t *switchTable) getLocator() switchLocator { func (t *switchTable) getLocator() switchLocator {
t.mutex.RLock() t.mutex.RLock()
defer t.mutex.RUnlock() defer t.mutex.RUnlock()
return t.data.locator.clone() return t.data.locator.clone()
} }
func (t *switchTable) Tick() { func (t *switchTable) Tick() {
// Periodic maintenance work to keep things internally consistent // Periodic maintenance work to keep things internally consistent
t.mutex.Lock() // Write lock t.mutex.Lock() // Write lock
defer t.mutex.Unlock() // Release lock when we're done defer t.mutex.Unlock() // Release lock when we're done
t.cleanRoot() t.cleanRoot()
t.cleanPeers() t.cleanPeers()
t.cleanDropped() t.cleanDropped()
} }
func (t *switchTable) cleanRoot() { func (t *switchTable) cleanRoot() {
// TODO rethink how this is done?... // TODO rethink how this is done?...
// Get rid of the root if it looks like its timed out // Get rid of the root if it looks like its timed out
now := time.Now() now := time.Now()
doUpdate := false doUpdate := false
//fmt.Println("DEBUG clean root:", now.Sub(t.time)) //fmt.Println("DEBUG clean root:", now.Sub(t.time))
if now.Sub(t.time) > switch_timeout { if now.Sub(t.time) > switch_timeout {
//fmt.Println("root timed out", t.data.locator) //fmt.Println("root timed out", t.data.locator)
dropped := t.data.peers[t.parent] dropped := t.data.peers[t.parent]
dropped.time = t.time dropped.time = t.time
t.drop[t.data.locator.root] = t.data.locator.tstamp t.drop[t.data.locator.root] = t.data.locator.tstamp
doUpdate = true doUpdate = true
//t.core.log.Println("DEBUG: switch root timeout", len(t.drop)) //t.core.log.Println("DEBUG: switch root timeout", len(t.drop))
} }
// Or, if we're better than our root, root ourself // Or, if we're better than our root, root ourself
if firstIsBetter(&t.key, &t.data.locator.root) { if firstIsBetter(&t.key, &t.data.locator.root) {
//fmt.Println("root is worse than us", t.data.locator.Root) //fmt.Println("root is worse than us", t.data.locator.Root)
doUpdate = true doUpdate = true
//t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root) //t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root)
} }
// Or, if we are the root, possibly update our timestamp // Or, if we are the root, possibly update our timestamp
if t.data.locator.root == t.key && if t.data.locator.root == t.key &&
now.Sub(t.time) > switch_timeout/2 { now.Sub(t.time) > switch_timeout/2 {
//fmt.Println("root is self and old, updating", t.data.locator.Root) //fmt.Println("root is self and old, updating", t.data.locator.Root)
doUpdate = true doUpdate = true
} }
if doUpdate { if doUpdate {
t.parent = switchPort(0) t.parent = switchPort(0)
t.time = now t.time = now
if t.data.locator.root != t.key { if t.data.locator.root != t.key {
t.data.seq++ t.data.seq++
t.updater.Store(&sync.Once{}) t.updater.Store(&sync.Once{})
select { select {
case t.core.router.reset<-struct{}{}: case t.core.router.reset <- struct{}{}:
default: default:
} }
} }
t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()} t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
t.data.sigs = nil t.data.sigs = nil
} }
} }
func (t *switchTable) cleanPeers() { func (t *switchTable) cleanPeers() {
now := time.Now() now := time.Now()
changed := false changed := false
for idx, info := range t.data.peers { for idx, info := range t.data.peers {
if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ { if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ {
//fmt.Println("peer timed out", t.key, info.locator) //fmt.Println("peer timed out", t.key, info.locator)
delete(t.data.peers, idx) delete(t.data.peers, idx)
changed = true changed = true
} }
} }
if changed { t.updater.Store(&sync.Once{}) } if changed {
t.updater.Store(&sync.Once{})
}
} }
func (t *switchTable) cleanDropped() { func (t *switchTable) cleanDropped() {
// TODO only call this after root changes, not periodically // TODO only call this after root changes, not periodically
for root, _ := range t.drop { for root := range t.drop {
if !firstIsBetter(&root, &t.data.locator.root) { delete(t.drop, root) } if !firstIsBetter(&root, &t.data.locator.root) {
} delete(t.drop, root)
}
}
} }
func (t *switchTable) createMessage(port switchPort) (*switchMessage, []sigInfo) { func (t *switchTable) createMessage(port switchPort) (*switchMessage, []sigInfo) {
t.mutex.RLock() t.mutex.RLock()
defer t.mutex.RUnlock() defer t.mutex.RUnlock()
msg := switchMessage{from: t.key, locator: t.data.locator.clone()} msg := switchMessage{from: t.key, locator: t.data.locator.clone()}
msg.locator.coords = append(msg.locator.coords, port) msg.locator.coords = append(msg.locator.coords, port)
msg.seq = t.data.seq msg.seq = t.data.seq
return &msg, t.data.sigs return &msg, t.data.sigs
} }
func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sigs []sigInfo) { func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sigs []sigInfo) {
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
now := time.Now() now := time.Now()
if len(msg.locator.coords) == 0 { return } // Should always have >=1 links if len(msg.locator.coords) == 0 {
oldSender, isIn := t.data.peers[fromPort] return
if !isIn { oldSender.firstSeen = now } } // Should always have >=1 links
sender := peerInfo{key: msg.from, oldSender, isIn := t.data.peers[fromPort]
locator: msg.locator, if !isIn {
coords: msg.locator.coords[:len(msg.locator.coords)-1], oldSender.firstSeen = now
time: now, }
firstSeen: oldSender.firstSeen, sender := peerInfo{key: msg.from,
port: fromPort, locator: msg.locator,
seq: msg.seq} coords: msg.locator.coords[:len(msg.locator.coords)-1],
equiv := func (x *switchLocator, y *switchLocator) bool { time: now,
if x.root != y.root { return false } firstSeen: oldSender.firstSeen,
if len(x.coords) != len(y.coords) { return false } port: fromPort,
for idx := range x.coords { seq: msg.seq}
if x.coords[idx] != y.coords[idx] { return false } equiv := func(x *switchLocator, y *switchLocator) bool {
} if x.root != y.root {
return true return false
} }
doUpdate := false if len(x.coords) != len(y.coords) {
if !equiv(&msg.locator, &oldSender.locator) { return false
doUpdate = true }
sender.firstSeen = now for idx := range x.coords {
} if x.coords[idx] != y.coords[idx] {
t.data.peers[fromPort] = sender return false
updateRoot := false }
oldParent, isIn := t.data.peers[t.parent] }
noParent := !isIn return true
noLoop := func () bool { }
for idx := 0 ; idx < len(sigs)-1 ; idx++ { doUpdate := false
if sigs[idx].next == t.core.sigPub { return false } if !equiv(&msg.locator, &oldSender.locator) {
} doUpdate = true
if msg.locator.root == t.core.sigPub { return false } sender.firstSeen = now
return true }
}() t.data.peers[fromPort] = sender
sTime := now.Sub(sender.firstSeen) updateRoot := false
pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout oldParent, isIn := t.data.peers[t.parent]
// Really want to compare sLen/sTime and pLen/pTime noParent := !isIn
// Cross multiplied to avoid divide-by-zero noLoop := func() bool {
cost := len(msg.locator.coords)*int(pTime.Seconds()) for idx := 0; idx < len(sigs)-1; idx++ {
pCost := len(t.data.locator.coords)*int(sTime.Seconds()) if sigs[idx].next == t.core.sigPub {
dropTstamp, isIn := t.drop[msg.locator.root] return false
// Here be dragons }
switch { }
case !noLoop: // do nothing if msg.locator.root == t.core.sigPub {
case isIn && dropTstamp >= msg.locator.tstamp: // do nothing return false
case firstIsBetter(&msg.locator.root, &t.data.locator.root): updateRoot = true }
case t.data.locator.root != msg.locator.root: // do nothing return true
case t.data.locator.tstamp > msg.locator.tstamp: // do nothing }()
case noParent: updateRoot = true sTime := now.Sub(sender.firstSeen)
case cost < pCost: updateRoot = true pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout
case sender.port == t.parent && // Really want to compare sLen/sTime and pLen/pTime
(msg.locator.tstamp > t.data.locator.tstamp || // Cross multiplied to avoid divide-by-zero
!equiv(&msg.locator, &t.data.locator)): updateRoot = true cost := len(msg.locator.coords) * int(pTime.Seconds())
} pCost := len(t.data.locator.coords) * int(sTime.Seconds())
if updateRoot { dropTstamp, isIn := t.drop[msg.locator.root]
if !equiv(&msg.locator, &t.data.locator) { // Here be dragons
doUpdate = true switch {
t.data.seq++ case !noLoop: // do nothing
select { case isIn && dropTstamp >= msg.locator.tstamp: // do nothing
case t.core.router.reset<-struct{}{}: case firstIsBetter(&msg.locator.root, &t.data.locator.root):
default: updateRoot = true
} case t.data.locator.root != msg.locator.root: // do nothing
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords) case t.data.locator.tstamp > msg.locator.tstamp: // do nothing
//fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords) case noParent:
} updateRoot = true
if t.data.locator.tstamp != msg.locator.tstamp { t.time = now } case cost < pCost:
t.data.locator = msg.locator updateRoot = true
t.parent = sender.port case sender.port == t.parent &&
t.data.sigs = sigs (msg.locator.tstamp > t.data.locator.tstamp ||
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords) !equiv(&msg.locator, &t.data.locator)):
} updateRoot = true
if doUpdate { t.updater.Store(&sync.Once{}) } }
return if updateRoot {
if !equiv(&msg.locator, &t.data.locator) {
doUpdate = true
t.data.seq++
select {
case t.core.router.reset <- struct{}{}:
default:
}
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
//fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
}
if t.data.locator.tstamp != msg.locator.tstamp {
t.time = now
}
t.data.locator = msg.locator
t.parent = sender.port
t.data.sigs = sigs
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
}
if doUpdate {
t.updater.Store(&sync.Once{})
}
return
} }
func (t *switchTable) updateTable() { func (t *switchTable) updateTable() {
// WARNING this should only be called from within t.data.updater.Do() // WARNING this should only be called from within t.data.updater.Do()
// It relies on the sync.Once for synchronization with messages and lookups // It relies on the sync.Once for synchronization with messages and lookups
// TODO use a pre-computed faster lookup table // TODO use a pre-computed faster lookup table
// Instead of checking distance for every destination every time // Instead of checking distance for every destination every time
// Array of structs, indexed by first coord that differs from self // Array of structs, indexed by first coord that differs from self
// Each struct has stores the best port to forward to, and a next coord map // Each struct has stores the best port to forward to, and a next coord map
// Move to struct, then iterate over coord maps until you dead end // Move to struct, then iterate over coord maps until you dead end
// The last port before the dead end should be the closest // The last port before the dead end should be the closest
t.mutex.RLock() t.mutex.RLock()
defer t.mutex.RUnlock() defer t.mutex.RUnlock()
newTable := lookupTable{ newTable := lookupTable{
self: t.data.locator.clone(), self: t.data.locator.clone(),
elems: make(map[switchPort]tableElem), elems: make(map[switchPort]tableElem),
} }
for _, pinfo := range t.data.peers { for _, pinfo := range t.data.peers {
//if !pinfo.forward { continue } //if !pinfo.forward { continue }
loc := pinfo.locator.clone() loc := pinfo.locator.clone()
loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
newTable.elems[pinfo.port] = tableElem { newTable.elems[pinfo.port] = tableElem{
locator: loc, locator: loc,
//degree: pinfo.degree, //degree: pinfo.degree,
firstSeen: pinfo.firstSeen, firstSeen: pinfo.firstSeen,
//forward: pinfo.forward, //forward: pinfo.forward,
} }
} }
t.table.Store(newTable) t.table.Store(newTable)
} }
func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) { func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
t.updater.Load().(*sync.Once).Do(t.updateTable) t.updater.Load().(*sync.Once).Do(t.updateTable)
table := t.table.Load().(lookupTable) table := t.table.Load().(lookupTable)
ports := t.core.peers.getPorts() ports := t.core.peers.getPorts()
getBandwidth := func (port switchPort) float64 { getBandwidth := func(port switchPort) float64 {
var bandwidth float64 var bandwidth float64
if p, isIn := ports[port]; isIn { if p, isIn := ports[port]; isIn {
bandwidth = p.getBandwidth() bandwidth = p.getBandwidth()
} }
return bandwidth return bandwidth
} }
var best switchPort var best switchPort
myDist := table.self.dist(dest) //getDist(table.self.coords) myDist := table.self.dist(dest) //getDist(table.self.coords)
if !(uint64(myDist) < ttl) { return 0, 0 } if !(uint64(myDist) < ttl) {
// score is in units of bandwidth / distance return 0, 0
bestScore := float64(-1) }
for port, info := range table.elems { // score is in units of bandwidth / distance
if info.locator.root != table.self.root { continue } bestScore := float64(-1)
dist := info.locator.dist(dest) //getDist(info.locator.coords) for port, info := range table.elems {
if !(dist < myDist) { continue } if info.locator.root != table.self.root {
score := getBandwidth(port) continue
score /= float64(1+dist) }
if score > bestScore { dist := info.locator.dist(dest) //getDist(info.locator.coords)
best = port if !(dist < myDist) {
bestScore = score continue
} }
} score := getBandwidth(port)
//t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best)) score /= float64(1 + dist)
return best, uint64(myDist) if score > bestScore {
best = port
bestScore = score
}
}
//t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
return best, uint64(myDist)
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -390,9 +438,8 @@ func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
//Signature stuff //Signature stuff
type sigInfo struct { type sigInfo struct {
next sigPubKey next sigPubKey
sig sigBytes sig sigBytes
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -16,189 +16,226 @@ import "errors"
import "sync" import "sync"
import "fmt" import "fmt"
const tcp_msgSize = 2048+65535 // TODO figure out what makes sense const tcp_msgSize = 2048 + 65535 // TODO figure out what makes sense
type tcpInterface struct { type tcpInterface struct {
core *Core core *Core
serv *net.TCPListener serv *net.TCPListener
mutex sync.Mutex // Protecting the below mutex sync.Mutex // Protecting the below
calls map[string]struct{} calls map[string]struct{}
} }
type tcpKeys struct { type tcpKeys struct {
box boxPubKey box boxPubKey
sig sigPubKey sig sigPubKey
} }
func (iface *tcpInterface) init(core *Core, addr string) { func (iface *tcpInterface) init(core *Core, addr string) {
iface.core = core iface.core = core
tcpAddr, err := net.ResolveTCPAddr("tcp", addr) tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil { panic(err) } if err != nil {
iface.serv, err = net.ListenTCP("tcp", tcpAddr) panic(err)
if err != nil { panic(err) } }
iface.calls = make(map[string]struct{}) iface.serv, err = net.ListenTCP("tcp", tcpAddr)
go iface.listener() if err != nil {
panic(err)
}
iface.calls = make(map[string]struct{})
go iface.listener()
} }
func (iface *tcpInterface) listener() { func (iface *tcpInterface) listener() {
defer iface.serv.Close() defer iface.serv.Close()
iface.core.log.Println("Listening on:", iface.serv.Addr().String()) iface.core.log.Println("Listening on:", iface.serv.Addr().String())
for { for {
sock, err := iface.serv.AcceptTCP() sock, err := iface.serv.AcceptTCP()
if err != nil { panic(err) } if err != nil {
go iface.handler(sock) panic(err)
} }
go iface.handler(sock)
}
} }
func (iface *tcpInterface) call(saddr string) { func (iface *tcpInterface) call(saddr string) {
go func() { go func() {
quit := false quit := false
iface.mutex.Lock() iface.mutex.Lock()
if _, isIn := iface.calls[saddr]; isIn { if _, isIn := iface.calls[saddr]; isIn {
quit = true quit = true
} else { } else {
iface.calls[saddr] = struct{}{} iface.calls[saddr] = struct{}{}
defer func() { defer func() {
iface.mutex.Lock() iface.mutex.Lock()
delete(iface.calls, saddr) delete(iface.calls, saddr)
iface.mutex.Unlock() iface.mutex.Unlock()
}() }()
} }
iface.mutex.Unlock() iface.mutex.Unlock()
if !quit { if !quit {
conn, err := net.DialTimeout("tcp", saddr, 6*time.Second) conn, err := net.DialTimeout("tcp", saddr, 6*time.Second)
if err != nil { return } if err != nil {
sock := conn.(*net.TCPConn) return
iface.handler(sock) }
} sock := conn.(*net.TCPConn)
}() iface.handler(sock)
}
}()
} }
func (iface *tcpInterface) handler(sock *net.TCPConn) { func (iface *tcpInterface) handler(sock *net.TCPConn) {
defer sock.Close() defer sock.Close()
// Get our keys // Get our keys
keys := []byte{} keys := []byte{}
keys = append(keys, tcp_key[:]...) keys = append(keys, tcp_key[:]...)
keys = append(keys, iface.core.boxPub[:]...) keys = append(keys, iface.core.boxPub[:]...)
keys = append(keys, iface.core.sigPub[:]...) keys = append(keys, iface.core.sigPub[:]...)
_, err := sock.Write(keys) _, err := sock.Write(keys)
if err != nil { return } if err != nil {
timeout := time.Now().Add(6*time.Second) return
sock.SetReadDeadline(timeout) }
n, err := sock.Read(keys) timeout := time.Now().Add(6 * time.Second)
if err != nil { return } sock.SetReadDeadline(timeout)
if n < len(keys) { /*panic("Partial key packet?") ;*/ return } n, err := sock.Read(keys)
ks := tcpKeys{} if err != nil {
if !tcp_chop_keys(&ks.box, &ks.sig, &keys) { /*panic("Invalid key packet?") ;*/ return } return
// Quit the parent call if this is a connection to ourself }
equiv := func(k1, k2 []byte) bool { if n < len(keys) { /*panic("Partial key packet?") ;*/
for idx := range k1 { return
if k1[idx] != k2[idx] { return false } }
} ks := tcpKeys{}
return true if !tcp_chop_keys(&ks.box, &ks.sig, &keys) { /*panic("Invalid key packet?") ;*/
} return
if equiv(ks.box[:], iface.core.boxPub[:]) { return } // testing }
if equiv(ks.sig[:], iface.core.sigPub[:]) { return } // Quit the parent call if this is a connection to ourself
// Note that multiple connections to the same node are allowed equiv := func(k1, k2 []byte) bool {
// E.g. over different interfaces for idx := range k1 {
linkIn := make(chan []byte, 1) if k1[idx] != k2[idx] {
p := iface.core.peers.newPeer(&ks.box, &ks.sig)//, in, out) return false
in := func(bs []byte) { }
p.handlePacket(bs, linkIn) }
} return true
out := make(chan []byte, 1024) // TODO? what size makes sense }
defer close(out) if equiv(ks.box[:], iface.core.boxPub[:]) {
go func() { return
var stack [][]byte } // testing
put := func(msg []byte) { if equiv(ks.sig[:], iface.core.sigPub[:]) {
stack = append(stack, msg) return
for len(stack) > 1024 { }
util_putBytes(stack[0]) // Note that multiple connections to the same node are allowed
stack = stack[1:] // E.g. over different interfaces
} linkIn := make(chan []byte, 1)
} p := iface.core.peers.newPeer(&ks.box, &ks.sig) //, in, out)
send := func() { in := func(bs []byte) {
msg := stack[len(stack)-1] p.handlePacket(bs, linkIn)
stack = stack[:len(stack)-1] }
buf := net.Buffers{tcp_msg[:], out := make(chan []byte, 1024) // TODO? what size makes sense
wire_encode_uint64(uint64(len(msg))), defer close(out)
msg} go func() {
size := 0 var stack [][]byte
for _, bs := range buf { size += len(bs) } put := func(msg []byte) {
start := time.Now() stack = append(stack, msg)
buf.WriteTo(sock) for len(stack) > 1024 {
timed := time.Since(start) util_putBytes(stack[0])
pType, _ := wire_decode_uint64(msg) stack = stack[1:]
if pType == wire_LinkProtocolTraffic { }
p.updateBandwidth(size, timed) }
} send := func() {
util_putBytes(msg) msg := stack[len(stack)-1]
} stack = stack[:len(stack)-1]
for msg := range out { buf := net.Buffers{tcp_msg[:],
put(msg) wire_encode_uint64(uint64(len(msg))),
for len(stack) > 0 { msg}
// Keep trying to fill the stack (LIFO order) while sending size := 0
select { for _, bs := range buf {
case msg, ok := <-out: size += len(bs)
if !ok { return } }
put(msg) start := time.Now()
default: send() buf.WriteTo(sock)
} timed := time.Since(start)
} pType, _ := wire_decode_uint64(msg)
} if pType == wire_LinkProtocolTraffic {
}() p.updateBandwidth(size, timed)
p.out = func(msg []byte) { }
defer func() { recover() }() util_putBytes(msg)
for { }
select { for msg := range out {
case out<-msg: return put(msg)
default: util_putBytes(<-out) for len(stack) > 0 {
} // Keep trying to fill the stack (LIFO order) while sending
} select {
} case msg, ok := <-out:
sock.SetNoDelay(true) if !ok {
go p.linkLoop(linkIn) return
defer func() { }
// Put all of our cleanup here... put(msg)
p.core.peers.mutex.Lock() default:
oldPorts := p.core.peers.getPorts() send()
newPorts := make(map[switchPort]*peer) }
for k,v := range oldPorts{ newPorts[k] = v } }
delete(newPorts, p.port) }
p.core.peers.putPorts(newPorts) }()
p.core.peers.mutex.Unlock() p.out = func(msg []byte) {
close(linkIn) defer func() { recover() }()
}() for {
them := sock.RemoteAddr() select {
themNodeID := getNodeID(&ks.box) case out <- msg:
themAddr := address_addrForNodeID(themNodeID) return
themAddrString := net.IP(themAddr[:]).String() default:
themString := fmt.Sprintf("%s@%s", themAddrString, them) util_putBytes(<-out)
iface.core.log.Println("Connected:", themString) }
iface.reader(sock, in) // In this goroutine, because of defers }
iface.core.log.Println("Disconnected:", themString) }
return sock.SetNoDelay(true)
go p.linkLoop(linkIn)
defer func() {
// Put all of our cleanup here...
p.core.peers.mutex.Lock()
oldPorts := p.core.peers.getPorts()
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
delete(newPorts, p.port)
p.core.peers.putPorts(newPorts)
p.core.peers.mutex.Unlock()
close(linkIn)
}()
them := sock.RemoteAddr()
themNodeID := getNodeID(&ks.box)
themAddr := address_addrForNodeID(themNodeID)
themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, them)
iface.core.log.Println("Connected:", themString)
iface.reader(sock, in) // In this goroutine, because of defers
iface.core.log.Println("Disconnected:", themString)
return
} }
func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) { func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) {
bs := make([]byte, 2*tcp_msgSize) bs := make([]byte, 2*tcp_msgSize)
frag := bs[:0] frag := bs[:0]
for { for {
timeout := time.Now().Add(6*time.Second) timeout := time.Now().Add(6 * time.Second)
sock.SetReadDeadline(timeout) sock.SetReadDeadline(timeout)
n, err := sock.Read(bs[len(frag):]) n, err := sock.Read(bs[len(frag):])
if err != nil || n == 0 { break } if err != nil || n == 0 {
frag = bs[:len(frag)+n] break
for { }
msg, ok, err := tcp_chop_msg(&frag) frag = bs[:len(frag)+n]
if err != nil { return } for {
if !ok { break } // We didn't get the whole message yet msg, ok, err := tcp_chop_msg(&frag)
newMsg := append(util_getBytes(), msg...) if err != nil {
in(newMsg) return
util_yield() }
} if !ok {
frag = append(bs[:0], frag...) break
} } // We didn't get the whole message yet
newMsg := append(util_getBytes(), msg...)
in(newMsg)
util_yield()
}
frag = append(bs[:0], frag...)
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -208,39 +245,46 @@ var tcp_key = [...]byte{'k', 'e', 'y', 's'}
var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits" var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"
func tcp_chop_keys(box *boxPubKey, sig *sigPubKey, bs *[]byte) bool { func tcp_chop_keys(box *boxPubKey, sig *sigPubKey, bs *[]byte) bool {
// This one is pretty simple: we know how long the message should be // This one is pretty simple: we know how long the message should be
// So don't call this with a message that's too short // So don't call this with a message that's too short
if len(*bs) < len(tcp_key) + len(*box) + len(*sig) { return false } if len(*bs) < len(tcp_key)+len(*box)+len(*sig) {
for idx := range tcp_key { return false
if (*bs)[idx] != tcp_key[idx] { return false } }
} for idx := range tcp_key {
(*bs) = (*bs)[len(tcp_key):] if (*bs)[idx] != tcp_key[idx] {
copy(box[:], *bs) return false
(*bs) = (*bs)[len(box):] }
copy(sig[:], *bs) }
(*bs) = (*bs)[len(sig):] (*bs) = (*bs)[len(tcp_key):]
return true copy(box[:], *bs)
(*bs) = (*bs)[len(box):]
copy(sig[:], *bs)
(*bs) = (*bs)[len(sig):]
return true
} }
func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) { func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) {
// Returns msg, ok, err // Returns msg, ok, err
if len(*bs) < len(tcp_msg) { return nil, false, nil } if len(*bs) < len(tcp_msg) {
for idx := range tcp_msg { return nil, false, nil
if (*bs)[idx] != tcp_msg[idx] { }
return nil, false, errors.New("Bad message!") for idx := range tcp_msg {
} if (*bs)[idx] != tcp_msg[idx] {
} return nil, false, errors.New("Bad message!")
msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):]) }
if msgLen > tcp_msgSize { return nil, false, errors.New("Oversized message!") } }
msgBegin := len(tcp_msg) + msgLenLen msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):])
msgEnd := msgBegin + int(msgLen) if msgLen > tcp_msgSize {
if msgLenLen == 0 || len(*bs) < msgEnd { return nil, false, errors.New("Oversized message!")
// We don't have the full message }
// Need to buffer this and wait for the rest to come in msgBegin := len(tcp_msg) + msgLenLen
return nil, false, nil msgEnd := msgBegin + int(msgLen)
} if msgLenLen == 0 || len(*bs) < msgEnd {
msg := (*bs)[msgBegin:msgEnd] // We don't have the full message
(*bs) = (*bs)[msgEnd:] // Need to buffer this and wait for the rest to come in
return msg, true, nil return nil, false, nil
}
msg := (*bs)[msgBegin:msgEnd]
(*bs) = (*bs)[msgEnd:]
return msg, true, nil
} }

View File

@ -7,41 +7,45 @@ import water "github.com/songgao/water"
const IPv6_HEADER_LENGTH = 40 const IPv6_HEADER_LENGTH = 40
type tunDevice struct { type tunDevice struct {
core *Core core *Core
send chan<- []byte send chan<- []byte
recv <-chan []byte recv <-chan []byte
mtu int mtu int
iface *water.Interface iface *water.Interface
} }
func (tun *tunDevice) init(core *Core) { func (tun *tunDevice) init(core *Core) {
tun.core = core tun.core = core
} }
func (tun *tunDevice) write() error { func (tun *tunDevice) write() error {
for { for {
data := <-tun.recv data := <-tun.recv
if _, err := tun.iface.Write(data); err != nil { return err } if _, err := tun.iface.Write(data); err != nil {
util_putBytes(data) return err
} }
util_putBytes(data)
}
} }
func (tun *tunDevice) read() error { func (tun *tunDevice) read() error {
buf := make([]byte, tun.mtu) buf := make([]byte, tun.mtu)
for { for {
n, err := tun.iface.Read(buf) n, err := tun.iface.Read(buf)
if err != nil { return err } if err != nil {
if buf[0] & 0xf0 != 0x60 || return err
n != 256*int(buf[4]) + int(buf[5]) + IPv6_HEADER_LENGTH { }
// Either not an IPv6 packet or not the complete packet for some reason if buf[0]&0xf0 != 0x60 ||
//panic("Should not happen in testing") n != 256*int(buf[4])+int(buf[5])+IPv6_HEADER_LENGTH {
continue // Either not an IPv6 packet or not the complete packet for some reason
} //panic("Should not happen in testing")
packet := append(util_getBytes(), buf[:n]...) continue
tun.send<-packet }
} packet := append(util_getBytes(), buf[:n]...)
tun.send <- packet
}
} }
func (tun *tunDevice) close() error { func (tun *tunDevice) close() error {
return tun.iface.Close() return tun.iface.Close()
} }

View File

@ -10,40 +10,42 @@ import "strings"
import water "github.com/songgao/water" import water "github.com/songgao/water"
func (tun *tunDevice) setup(ifname string, addr string, mtu int) error { func (tun *tunDevice) setup(ifname string, addr string, mtu int) error {
config := water.Config{ DeviceType: water.TUN } config := water.Config{DeviceType: water.TUN}
if ifname != "" && ifname != "auto" { if ifname != "" && ifname != "auto" {
config.Name = ifname config.Name = ifname
} }
iface, err := water.New(config) iface, err := water.New(config)
if err != nil { panic(err) } if err != nil {
tun.iface = iface panic(err)
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now }
return tun.setupAddress(addr) tun.iface = iface
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
return tun.setupAddress(addr)
} }
func (tun *tunDevice) setupAddress(addr string) error { func (tun *tunDevice) setupAddress(addr string) error {
// Set address // Set address
cmd := exec.Command("ip", "-f", "inet6", cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", addr, "addr", "add", addr,
"dev", tun.iface.Name()) "dev", tun.iface.Name())
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " ")) tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {
tun.core.log.Printf("Linux ip failed: %v.", err) tun.core.log.Printf("Linux ip failed: %v.", err)
tun.core.log.Println(string(output)) tun.core.log.Println(string(output))
return err return err
} }
// Set MTU and bring device up // Set MTU and bring device up
cmd = exec.Command("ip", "link", "set", cmd = exec.Command("ip", "link", "set",
"dev", tun.iface.Name(), "dev", tun.iface.Name(),
"mtu", fmt.Sprintf("%d", tun.mtu), "mtu", fmt.Sprintf("%d", tun.mtu),
"up") "up")
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " ")) tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
output, err = cmd.CombinedOutput() output, err = cmd.CombinedOutput()
if err != nil { if err != nil {
tun.core.log.Printf("Linux ip failed: %v.", err) tun.core.log.Printf("Linux ip failed: %v.", err)
tun.core.log.Println(string(output)) tun.core.log.Println(string(output))
return err return err
} }
return nil return nil
} }

View File

@ -8,15 +8,17 @@ import water "github.com/songgao/water"
// If your platform supports tun devices, you could try configuring it manually // If your platform supports tun devices, you could try configuring it manually
func (tun *tunDevice) setup(ifname string, addr string, mtu int) error { func (tun *tunDevice) setup(ifname string, addr string, mtu int) error {
config := water.Config{ DeviceType: water.TUN } config := water.Config{DeviceType: water.TUN}
iface, err := water.New(config) iface, err := water.New(config)
if err != nil { panic(err) } if err != nil {
tun.iface = iface panic(err)
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now }
return tun.setupAddress(addr) tun.iface = iface
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
return tun.setupAddress(addr)
} }
func (tun *tunDevice) setupAddress(addr string) error { func (tun *tunDevice) setupAddress(addr string) error {
tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr) tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr)
return nil return nil
} }

View File

@ -15,247 +15,277 @@ import "sync"
import "fmt" import "fmt"
type udpInterface struct { type udpInterface struct {
core *Core core *Core
sock *net.UDPConn // Or more general PacketConn? sock *net.UDPConn // Or more general PacketConn?
mutex sync.RWMutex // each conn has an owner goroutine mutex sync.RWMutex // each conn has an owner goroutine
conns map[connAddr]*connInfo conns map[connAddr]*connInfo
} }
type connAddr string // TODO something more efficient, but still a valid map key type connAddr string // TODO something more efficient, but still a valid map key
type connInfo struct { type connInfo struct {
addr connAddr addr connAddr
peer *peer peer *peer
linkIn chan []byte linkIn chan []byte
keysIn chan *udpKeys keysIn chan *udpKeys
timeout int // count of how many heartbeats have been missed timeout int // count of how many heartbeats have been missed
in func([]byte) in func([]byte)
out chan []byte out chan []byte
countIn uint8 countIn uint8
countOut uint8 countOut uint8
} }
type udpKeys struct { type udpKeys struct {
box boxPubKey box boxPubKey
sig sigPubKey sig sigPubKey
} }
func (iface *udpInterface) init(core *Core, addr string) { func (iface *udpInterface) init(core *Core, addr string) {
iface.core = core iface.core = core
udpAddr, err := net.ResolveUDPAddr("udp", addr) udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil { panic(err) } if err != nil {
iface.sock, err = net.ListenUDP("udp", udpAddr) panic(err)
if err != nil { panic(err) } }
iface.conns = make(map[connAddr]*connInfo) iface.sock, err = net.ListenUDP("udp", udpAddr)
go iface.reader() if err != nil {
panic(err)
}
iface.conns = make(map[connAddr]*connInfo)
go iface.reader()
} }
func (iface *udpInterface) sendKeys(addr connAddr) { func (iface *udpInterface) sendKeys(addr connAddr) {
udpAddr, err := net.ResolveUDPAddr("udp", string(addr)) udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
if err != nil { panic(err) } if err != nil {
msg := []byte{} panic(err)
msg = udp_encode(msg, 0, 0, 0, nil) }
msg = append(msg, iface.core.boxPub[:]...) msg := []byte{}
msg = append(msg, iface.core.sigPub[:]...) msg = udp_encode(msg, 0, 0, 0, nil)
iface.sock.WriteToUDP(msg, udpAddr) msg = append(msg, iface.core.boxPub[:]...)
msg = append(msg, iface.core.sigPub[:]...)
iface.sock.WriteToUDP(msg, udpAddr)
} }
func udp_isKeys(msg []byte) bool { func udp_isKeys(msg []byte) bool {
keyLen := 3 + boxPubKeyLen + sigPubKeyLen keyLen := 3 + boxPubKeyLen + sigPubKeyLen
return len(msg) == keyLen && msg[0] == 0x00 return len(msg) == keyLen && msg[0] == 0x00
} }
func (iface *udpInterface) startConn(info *connInfo) { func (iface *udpInterface) startConn(info *connInfo) {
ticker := time.NewTicker(6*time.Second) ticker := time.NewTicker(6 * time.Second)
defer ticker.Stop() defer ticker.Stop()
defer func () { defer func() {
// Cleanup // Cleanup
// FIXME this still leaks a peer struct // FIXME this still leaks a peer struct
iface.mutex.Lock() iface.mutex.Lock()
delete(iface.conns, info.addr) delete(iface.conns, info.addr)
iface.mutex.Unlock() iface.mutex.Unlock()
iface.core.peers.mutex.Lock() iface.core.peers.mutex.Lock()
oldPorts := iface.core.peers.getPorts() oldPorts := iface.core.peers.getPorts()
newPorts := make(map[switchPort]*peer) newPorts := make(map[switchPort]*peer)
for k,v := range oldPorts{ newPorts[k] = v } for k, v := range oldPorts {
delete(newPorts, info.peer.port) newPorts[k] = v
iface.core.peers.putPorts(newPorts) }
iface.core.peers.mutex.Unlock() delete(newPorts, info.peer.port)
close(info.linkIn) iface.core.peers.putPorts(newPorts)
close(info.keysIn) iface.core.peers.mutex.Unlock()
close(info.out) close(info.linkIn)
iface.core.log.Println("Removing peer:", info.addr) close(info.keysIn)
}() close(info.out)
for { iface.core.log.Println("Removing peer:", info.addr)
select { }()
case ks := <-info.keysIn: { for {
// FIXME? need signatures/sequence-numbers or something select {
// Spoofers could lock out a peer with fake/bad keys case ks := <-info.keysIn:
if ks.box == info.peer.box && ks.sig == info.peer.sig { {
info.timeout = 0 // FIXME? need signatures/sequence-numbers or something
} // Spoofers could lock out a peer with fake/bad keys
} if ks.box == info.peer.box && ks.sig == info.peer.sig {
case <-ticker.C: { info.timeout = 0
if info.timeout > 10 { return } }
info.timeout++ }
iface.sendKeys(info.addr) case <-ticker.C:
} {
} if info.timeout > 10 {
} return
}
info.timeout++
iface.sendKeys(info.addr)
}
}
}
} }
func (iface *udpInterface) handleKeys(msg []byte, addr connAddr) { func (iface *udpInterface) handleKeys(msg []byte, addr connAddr) {
//defer util_putBytes(msg) //defer util_putBytes(msg)
var ks udpKeys var ks udpKeys
_, _, _, bs := udp_decode(msg) _, _, _, bs := udp_decode(msg)
switch { switch {
case !wire_chop_slice(ks.box[:], &bs): return case !wire_chop_slice(ks.box[:], &bs):
case !wire_chop_slice(ks.sig[:], &bs): return return
} case !wire_chop_slice(ks.sig[:], &bs):
if ks.box == iface.core.boxPub { return } return
if ks.sig == iface.core.sigPub { return } }
iface.mutex.RLock() if ks.box == iface.core.boxPub {
conn, isIn := iface.conns[addr] return
iface.mutex.RUnlock() // TODO? keep the lock longer?... }
if !isIn { if ks.sig == iface.core.sigPub {
udpAddr, err := net.ResolveUDPAddr("udp", string(addr)) return
if err != nil { panic(err) } }
conn = &connInfo{ iface.mutex.RLock()
addr: connAddr(addr), conn, isIn := iface.conns[addr]
peer: iface.core.peers.newPeer(&ks.box, &ks.sig), iface.mutex.RUnlock() // TODO? keep the lock longer?...
linkIn: make(chan []byte, 1), if !isIn {
keysIn: make(chan *udpKeys, 1), udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
out: make(chan []byte, 1024), if err != nil {
} panic(err)
/* }
conn.in = func (msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) } conn = &connInfo{
conn.peer.out = func (msg []byte) { addr: connAddr(addr),
start := time.Now() peer: iface.core.peers.newPeer(&ks.box, &ks.sig),
iface.sock.WriteToUDP(msg, udpAddr) linkIn: make(chan []byte, 1),
timed := time.Since(start) keysIn: make(chan *udpKeys, 1),
conn.peer.updateBandwidth(len(msg), timed) out: make(chan []byte, 1024),
util_putBytes(msg) }
} // Old version, always one syscall per packet /*
//*/ conn.in = func (msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) }
/* conn.peer.out = func (msg []byte) {
conn.peer.out = func (msg []byte) { start := time.Now()
defer func() { recover() }() iface.sock.WriteToUDP(msg, udpAddr)
select { timed := time.Since(start)
case conn.out<-msg: conn.peer.updateBandwidth(len(msg), timed)
default: util_putBytes(msg) util_putBytes(msg)
} } // Old version, always one syscall per packet
} //*/
go func () { /*
for msg := range conn.out { conn.peer.out = func (msg []byte) {
start := time.Now() defer func() { recover() }()
iface.sock.WriteToUDP(msg, udpAddr) select {
timed := time.Since(start) case conn.out<-msg:
conn.peer.updateBandwidth(len(msg), timed) default: util_putBytes(msg)
util_putBytes(msg) }
} }
}() go func () {
//*/ for msg := range conn.out {
//* start := time.Now()
var inChunks uint8 iface.sock.WriteToUDP(msg, udpAddr)
var inBuf []byte timed := time.Since(start)
conn.in = func(bs []byte) { conn.peer.updateBandwidth(len(msg), timed)
//defer util_putBytes(bs) util_putBytes(msg)
chunks, chunk, count, payload := udp_decode(bs) }
//iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload)) }()
//iface.core.log.Println("DEBUG: payload:", payload) //*/
if count != conn.countIn { //*
inChunks = 0 var inChunks uint8
inBuf = inBuf[:0] var inBuf []byte
conn.countIn = count conn.in = func(bs []byte) {
} //defer util_putBytes(bs)
if chunk <= chunks && chunk == inChunks + 1 { chunks, chunk, count, payload := udp_decode(bs)
//iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload)) //iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload))
inChunks += 1 //iface.core.log.Println("DEBUG: payload:", payload)
inBuf = append(inBuf, payload...) if count != conn.countIn {
if chunks != chunk { return } inChunks = 0
msg := append(util_getBytes(), inBuf...) inBuf = inBuf[:0]
conn.peer.handlePacket(msg, conn.linkIn) conn.countIn = count
//iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload)) }
} if chunk <= chunks && chunk == inChunks+1 {
} //iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload))
conn.peer.out = func (msg []byte) { inChunks += 1
defer func() { recover() }() inBuf = append(inBuf, payload...)
select { if chunks != chunk {
case conn.out<-msg: return
default: util_putBytes(msg) }
} msg := append(util_getBytes(), inBuf...)
} conn.peer.handlePacket(msg, conn.linkIn)
go func () { //iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload))
//var chunks [][]byte }
var out []byte }
for msg := range conn.out { conn.peer.out = func(msg []byte) {
var chunks [][]byte defer func() { recover() }()
bs := msg select {
for len(bs) > udp_chunkSize { case conn.out <- msg:
chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:] default:
} util_putBytes(msg)
chunks = append(chunks, bs) }
//iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg)) }
if len(chunks) > 255 { continue } go func() {
start := time.Now() //var chunks [][]byte
for idx,bs := range chunks { var out []byte
nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut for msg := range conn.out {
out = udp_encode(out[:0], nChunks, nChunk, count, bs) var chunks [][]byte
//iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs)) bs := msg
iface.sock.WriteToUDP(out, udpAddr) for len(bs) > udp_chunkSize {
} chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:]
timed := time.Since(start) }
conn.countOut += 1 chunks = append(chunks, bs)
conn.peer.updateBandwidth(len(msg), timed) //iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg))
util_putBytes(msg) if len(chunks) > 255 {
} continue
}() }
//*/ start := time.Now()
iface.mutex.Lock() for idx, bs := range chunks {
iface.conns[addr] = conn nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut
iface.mutex.Unlock() out = udp_encode(out[:0], nChunks, nChunk, count, bs)
themNodeID := getNodeID(&ks.box) //iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs))
themAddr := address_addrForNodeID(themNodeID) iface.sock.WriteToUDP(out, udpAddr)
themAddrString := net.IP(themAddr[:]).String() }
themString := fmt.Sprintf("%s@%s", themAddrString, addr) timed := time.Since(start)
iface.core.log.Println("Adding peer:", themString) conn.countOut += 1
go iface.startConn(conn) conn.peer.updateBandwidth(len(msg), timed)
go conn.peer.linkLoop(conn.linkIn) util_putBytes(msg)
iface.sendKeys(conn.addr) }
} }()
func() { //*/
defer func() { recover() }() iface.mutex.Lock()
select { iface.conns[addr] = conn
case conn.keysIn<-&ks: iface.mutex.Unlock()
default: themNodeID := getNodeID(&ks.box)
} themAddr := address_addrForNodeID(themNodeID)
}() themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, addr)
iface.core.log.Println("Adding peer:", themString)
go iface.startConn(conn)
go conn.peer.linkLoop(conn.linkIn)
iface.sendKeys(conn.addr)
}
func() {
defer func() { recover() }()
select {
case conn.keysIn <- &ks:
default:
}
}()
} }
func (iface *udpInterface) handlePacket(msg []byte, addr connAddr) { func (iface *udpInterface) handlePacket(msg []byte, addr connAddr) {
iface.mutex.RLock() iface.mutex.RLock()
if conn, isIn := iface.conns[addr]; isIn { if conn, isIn := iface.conns[addr]; isIn {
conn.in(msg) conn.in(msg)
} }
iface.mutex.RUnlock() iface.mutex.RUnlock()
} }
func (iface *udpInterface) reader() { func (iface *udpInterface) reader() {
bs := make([]byte, 2048) // This needs to be large enough for everything... bs := make([]byte, 2048) // This needs to be large enough for everything...
for { for {
//iface.core.log.Println("Starting read") //iface.core.log.Println("Starting read")
n, udpAddr, err := iface.sock.ReadFromUDP(bs) n, udpAddr, err := iface.sock.ReadFromUDP(bs)
//iface.core.log.Println("Read", n, udpAddr.String(), err) //iface.core.log.Println("Read", n, udpAddr.String(), err)
if err != nil { panic(err) ; break } if err != nil {
if n > 1500 { panic(n) } panic(err)
//msg := append(util_getBytes(), bs[:n]...) break
msg := bs[:n] }
addr := connAddr(udpAddr.String()) if n > 1500 {
if udp_isKeys(msg) { panic(n)
iface.handleKeys(msg, addr) }
} else { //msg := append(util_getBytes(), bs[:n]...)
iface.handlePacket(msg, addr) msg := bs[:n]
} addr := connAddr(udpAddr.String())
} if udp_isKeys(msg) {
iface.handleKeys(msg, addr)
} else {
iface.handlePacket(msg, addr)
}
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -263,13 +293,12 @@ func (iface *udpInterface) reader() {
const udp_chunkSize = 65535 const udp_chunkSize = 65535
func udp_decode(bs []byte) (chunks, chunk, count uint8, payload []byte) { func udp_decode(bs []byte) (chunks, chunk, count uint8, payload []byte) {
if len(bs) >= 3 { if len(bs) >= 3 {
chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:] chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
} }
return return
} }
func udp_encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte { func udp_encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte {
return append(append(out, chunks, chunk, count), payload...) return append(append(out, chunks, chunk, count), payload...)
} }

View File

@ -4,41 +4,42 @@ package yggdrasil
import "fmt" import "fmt"
import "runtime" import "runtime"
//import "sync" //import "sync"
func Util_testAddrIDMask() { func Util_testAddrIDMask() {
for idx := 0 ; idx < 16 ; idx++ { for idx := 0; idx < 16; idx++ {
var orig NodeID var orig NodeID
orig[8] = 42 orig[8] = 42
for bidx := 0 ; bidx < idx ; bidx++ { for bidx := 0; bidx < idx; bidx++ {
orig[bidx/8] |= (0x80 >> uint8(bidx % 8)) orig[bidx/8] |= (0x80 >> uint8(bidx%8))
} }
addr := address_addrForNodeID(&orig) addr := address_addrForNodeID(&orig)
nid, mask := addr.getNodeIDandMask() nid, mask := addr.getNodeIDandMask()
for b := 0 ; b < len(mask) ; b++ { for b := 0; b < len(mask); b++ {
nid[b] &= mask[b] nid[b] &= mask[b]
orig[b] &= mask[b] orig[b] &= mask[b]
} }
if *nid != orig { if *nid != orig {
fmt.Println(orig) fmt.Println(orig)
fmt.Println(*addr) fmt.Println(*addr)
fmt.Println(*nid) fmt.Println(*nid)
fmt.Println(*mask) fmt.Println(*mask)
panic(idx) panic(idx)
} }
} }
} }
func util_yield() { func util_yield() {
runtime.Gosched() runtime.Gosched()
} }
func util_lockthread() { func util_lockthread() {
runtime.LockOSThread() runtime.LockOSThread()
} }
func util_unlockthread() { func util_unlockthread() {
runtime.UnlockOSThread() runtime.UnlockOSThread()
} }
/* /*
@ -58,22 +59,23 @@ func util_putBytes(bs []byte) {
var byteStore chan []byte var byteStore chan []byte
func util_initByteStore() { func util_initByteStore() {
if byteStore == nil { if byteStore == nil {
byteStore = make(chan []byte, 32) byteStore = make(chan []byte, 32)
} }
} }
func util_getBytes() []byte { func util_getBytes() []byte {
select { select {
case bs := <-byteStore: return bs[:0] case bs := <-byteStore:
default: return nil return bs[:0]
} default:
return nil
}
} }
func util_putBytes(bs []byte) { func util_putBytes(bs []byte) {
select { select {
case byteStore<-bs: case byteStore <- bs:
default: default:
} }
} }

View File

@ -7,101 +7,107 @@ package yggdrasil
// TODO? make things still work after reordering (after things stabilize more?) // TODO? make things still work after reordering (after things stabilize more?)
// Type safety would also be nice, `type wire_type uint64`, rewrite as needed? // Type safety would also be nice, `type wire_type uint64`, rewrite as needed?
const ( const (
wire_Traffic = iota // data being routed somewhere, handle for crypto wire_Traffic = iota // data being routed somewhere, handle for crypto
wire_ProtocolTraffic // protocol traffic, pub keys for crypto wire_ProtocolTraffic // protocol traffic, pub keys for crypto
wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
wire_SwitchAnnounce // TODO put inside protocol traffic header wire_SwitchAnnounce // TODO put inside protocol traffic header
wire_SwitchHopRequest // TODO put inside protocol traffic header wire_SwitchHopRequest // TODO put inside protocol traffic header
wire_SwitchHop // TODO put inside protocol traffic header wire_SwitchHop // TODO put inside protocol traffic header
wire_SessionPing // inside protocol traffic header wire_SessionPing // inside protocol traffic header
wire_SessionPong // inside protocol traffic header wire_SessionPong // inside protocol traffic header
wire_DHTLookupRequest // inside protocol traffic header wire_DHTLookupRequest // inside protocol traffic header
wire_DHTLookupResponse // inside protocol traffic header wire_DHTLookupResponse // inside protocol traffic header
wire_SearchRequest // inside protocol traffic header wire_SearchRequest // inside protocol traffic header
wire_SearchResponse // inside protocol traffic header wire_SearchResponse // inside protocol traffic header
//wire_Keys // udp key packet (boxPub, sigPub) //wire_Keys // udp key packet (boxPub, sigPub)
) )
// Encode uint64 using a variable length scheme // Encode uint64 using a variable length scheme
// Similar to binary.Uvarint, but big-endian // Similar to binary.Uvarint, but big-endian
func wire_encode_uint64(elem uint64) []byte { func wire_encode_uint64(elem uint64) []byte {
return wire_put_uint64(elem, nil) return wire_put_uint64(elem, nil)
} }
// Occasionally useful for appending to an existing slice (if there's room) // Occasionally useful for appending to an existing slice (if there's room)
func wire_put_uint64(elem uint64, out []byte) []byte { func wire_put_uint64(elem uint64, out []byte) []byte {
bs := make([]byte, 0, 10) bs := make([]byte, 0, 10)
bs = append(bs, byte(elem & 0x7f)) bs = append(bs, byte(elem&0x7f))
for e := elem >> 7 ; e > 0 ; e >>= 7 { for e := elem >> 7; e > 0; e >>= 7 {
bs = append(bs, byte(e | 0x80)) bs = append(bs, byte(e|0x80))
} }
// Now reverse bytes, because we set them in the wrong order // Now reverse bytes, because we set them in the wrong order
// TODO just put them in the right place the first time... // TODO just put them in the right place the first time...
last := len(bs)-1 last := len(bs) - 1
for idx := 0 ; idx < len(bs)/2 ; idx++ { for idx := 0; idx < len(bs)/2; idx++ {
bs[idx], bs[last-idx] = bs[last-idx], bs[idx] bs[idx], bs[last-idx] = bs[last-idx], bs[idx]
} }
return append(out, bs...) return append(out, bs...)
} }
// Decode uint64 from a []byte slice // Decode uint64 from a []byte slice
// Returns the decoded uint64 and the number of bytes used // Returns the decoded uint64 and the number of bytes used
func wire_decode_uint64(bs []byte) (uint64, int) { func wire_decode_uint64(bs []byte) (uint64, int) {
length := 0 length := 0
elem := uint64(0) elem := uint64(0)
for _, b := range bs { for _, b := range bs {
elem <<= 7 elem <<= 7
elem |= uint64(b & 0x7f) elem |= uint64(b & 0x7f)
length++ length++
if b & 0x80 == 0 { break } if b&0x80 == 0 {
} break
return elem, length }
}
return elem, length
} }
func wire_intToUint(i int64) uint64 { func wire_intToUint(i int64) uint64 {
var u uint64 var u uint64
if i < 0 { if i < 0 {
u = uint64(-i) << 1 u = uint64(-i) << 1
u |= 0x01 // sign bit u |= 0x01 // sign bit
} else { } else {
u = uint64(i) << 1 u = uint64(i) << 1
} }
return u return u
} }
func wire_intFromUint(u uint64) int64 { func wire_intFromUint(u uint64) int64 {
var i int64 var i int64
i = int64(u >> 1) i = int64(u >> 1)
if u & 0x01 != 0 { i *= -1 } if u&0x01 != 0 {
return i i *= -1
}
return i
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Takes coords, returns coords prefixed with encoded coord length // Takes coords, returns coords prefixed with encoded coord length
func wire_encode_coords(coords []byte) ([]byte) { func wire_encode_coords(coords []byte) []byte {
coordLen := wire_encode_uint64(uint64(len(coords))) coordLen := wire_encode_uint64(uint64(len(coords)))
bs := make([]byte, 0, len(coordLen)+len(coords)) bs := make([]byte, 0, len(coordLen)+len(coords))
bs = append(bs, coordLen...) bs = append(bs, coordLen...)
bs = append(bs, coords...) bs = append(bs, coords...)
return bs return bs
} }
func wire_put_coords(coords []byte, bs []byte) ([]byte) { func wire_put_coords(coords []byte, bs []byte) []byte {
bs = wire_put_uint64(uint64(len(coords)), bs) bs = wire_put_uint64(uint64(len(coords)), bs)
bs = append(bs, coords...) bs = append(bs, coords...)
return bs return bs
} }
// Takes a packet that begins with coords (starting with coord length) // Takes a packet that begins with coords (starting with coord length)
// Returns a slice of coords and the number of bytes read // Returns a slice of coords and the number of bytes read
func wire_decode_coords(packet []byte) ([]byte, int) { func wire_decode_coords(packet []byte) ([]byte, int) {
coordLen, coordBegin := wire_decode_uint64(packet) coordLen, coordBegin := wire_decode_uint64(packet)
coordEnd := coordBegin+int(coordLen) coordEnd := coordBegin + int(coordLen)
//if coordBegin == 0 { panic("No coords found") } // Testing //if coordBegin == 0 { panic("No coords found") } // Testing
//if coordEnd > len(packet) { panic("Packet too short") } // Testing //if coordEnd > len(packet) { panic("Packet too short") } // Testing
if coordBegin == 0 || coordEnd > len(packet) { return nil, 0 } if coordBegin == 0 || coordEnd > len(packet) {
return packet[coordBegin:coordEnd], coordEnd return nil, 0
}
return packet[coordBegin:coordEnd], coordEnd
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -109,144 +115,171 @@ func wire_decode_coords(packet []byte) ([]byte, int) {
// Announces that we can send parts of a Message with a particular seq // Announces that we can send parts of a Message with a particular seq
type msgAnnounce struct { type msgAnnounce struct {
root sigPubKey root sigPubKey
tstamp int64 tstamp int64
seq uint64 seq uint64
len uint64 len uint64
//Deg uint64 //Deg uint64
//RSeq uint64 //RSeq uint64
} }
func (m *msgAnnounce) encode() []byte { func (m *msgAnnounce) encode() []byte {
bs := wire_encode_uint64(wire_SwitchAnnounce) bs := wire_encode_uint64(wire_SwitchAnnounce)
bs = append(bs, m.root[:]...) bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...) bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...) bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.len)...) bs = append(bs, wire_encode_uint64(m.len)...)
//bs = append(bs, wire_encode_uint64(m.Deg)...) //bs = append(bs, wire_encode_uint64(m.Deg)...)
//bs = append(bs, wire_encode_uint64(m.RSeq)...) //bs = append(bs, wire_encode_uint64(m.RSeq)...)
return bs return bs
} }
func (m *msgAnnounce) decode(bs []byte) bool { func (m *msgAnnounce) decode(bs []byte) bool {
var pType uint64 var pType uint64
var tstamp uint64 var tstamp uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SwitchAnnounce: return false return false
case !wire_chop_slice(m.root[:], &bs): return false case pType != wire_SwitchAnnounce:
case !wire_chop_uint64(&tstamp, &bs): return false return false
case !wire_chop_uint64(&m.seq, &bs): return false case !wire_chop_slice(m.root[:], &bs):
case !wire_chop_uint64(&m.len, &bs): return false return false
//case !wire_chop_uint64(&m.Deg, &bs): return false case !wire_chop_uint64(&tstamp, &bs):
//case !wire_chop_uint64(&m.RSeq, &bs): return false return false
} case !wire_chop_uint64(&m.seq, &bs):
m.tstamp = wire_intFromUint(tstamp) return false
return true case !wire_chop_uint64(&m.len, &bs):
return false
//case !wire_chop_uint64(&m.Deg, &bs): return false
//case !wire_chop_uint64(&m.RSeq, &bs): return false
}
m.tstamp = wire_intFromUint(tstamp)
return true
} }
type msgHopReq struct { type msgHopReq struct {
root sigPubKey root sigPubKey
tstamp int64 tstamp int64
seq uint64 seq uint64
hop uint64 hop uint64
} }
func (m *msgHopReq) encode() []byte { func (m *msgHopReq) encode() []byte {
bs := wire_encode_uint64(wire_SwitchHopRequest) bs := wire_encode_uint64(wire_SwitchHopRequest)
bs = append(bs, m.root[:]...) bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...) bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...) bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.hop)...) bs = append(bs, wire_encode_uint64(m.hop)...)
return bs return bs
} }
func (m *msgHopReq) decode(bs []byte) bool { func (m *msgHopReq) decode(bs []byte) bool {
var pType uint64 var pType uint64
var tstamp uint64 var tstamp uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SwitchHopRequest: return false return false
case !wire_chop_slice(m.root[:], &bs): return false case pType != wire_SwitchHopRequest:
case !wire_chop_uint64(&tstamp, &bs): return false return false
case !wire_chop_uint64(&m.seq, &bs): return false case !wire_chop_slice(m.root[:], &bs):
case !wire_chop_uint64(&m.hop, &bs): return false return false
} case !wire_chop_uint64(&tstamp, &bs):
m.tstamp = wire_intFromUint(tstamp) return false
return true case !wire_chop_uint64(&m.seq, &bs):
return false
case !wire_chop_uint64(&m.hop, &bs):
return false
}
m.tstamp = wire_intFromUint(tstamp)
return true
} }
type msgHop struct { type msgHop struct {
root sigPubKey root sigPubKey
tstamp int64 tstamp int64
seq uint64 seq uint64
hop uint64 hop uint64
port switchPort port switchPort
next sigPubKey next sigPubKey
sig sigBytes sig sigBytes
} }
func (m *msgHop) encode() []byte { func (m *msgHop) encode() []byte {
bs := wire_encode_uint64(wire_SwitchHop) bs := wire_encode_uint64(wire_SwitchHop)
bs = append(bs, m.root[:]...) bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...) bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...) bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.hop)...) bs = append(bs, wire_encode_uint64(m.hop)...)
bs = append(bs, wire_encode_uint64(uint64(m.port))...) bs = append(bs, wire_encode_uint64(uint64(m.port))...)
bs = append(bs, m.next[:]...) bs = append(bs, m.next[:]...)
bs = append(bs, m.sig[:]...) bs = append(bs, m.sig[:]...)
return bs return bs
} }
func (m *msgHop) decode(bs []byte) bool { func (m *msgHop) decode(bs []byte) bool {
var pType uint64 var pType uint64
var tstamp uint64 var tstamp uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SwitchHop: return false return false
case !wire_chop_slice(m.root[:], &bs): return false case pType != wire_SwitchHop:
case !wire_chop_uint64(&tstamp, &bs): return false return false
case !wire_chop_uint64(&m.seq, &bs): return false case !wire_chop_slice(m.root[:], &bs):
case !wire_chop_uint64(&m.hop, &bs): return false return false
case !wire_chop_uint64((*uint64)(&m.port), &bs): return false case !wire_chop_uint64(&tstamp, &bs):
case !wire_chop_slice(m.next[:], &bs): return false return false
case !wire_chop_slice(m.sig[:], &bs): return false case !wire_chop_uint64(&m.seq, &bs):
} return false
m.tstamp = wire_intFromUint(tstamp) case !wire_chop_uint64(&m.hop, &bs):
return true return false
case !wire_chop_uint64((*uint64)(&m.port), &bs):
return false
case !wire_chop_slice(m.next[:], &bs):
return false
case !wire_chop_slice(m.sig[:], &bs):
return false
}
m.tstamp = wire_intFromUint(tstamp)
return true
} }
// Format used to check signatures only, so no need to also support decoding // Format used to check signatures only, so no need to also support decoding
func wire_encode_locator(loc *switchLocator) []byte { func wire_encode_locator(loc *switchLocator) []byte {
coords := wire_encode_coords(loc.getCoords()) coords := wire_encode_coords(loc.getCoords())
var bs []byte var bs []byte
bs = append(bs, loc.root[:]...) bs = append(bs, loc.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...) bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...)
bs = append(bs, coords...) bs = append(bs, coords...)
return bs return bs
} }
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool { func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
if len(*fromSlice) < len(toSlice) { return false } if len(*fromSlice) < len(toSlice) {
copy(toSlice, *fromSlice) return false
*fromSlice = (*fromSlice)[len(toSlice):] }
return true copy(toSlice, *fromSlice)
*fromSlice = (*fromSlice)[len(toSlice):]
return true
} }
func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool { func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
coords, coordLen := wire_decode_coords(*fromSlice) coords, coordLen := wire_decode_coords(*fromSlice)
if coordLen == 0 { return false } if coordLen == 0 {
*toCoords = append((*toCoords)[:0], coords...) return false
*fromSlice = (*fromSlice)[coordLen:] }
return true *toCoords = append((*toCoords)[:0], coords...)
*fromSlice = (*fromSlice)[coordLen:]
return true
} }
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool { func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
dec, decLen := wire_decode_uint64(*fromSlice) dec, decLen := wire_decode_uint64(*fromSlice)
if decLen == 0 { return false } if decLen == 0 {
*toUInt64 = dec return false
*fromSlice = (*fromSlice)[decLen:] }
return true *toUInt64 = dec
*fromSlice = (*fromSlice)[decLen:]
return true
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -254,239 +287,289 @@ func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
// Wire traffic packets // Wire traffic packets
type wire_trafficPacket struct { type wire_trafficPacket struct {
ttl uint64 // TODO? hide this as a wire format detail, not set by user ttl uint64 // TODO? hide this as a wire format detail, not set by user
coords []byte coords []byte
handle handle handle handle
nonce boxNonce nonce boxNonce
payload []byte payload []byte
} }
// This is basically MarshalBinary, but decode doesn't allow that... // This is basically MarshalBinary, but decode doesn't allow that...
func (p *wire_trafficPacket) encode() []byte { func (p *wire_trafficPacket) encode() []byte {
bs := util_getBytes() bs := util_getBytes()
bs = wire_put_uint64(wire_Traffic, bs) bs = wire_put_uint64(wire_Traffic, bs)
bs = wire_put_uint64(p.ttl, bs) bs = wire_put_uint64(p.ttl, bs)
bs = wire_put_coords(p.coords, bs) bs = wire_put_coords(p.coords, bs)
bs = append(bs, p.handle[:]...) bs = append(bs, p.handle[:]...)
bs = append(bs, p.nonce[:]...) bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...) bs = append(bs, p.payload...)
return bs return bs
} }
// Not just UnmarshalBinary becuase the original slice isn't always copied from // Not just UnmarshalBinary becuase the original slice isn't always copied from
func (p *wire_trafficPacket) decode(bs []byte) bool { func (p *wire_trafficPacket) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_Traffic: return false return false
case !wire_chop_uint64(&p.ttl, &bs): return false case pType != wire_Traffic:
case !wire_chop_coords(&p.coords, &bs): return false return false
case !wire_chop_slice(p.handle[:], &bs): return false case !wire_chop_uint64(&p.ttl, &bs):
case !wire_chop_slice(p.nonce[:], &bs): return false return false
} case !wire_chop_coords(&p.coords, &bs):
p.payload = append(util_getBytes(), bs...) return false
return true case !wire_chop_slice(p.handle[:], &bs):
return false
case !wire_chop_slice(p.nonce[:], &bs):
return false
}
p.payload = append(util_getBytes(), bs...)
return true
} }
type wire_protoTrafficPacket struct { type wire_protoTrafficPacket struct {
ttl uint64 // TODO? hide this as a wire format detail, not set by user ttl uint64 // TODO? hide this as a wire format detail, not set by user
coords []byte coords []byte
toKey boxPubKey toKey boxPubKey
fromKey boxPubKey fromKey boxPubKey
nonce boxNonce nonce boxNonce
payload []byte payload []byte
} }
func (p *wire_protoTrafficPacket) encode() []byte { func (p *wire_protoTrafficPacket) encode() []byte {
coords := wire_encode_coords(p.coords) coords := wire_encode_coords(p.coords)
bs := wire_encode_uint64(wire_ProtocolTraffic) bs := wire_encode_uint64(wire_ProtocolTraffic)
bs = append(bs, wire_encode_uint64(p.ttl)...) bs = append(bs, wire_encode_uint64(p.ttl)...)
bs = append(bs, coords...) bs = append(bs, coords...)
bs = append(bs, p.toKey[:]...) bs = append(bs, p.toKey[:]...)
bs = append(bs, p.fromKey[:]...) bs = append(bs, p.fromKey[:]...)
bs = append(bs, p.nonce[:]...) bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...) bs = append(bs, p.payload...)
return bs return bs
} }
func(p *wire_protoTrafficPacket) decode(bs []byte) bool { func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_ProtocolTraffic: return false return false
case !wire_chop_uint64(&p.ttl, &bs): return false case pType != wire_ProtocolTraffic:
case !wire_chop_coords(&p.coords, &bs): return false return false
case !wire_chop_slice(p.toKey[:], &bs): return false case !wire_chop_uint64(&p.ttl, &bs):
case !wire_chop_slice(p.fromKey[:], &bs): return false return false
case !wire_chop_slice(p.nonce[:], &bs): return false case !wire_chop_coords(&p.coords, &bs):
} return false
p.payload = bs case !wire_chop_slice(p.toKey[:], &bs):
return true return false
case !wire_chop_slice(p.fromKey[:], &bs):
return false
case !wire_chop_slice(p.nonce[:], &bs):
return false
}
p.payload = bs
return true
} }
type wire_linkProtoTrafficPacket struct { type wire_linkProtoTrafficPacket struct {
toKey boxPubKey toKey boxPubKey
fromKey boxPubKey fromKey boxPubKey
nonce boxNonce nonce boxNonce
payload []byte payload []byte
} }
func (p *wire_linkProtoTrafficPacket) encode() []byte { func (p *wire_linkProtoTrafficPacket) encode() []byte {
bs := wire_encode_uint64(wire_LinkProtocolTraffic) bs := wire_encode_uint64(wire_LinkProtocolTraffic)
bs = append(bs, p.toKey[:]...) bs = append(bs, p.toKey[:]...)
bs = append(bs, p.fromKey[:]...) bs = append(bs, p.fromKey[:]...)
bs = append(bs, p.nonce[:]...) bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...) bs = append(bs, p.payload...)
return bs return bs
} }
func(p *wire_linkProtoTrafficPacket) decode(bs []byte) bool { func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_LinkProtocolTraffic: return false return false
case !wire_chop_slice(p.toKey[:], &bs): return false case pType != wire_LinkProtocolTraffic:
case !wire_chop_slice(p.fromKey[:], &bs): return false return false
case !wire_chop_slice(p.nonce[:], &bs): return false case !wire_chop_slice(p.toKey[:], &bs):
} return false
p.payload = bs case !wire_chop_slice(p.fromKey[:], &bs):
return true return false
case !wire_chop_slice(p.nonce[:], &bs):
return false
}
p.payload = bs
return true
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (p *sessionPing) encode() []byte { func (p *sessionPing) encode() []byte {
var pTypeVal uint64 var pTypeVal uint64
if p.isPong { if p.isPong {
pTypeVal = wire_SessionPong pTypeVal = wire_SessionPong
} else { } else {
pTypeVal = wire_SessionPing pTypeVal = wire_SessionPing
} }
bs := wire_encode_uint64(pTypeVal) bs := wire_encode_uint64(pTypeVal)
//p.sendPermPub used in top level (crypto), so skipped here //p.sendPermPub used in top level (crypto), so skipped here
bs = append(bs, p.handle[:]...) bs = append(bs, p.handle[:]...)
bs = append(bs, p.sendSesPub[:]...) bs = append(bs, p.sendSesPub[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...) bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...)
coords := wire_encode_coords(p.coords) coords := wire_encode_coords(p.coords)
bs = append(bs, coords...) bs = append(bs, coords...)
return bs return bs
} }
func (p *sessionPing) decode(bs []byte) bool { func (p *sessionPing) decode(bs []byte) bool {
var pType uint64 var pType uint64
var tstamp uint64 var tstamp uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SessionPing && pType != wire_SessionPong: return false return false
//p.sendPermPub used in top level (crypto), so skipped here case pType != wire_SessionPing && pType != wire_SessionPong:
case !wire_chop_slice(p.handle[:], &bs): return false return false
case !wire_chop_slice(p.sendSesPub[:], &bs): return false //p.sendPermPub used in top level (crypto), so skipped here
case !wire_chop_uint64(&tstamp, &bs): return false case !wire_chop_slice(p.handle[:], &bs):
case !wire_chop_coords(&p.coords, &bs): return false return false
} case !wire_chop_slice(p.sendSesPub[:], &bs):
p.tstamp = wire_intFromUint(tstamp) return false
if pType == wire_SessionPong { p.isPong = true } case !wire_chop_uint64(&tstamp, &bs):
return true return false
case !wire_chop_coords(&p.coords, &bs):
return false
}
p.tstamp = wire_intFromUint(tstamp)
if pType == wire_SessionPong {
p.isPong = true
}
return true
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (r *dhtReq) encode() []byte { func (r *dhtReq) encode() []byte {
coords := wire_encode_coords(r.coords) coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_DHTLookupRequest) bs := wire_encode_uint64(wire_DHTLookupRequest)
bs = append(bs, r.key[:]...) bs = append(bs, r.key[:]...)
bs = append(bs, coords...) bs = append(bs, coords...)
bs = append(bs, r.dest[:]...) bs = append(bs, r.dest[:]...)
return bs return bs
} }
func (r *dhtReq) decode(bs []byte) bool { func (r *dhtReq) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_DHTLookupRequest: return false return false
case !wire_chop_slice(r.key[:], &bs): return false case pType != wire_DHTLookupRequest:
case !wire_chop_coords(&r.coords, &bs): return false return false
case !wire_chop_slice(r.dest[:], &bs): return false case !wire_chop_slice(r.key[:], &bs):
default: return true return false
} case !wire_chop_coords(&r.coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
return false
default:
return true
}
} }
func (r *dhtRes) encode() []byte { func (r *dhtRes) encode() []byte {
coords := wire_encode_coords(r.coords) coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_DHTLookupResponse) bs := wire_encode_uint64(wire_DHTLookupResponse)
bs = append(bs, r.key[:]...) bs = append(bs, r.key[:]...)
bs = append(bs, coords...) bs = append(bs, coords...)
bs = append(bs, r.dest[:]...) bs = append(bs, r.dest[:]...)
for _, info := range r.infos { for _, info := range r.infos {
coords = wire_encode_coords(info.coords) coords = wire_encode_coords(info.coords)
bs = append(bs, info.key[:]...) bs = append(bs, info.key[:]...)
bs = append(bs, coords...) bs = append(bs, coords...)
} }
return bs return bs
} }
func (r *dhtRes) decode(bs []byte) bool { func (r *dhtRes) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_DHTLookupResponse: return false return false
case !wire_chop_slice(r.key[:], &bs): return false case pType != wire_DHTLookupResponse:
case !wire_chop_coords(&r.coords, &bs): return false return false
case !wire_chop_slice(r.dest[:], &bs): return false case !wire_chop_slice(r.key[:], &bs):
} return false
for len(bs) > 0 { case !wire_chop_coords(&r.coords, &bs):
info := dhtInfo{} return false
switch { case !wire_chop_slice(r.dest[:], &bs):
case !wire_chop_slice(info.key[:], &bs): return false return false
case !wire_chop_coords(&info.coords, &bs): return false }
} for len(bs) > 0 {
r.infos = append(r.infos, &info) info := dhtInfo{}
} switch {
return true case !wire_chop_slice(info.key[:], &bs):
return false
case !wire_chop_coords(&info.coords, &bs):
return false
}
r.infos = append(r.infos, &info)
}
return true
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
func (r *searchReq) encode() []byte { func (r *searchReq) encode() []byte {
coords := wire_encode_coords(r.coords) coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_SearchRequest) bs := wire_encode_uint64(wire_SearchRequest)
bs = append(bs, r.key[:]...) bs = append(bs, r.key[:]...)
bs = append(bs, coords...) bs = append(bs, coords...)
bs = append(bs, r.dest[:]...) bs = append(bs, r.dest[:]...)
return bs return bs
} }
func (r *searchReq) decode(bs []byte) bool { func (r *searchReq) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SearchRequest: return false return false
case !wire_chop_slice(r.key[:], &bs): return false case pType != wire_SearchRequest:
case !wire_chop_coords(&r.coords, &bs): return false return false
case !wire_chop_slice(r.dest[:], &bs): return false case !wire_chop_slice(r.key[:], &bs):
default: return true return false
} case !wire_chop_coords(&r.coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
return false
default:
return true
}
} }
func (r *searchRes) encode() []byte { func (r *searchRes) encode() []byte {
coords := wire_encode_coords(r.coords) coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_SearchResponse) bs := wire_encode_uint64(wire_SearchResponse)
bs = append(bs, r.key[:]...) bs = append(bs, r.key[:]...)
bs = append(bs, coords...) bs = append(bs, coords...)
bs = append(bs, r.dest[:]...) bs = append(bs, r.dest[:]...)
return bs return bs
} }
func (r *searchRes) decode(bs []byte) bool { func (r *searchRes) decode(bs []byte) bool {
var pType uint64 var pType uint64
switch { switch {
case !wire_chop_uint64(&pType, &bs): return false case !wire_chop_uint64(&pType, &bs):
case pType != wire_SearchResponse: return false return false
case !wire_chop_slice(r.key[:], &bs): return false case pType != wire_SearchResponse:
case !wire_chop_coords(&r.coords, &bs): return false return false
case !wire_chop_slice(r.dest[:], &bs): return false case !wire_chop_slice(r.key[:], &bs):
default: return true return false
} case !wire_chop_coords(&r.coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
return false
default:
return true
}
} }

View File

@ -25,133 +25,172 @@ import . "yggdrasil"
* It can generate a new config (--genconf) * It can generate a new config (--genconf)
* It can read a config from stdin (--useconf) * It can read a config from stdin (--useconf)
* It can run with an automatic config (--autoconf) * It can run with an automatic config (--autoconf)
*/ */
type nodeConfig struct { type nodeConfig struct {
Listen string Listen string
Peers []string Peers []string
BoxPub string BoxPub string
BoxPriv string BoxPriv string
SigPub string SigPub string
SigPriv string SigPriv string
Multicast bool Multicast bool
IfName string IfName string
} }
type node struct { type node struct {
core Core core Core
sock *ipv6.PacketConn sock *ipv6.PacketConn
} }
func (n *node) init(cfg *nodeConfig, logger *log.Logger) { func (n *node) init(cfg *nodeConfig, logger *log.Logger) {
boxPub, err := hex.DecodeString(cfg.BoxPub) boxPub, err := hex.DecodeString(cfg.BoxPub)
if err != nil { panic(err) } if err != nil {
boxPriv, err := hex.DecodeString(cfg.BoxPriv) panic(err)
if err != nil { panic(err) } }
sigPub, err := hex.DecodeString(cfg.SigPub) boxPriv, err := hex.DecodeString(cfg.BoxPriv)
if err != nil { panic(err) } if err != nil {
sigPriv, err := hex.DecodeString(cfg.SigPriv) panic(err)
if err != nil { panic(err) } }
n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv) sigPub, err := hex.DecodeString(cfg.SigPub)
n.core.DEBUG_setLogger(logger) if err != nil {
logger.Println("Starting interface...") panic(err)
n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen) }
logger.Println("Started interface") sigPriv, err := hex.DecodeString(cfg.SigPriv)
go func () { if err != nil {
if len(cfg.Peers) == 0 { return } panic(err)
for { }
for _, p := range cfg.Peers { n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
n.core.DEBUG_maybeSendUDPKeys(p) n.core.DEBUG_setLogger(logger)
time.Sleep(time.Second) logger.Println("Starting interface...")
} n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen)
time.Sleep(time.Minute) logger.Println("Started interface")
} go func() {
}() if len(cfg.Peers) == 0 {
return
}
for {
for _, p := range cfg.Peers {
n.core.DEBUG_maybeSendUDPKeys(p)
time.Sleep(time.Second)
}
time.Sleep(time.Minute)
}
}()
} }
func generateConfig() *nodeConfig { func generateConfig() *nodeConfig {
core := Core{} core := Core{}
bpub, bpriv := core.DEBUG_newBoxKeys() bpub, bpriv := core.DEBUG_newBoxKeys()
spub, spriv := core.DEBUG_newSigKeys() spub, spriv := core.DEBUG_newSigKeys()
cfg := nodeConfig{} cfg := nodeConfig{}
cfg.Listen = "[::]:0" cfg.Listen = "[::]:0"
cfg.BoxPub = hex.EncodeToString(bpub[:]) cfg.BoxPub = hex.EncodeToString(bpub[:])
cfg.BoxPriv = hex.EncodeToString(bpriv[:]) cfg.BoxPriv = hex.EncodeToString(bpriv[:])
cfg.SigPub = hex.EncodeToString(spub[:]) cfg.SigPub = hex.EncodeToString(spub[:])
cfg.SigPriv = hex.EncodeToString(spriv[:]) cfg.SigPriv = hex.EncodeToString(spriv[:])
cfg.Peers = []string{} cfg.Peers = []string{}
cfg.Multicast = true cfg.Multicast = true
cfg.IfName = "auto" cfg.IfName = "auto"
return &cfg return &cfg
} }
func doGenconf() string { func doGenconf() string {
cfg := generateConfig() cfg := generateConfig()
bs, err := json.MarshalIndent(cfg, "", " ") bs, err := json.MarshalIndent(cfg, "", " ")
if err != nil { panic(err) } if err != nil {
return string(bs) panic(err)
}
return string(bs)
} }
var multicastAddr = "[ff02::114]:9001" var multicastAddr = "[ff02::114]:9001"
func (n *node) listen() { func (n *node) listen() {
groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr) groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
if err != nil { panic(err) } if err != nil {
bs := make([]byte, 2048) panic(err)
for { }
nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs) bs := make([]byte, 2048)
if err != nil { panic(err) } for {
//if rcm == nil { continue } // wat nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
//fmt.Println("DEBUG:", "packet from:", fromAddr.String()) if err != nil {
if !rcm.Dst.IsLinkLocalMulticast() { continue } panic(err)
if !rcm.Dst.Equal(groupAddr.IP) { continue } }
anAddr := string(bs[:nBytes]) //if rcm == nil { continue } // wat
addr, err := net.ResolveUDPAddr("udp6", anAddr) //fmt.Println("DEBUG:", "packet from:", fromAddr.String())
if err != nil { panic(err) ; continue } // Panic for testing, remove later if !rcm.Dst.IsLinkLocalMulticast() {
from := fromAddr.(*net.UDPAddr) continue
//fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String()) }
if addr.IP.String() != from.IP.String() { continue } if !rcm.Dst.Equal(groupAddr.IP) {
addr.Zone = from.Zone continue
saddr := addr.String() }
//if _, isIn := n.peers[saddr]; isIn { continue } anAddr := string(bs[:nBytes])
//n.peers[saddr] = struct{}{} addr, err := net.ResolveUDPAddr("udp6", anAddr)
n.core.DEBUG_maybeSendUDPKeys(saddr) if err != nil {
//fmt.Println("DEBUG:", "added multicast peer:", saddr) panic(err)
} continue
} // Panic for testing, remove later
from := fromAddr.(*net.UDPAddr)
//fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
if addr.IP.String() != from.IP.String() {
continue
}
addr.Zone = from.Zone
saddr := addr.String()
//if _, isIn := n.peers[saddr]; isIn { continue }
//n.peers[saddr] = struct{}{}
n.core.DEBUG_maybeSendUDPKeys(saddr)
//fmt.Println("DEBUG:", "added multicast peer:", saddr)
}
} }
func (n *node) announce() { func (n *node) announce() {
groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr) groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
if err != nil { panic(err) } if err != nil {
udpaddr := n.core.DEBUG_getGlobalUDPAddr() panic(err)
anAddr, err := net.ResolveUDPAddr("udp6", udpaddr.String()) }
if err != nil { panic(err) } udpaddr := n.core.DEBUG_getGlobalUDPAddr()
destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr) anAddr, err := net.ResolveUDPAddr("udp6", udpaddr.String())
if err != nil { panic(err) } if err != nil {
for { panic(err)
ifaces, err := net.Interfaces() }
if err != nil { panic(err) } destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
for _, iface := range ifaces { if err != nil {
n.sock.JoinGroup(&iface, groupAddr) panic(err)
//err := n.sock.JoinGroup(&iface, groupAddr) }
//if err != nil { panic(err) } for {
addrs, err := iface.Addrs() ifaces, err := net.Interfaces()
if err != nil { panic(err) } if err != nil {
for _, addr := range addrs { panic(err)
addrIP, _, _ := net.ParseCIDR(addr.String()) }
if addrIP.To4() != nil { continue } // IPv6 only for _, iface := range ifaces {
if !addrIP.IsLinkLocalUnicast() { continue } n.sock.JoinGroup(&iface, groupAddr)
anAddr.IP = addrIP //err := n.sock.JoinGroup(&iface, groupAddr)
anAddr.Zone = iface.Name //if err != nil { panic(err) }
destAddr.Zone = iface.Name addrs, err := iface.Addrs()
msg := []byte(anAddr.String()) if err != nil {
n.sock.WriteTo(msg, nil, destAddr) panic(err)
break }
} for _, addr := range addrs {
time.Sleep(time.Second) addrIP, _, _ := net.ParseCIDR(addr.String())
} if addrIP.To4() != nil {
time.Sleep(time.Second) continue
} } // IPv6 only
if !addrIP.IsLinkLocalUnicast() {
continue
}
anAddr.IP = addrIP
anAddr.Zone = iface.Name
destAddr.Zone = iface.Name
msg := []byte(anAddr.String())
n.sock.WriteTo(msg, nil, destAddr)
break
}
time.Sleep(time.Second)
}
time.Sleep(time.Second)
}
} }
// Command-line flags controlling how this debug build runs.
var (
	pprof    = flag.Bool("pprof", false, "Run pprof, see http://localhost:6060/debug/pprof/")
	useconf  = flag.Bool("useconf", false, "read config from stdin")
	autoconf = flag.Bool("autoconf", false, "automatic mode (dynamic IP, peer with IPv6 neighbors)")
)
// main is the entry point for this Yggdrasil debug build.  It picks a
// configuration source based on the flags (-autoconf, -useconf, -genconf),
// brings up the core and the TUN device, optionally starts link-local
// multicast peer discovery, and then blocks until interrupted.
func main() {
	flag.Parse()
	var cfg *nodeConfig
	switch {
	case *autoconf:
		cfg = generateConfig()
	case *useconf:
		// Read the entire config from stdin, then JSON-decode it.
		config, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			panic(err)
		}
		decoder := json.NewDecoder(bytes.NewReader(config))
		err = decoder.Decode(&cfg)
		if err != nil {
			panic(err)
		}
	case *genconf:
		fmt.Println(doGenconf())
	default:
		flag.PrintDefaults()
	}
	// -genconf and the flagless default leave cfg nil: nothing to run.
	if cfg == nil {
		return
	}
	logger := log.New(os.Stdout, "", log.Flags())
	if *pprof {
		// Expose pprof endpoints and enable blocking profiles.
		runtime.SetBlockProfileRate(1)
		go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
	}
	// Setup
	logger.Println("Initializing...")
	n := node{}
	n.init(cfg, logger)
	logger.Println("Starting tun...")
	n.core.DEBUG_startTun(cfg.IfName) // 1280, the smallest supported MTU
	//n.core.DEBUG_startTunWithMTU(cfg.IfName, 65535) // Largest supported MTU
	defer func() {
		logger.Println("Closing...")
		n.core.DEBUG_stopTun()
	}()
	logger.Println("Started...")
	if cfg.Multicast {
		// Listen on the multicast discovery port on all interfaces.
		addr, err := net.ResolveUDPAddr("udp", multicastAddr)
		if err != nil {
			panic(err)
		}
		listenString := fmt.Sprintf("[::]:%v", addr.Port)
		conn, err := net.ListenPacket("udp6", listenString)
		if err != nil {
			panic(err)
		}
		//defer conn.Close() // Let it close on its own when the application exits
		n.sock = ipv6.NewPacketConn(conn)
		// FlagDst lets listen() verify the packet's destination address.
		if err = n.sock.SetControlMessage(ipv6.FlagDst, true); err != nil {
			panic(err)
		}
		go n.listen()
		go n.announce()
	}
	// Catch interrupt to exit gracefully
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	logger.Println("Stopping...")
}