Mirror of https://github.com/cwinfo/yggdrasil-go.git (synced 2024-11-09 16:20:26 +00:00)
Merge pull request #1 from neilalexander/master
Add support for tun ifname on Linux, run gofmt

Commit: b76fcbb402
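For context, the gofmt half of this change is mechanical; formatting cleanups like the hunks below are typically produced by running the standard formatter over the tree (a sketch, assuming a stock Go toolchain):

    gofmt -w .
    # or, per package:
    go fmt ./...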
@@ -20,58 +20,67 @@ import . "yggdrasil"
 var doSig = flag.Bool("sig", false, "generate new signing keys instead")

 func main() {
     flag.Parse()
     switch {
-    case *doSig: doSigKeys()
-    default: doBoxKeys()
+    case *doSig:
+        doSigKeys()
+    default:
+        doBoxKeys()
     }
 }

 func isBetter(oldID, newID []byte) bool {
     for idx := range oldID {
-        if newID[idx] > oldID[idx] { return true }
-        if newID[idx] < oldID[idx] { return false }
+        if newID[idx] > oldID[idx] {
+            return true
+        }
+        if newID[idx] < oldID[idx] {
+            return false
+        }
     }
     return false
 }

 func doBoxKeys() {
     c := Core{}
     pub, _ := c.DEBUG_newBoxKeys()
     bestID := c.DEBUG_getNodeID(pub)
     for idx := range bestID {
         bestID[idx] = 0
     }
     for {
         pub, priv := c.DEBUG_newBoxKeys()
         id := c.DEBUG_getNodeID(pub)
-        if !isBetter(bestID[:], id[:]) { continue }
+        if !isBetter(bestID[:], id[:]) {
+            continue
+        }
         bestID = id
         ip := c.DEBUG_addrForNodeID(id)
         fmt.Println("--------------------------------------------------------------------------------")
         fmt.Println("boxPriv:", hex.EncodeToString(priv[:]))
         fmt.Println("boxPub:", hex.EncodeToString(pub[:]))
         fmt.Println("NodeID:", hex.EncodeToString(id[:]))
         fmt.Println("IP:", ip)
     }
 }

 func doSigKeys() {
     c := Core{}
     pub, _ := c.DEBUG_newSigKeys()
     bestID := c.DEBUG_getTreeID(pub)
     for idx := range bestID {
         bestID[idx] = 0
     }
     for {
         pub, priv := c.DEBUG_newSigKeys()
         id := c.DEBUG_getTreeID(pub)
-        if !isBetter(bestID[:], id[:]) { continue }
+        if !isBetter(bestID[:], id[:]) {
+            continue
+        }
         bestID = id
         fmt.Println("--------------------------------------------------------------------------------")
         fmt.Println("sigPriv:", hex.EncodeToString(priv[:]))
         fmt.Println("sigPub:", hex.EncodeToString(pub[:]))
         fmt.Println("TreeID:", hex.EncodeToString(id[:]))
     }
 }
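A note on the search above: isBetter is a plain lexicographic byte comparison, so the loops keep whichever key hashes to the largest NodeID or TreeID (the most leading one bits). For equal-length IDs it matches a standard-library comparison; a minimal standalone sketch (illustrative, not part of the diff):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        oldID := []byte{0xf0, 0x00}
        newID := []byte{0xf8, 0x00} // more leading 1 bits, so "better"
        // Equivalent to isBetter(oldID, newID) above for equal-length slices:
        fmt.Println(bytes.Compare(newID, oldID) > 0) // true
    }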
@@ -15,153 +15,157 @@ import "router"
 ////////////////////////////////////////////////////////////////////////////////

 type Node struct {
     nodeID router.NodeID
     table  router.Table
     links  []*Node
 }

 func (n *Node) init(nodeID router.NodeID) {
     n.nodeID = nodeID
     n.table.Init(nodeID)
     n.links = append(n.links, n)
 }

 func linkNodes(m, n *Node) {
     for _, o := range m.links {
         if o.nodeID == n.nodeID {
             // Don't allow duplicates
             return
         }
     }
     m.links = append(m.links, n)
     n.links = append(n.links, m)
 }

 func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node {
     store := make(map[router.NodeID]*Node)
-    nNodes := sideLength*sideLength
+    nNodes := sideLength * sideLength
     nodeIDs := make([]router.NodeID, 0, nNodes)
     // TODO shuffle nodeIDs
-    for nodeID := 1 ; nodeID <= nNodes ; nodeID++ {
+    for nodeID := 1; nodeID <= nNodes; nodeID++ {
         nodeIDs = append(nodeIDs, router.NodeID(nodeID))
     }
     for _, nodeID := range nodeIDs {
         node := &Node{}
         node.init(nodeID)
         store[nodeID] = node
     }
-    for idx := 0 ; idx < nNodes ; idx++ {
+    for idx := 0; idx < nNodes; idx++ {
         if (idx % sideLength) != 0 {
             linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]])
         }
         if idx >= sideLength {
             linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]])
         }
     }
     return store
 }

 func loadGraph(path string) map[router.NodeID]*Node {
     f, err := os.Open(path)
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     defer f.Close()
     store := make(map[router.NodeID]*Node)
     s := bufio.NewScanner(f)
     for s.Scan() {
         line := s.Text()
         nodeIDstrs := strings.Split(line, " ")
         nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0])
         nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1])
         nodeID0 := router.NodeID(nodeIDi0)
         nodeID1 := router.NodeID(nodeIDi1)
         if store[nodeID0] == nil {
             node := &Node{}
             node.init(nodeID0)
             store[nodeID0] = node
         }
         if store[nodeID1] == nil {
             node := &Node{}
             node.init(nodeID1)
             store[nodeID1] = node
         }
         linkNodes(store[nodeID0], store[nodeID1])
     }
     return store
 }

 ////////////////////////////////////////////////////////////////////////////////

 func idleUntilConverged(store map[router.NodeID]*Node) {
     timeOfLastChange := 0
     step := 0
     // Idle until the network has converged
-    for step - timeOfLastChange < 4*router.TIMEOUT {
+    for step-timeOfLastChange < 4*router.TIMEOUT {
         step++
         fmt.Println("Step:", step, "--", "last change:", timeOfLastChange)
         for _, node := range store {
             node.table.Tick()
             for idx, link := range node.links[1:] {
                 msg := node.table.CreateMessage(router.Iface(idx))
                 for idx, fromNode := range link.links {
                     if fromNode == node {
                         //fmt.Println("Sending from node", node.nodeID, "to", link.nodeID)
                         link.table.HandleMessage(msg, router.Iface(idx))
                         break
                     }
                 }
             }
         }
         //for _, node := range store {
         //	if node.table.DEBUG_isDirty() { timeOfLastChange = step }
         //}
         //time.Sleep(10*time.Millisecond)
     }
 }

 func testPaths(store map[router.NodeID]*Node) {
     nNodes := len(store)
     nodeIDs := make([]router.NodeID, 0, nNodes)
     for nodeID := range store {
         nodeIDs = append(nodeIDs, nodeID)
     }
     lookups := 0
     count := 0
     start := time.Now()
     for _, source := range store {
         count++
         fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID)
         for _, dest := range store {
             //if source == dest { continue }
             destLoc := dest.table.GetLocator()
             temp := 0
-            for here := source ; here != dest ; {
+            for here := source; here != dest; {
                 temp++
-                if temp > 16 { panic("Loop?") }
+                if temp > 16 {
+                    panic("Loop?")
+                }
                 next := here.links[here.table.Lookup(destLoc)]
                 if next == here {
                     //for idx, link := range here.links {
                     //	fmt.Println("DUMP:", idx, link.nodeID)
                     //}
                     panic(fmt.Sprintln("Routing Loop:",
                         source.nodeID,
                         here.nodeID,
                         dest.nodeID))
                 }
                 //fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID)
                 here = next
                 lookups++
             }
         }
     }
     timed := time.Since(start)
     fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds())
 }

 func dumpStore(store map[router.NodeID]*Node) {
     for _, node := range store {
         fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator())
         node.table.DEBUG_dumpTable()
     }
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -169,25 +173,25 @@ func dumpStore(store map[router.NodeID]*Node) {
 var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")

 func main() {
     flag.Parse()
     if *cpuprofile != "" {
         f, err := os.Create(*cpuprofile)
         if err != nil {
             panic(fmt.Sprintf("could not create CPU profile: %v", err))
         }
         if err := pprof.StartCPUProfile(f); err != nil {
             panic(fmt.Sprintf("could not start CPU profile: %v", err))
         }
         defer pprof.StopCPUProfile()
     }
     fmt.Println("Test")
     store := makeStoreSquareGrid(4)
     idleUntilConverged(store)
     dumpStore(store)
     testPaths(store)
     //panic("DYING")
     store = loadGraph("hype-2016-09-19.list")
     idleUntilConverged(store)
     dumpStore(store)
     testPaths(store)
 }
@@ -15,347 +15,366 @@ import . "yggdrasil"
 ////////////////////////////////////////////////////////////////////////////////

 type Node struct {
     index int
     core  Core
     send  chan<- []byte
     recv  <-chan []byte
 }

 func (n *Node) init(index int) {
     n.index = index
     n.core.Init()
     n.send = n.core.DEBUG_getSend()
     n.recv = n.core.DEBUG_getRecv()
 }

 func (n *Node) printTraffic() {
     for {
         packet := <-n.recv
         fmt.Println(n.index, packet)
         //panic("Got a packet")
     }
 }

 func (n *Node) startPeers() {
     //for _, p := range n.core.Peers.Ports {
     //	go p.MainLoop()
     //}
     //go n.printTraffic()
     //n.core.Peers.DEBUG_startPeers()
 }

 func linkNodes(m, n *Node) {
     // Don't allow duplicates
-    if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) { return }
+    if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) {
+        return
+    }
     // Create peers
     // Buffering reduces packet loss in the sim
     // This slightly speeds up testing (fewer delays before retrying a ping)
     p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getBoxPub(),
         n.core.DEBUG_getSigPub())
     q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getBoxPub(),
         m.core.DEBUG_getSigPub())
     DEBUG_simLinkPeers(p, q)
     return
 }

 func makeStoreSquareGrid(sideLength int) map[int]*Node {
     store := make(map[int]*Node)
-    nNodes := sideLength*sideLength
+    nNodes := sideLength * sideLength
     idxs := make([]int, 0, nNodes)
     // TODO shuffle nodeIDs
-    for idx := 1 ; idx <= nNodes ; idx++ {
+    for idx := 1; idx <= nNodes; idx++ {
         idxs = append(idxs, idx)
     }
     for _, idx := range idxs {
         node := &Node{}
         node.init(idx)
         store[idx] = node
     }
-    for idx := 0 ; idx < nNodes ; idx++ {
+    for idx := 0; idx < nNodes; idx++ {
         if (idx % sideLength) != 0 {
             linkNodes(store[idxs[idx]], store[idxs[idx-1]])
         }
         if idx >= sideLength {
             linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
         }
     }
     //for _, node := range store { node.initPorts() }
     return store
 }

 func makeStoreStar(nNodes int) map[int]*Node {
     store := make(map[int]*Node)
     center := &Node{}
     center.init(0)
     store[0] = center
-    for idx := 1 ; idx < nNodes ; idx++ {
+    for idx := 1; idx < nNodes; idx++ {
         node := &Node{}
         node.init(idx)
         store[idx] = node
         linkNodes(center, node)
     }
     return store
 }

 func loadGraph(path string) map[int]*Node {
     f, err := os.Open(path)
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     defer f.Close()
     store := make(map[int]*Node)
     s := bufio.NewScanner(f)
     for s.Scan() {
         line := s.Text()
         nodeIdxstrs := strings.Split(line, " ")
         nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0])
         nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1])
         if store[nodeIdx0] == nil {
             node := &Node{}
             node.init(nodeIdx0)
             store[nodeIdx0] = node
         }
         if store[nodeIdx1] == nil {
             node := &Node{}
             node.init(nodeIdx1)
             store[nodeIdx1] = node
         }
         linkNodes(store[nodeIdx0], store[nodeIdx1])
     }
     //for _, node := range store { node.initPorts() }
     return store
 }

 ////////////////////////////////////////////////////////////////////////////////

 func startNetwork(store map[[32]byte]*Node) {
     for _, node := range store {
         node.startPeers()
     }
 }

 func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
     newStore := make(map[[32]byte]*Node)
     for _, node := range store {
         newStore[node.core.DEBUG_getSigPub()] = node
     }
     return newStore
 }

 func testPaths(store map[[32]byte]*Node) bool {
     nNodes := len(store)
     count := 0
     for _, source := range store {
         count++
         fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index)
         for _, dest := range store {
             //if source == dest { continue }
             destLoc := dest.core.DEBUG_getLocator()
             coords := destLoc.DEBUG_getCoords()
             temp := 0
             ttl := ^uint64(0)
             oldTTL := ttl
-            for here := source ; here != dest ; {
+            for here := source; here != dest; {
                 if ttl == 0 {
                     fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
                     return false
                 }
                 temp++
-                if temp > 4096 { panic("Loop?") }
+                if temp > 4096 {
+                    panic("Loop?")
+                }
                 oldTTL = ttl
                 nextPort, newTTL := here.core.DEBUG_switchLookup(coords, ttl)
                 ttl = newTTL
                 // First check if "here" is accepting packets from the previous node
                 // TODO explain how this works
                 ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
                 nextPeer := ports[nextPort]
                 if nextPeer == nil {
                     fmt.Println("Peer associated with next port is nil")
                     return false
                 }
                 next := store[nextPeer.DEBUG_getSigKey()]
                 /*
                     if next == here {
                         //for idx, link := range here.links {
                         //	fmt.Println("DUMP:", idx, link.nodeID)
                         //}
                         if nextPort != 0 { panic("This should not be") }
                         fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl)
                         //here.table.DEBUG_dumpTable()
                         //fmt.Println("Ports:", here.nodeID, here.ports)
                         return false
                         panic(fmt.Sprintln("Routing Loop:",
                             source.index,
                             here.index,
                             dest.index))
                     }
                 */
                 if temp > 4090 {
                     fmt.Println("DEBUG:",
                         source.index, source.core.DEBUG_getLocator(),
                         here.index, here.core.DEBUG_getLocator(),
                         dest.index, dest.core.DEBUG_getLocator())
                     here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
                 }
-                if (here != source) {
+                if here != source {
                     // This is sufficient to check for routing loops or blackholes
                     //break
                 }
                 here = next
             }
         }
     }
     return true
 }

 func stressTest(store map[[32]byte]*Node) {
     fmt.Println("Stress testing network...")
     nNodes := len(store)
     dests := make([][]byte, 0, nNodes)
     for _, dest := range store {
         loc := dest.core.DEBUG_getLocator()
         coords := loc.DEBUG_getCoords()
         dests = append(dests, coords)
     }
     lookups := 0
     start := time.Now()
     for _, source := range store {
         for _, coords := range dests {
             source.core.DEBUG_switchLookup(coords, ^uint64(0))
             lookups++
         }
     }
     timed := time.Since(start)
     fmt.Printf("%d lookups in %s (%f lookups per second)\n",
         lookups,
         timed,
         float64(lookups)/timed.Seconds())
 }

 func pingNodes(store map[[32]byte]*Node) {
     fmt.Println("Sending pings...")
     nNodes := len(store)
     count := 0
-    equiv := func (a []byte, b []byte) bool {
-        if len(a) != len(b) { return false }
-        for idx := 0 ; idx < len(a) ; idx++ {
-            if a[idx] != b[idx] { return false }
+    equiv := func(a []byte, b []byte) bool {
+        if len(a) != len(b) {
+            return false
+        }
+        for idx := 0; idx < len(a); idx++ {
+            if a[idx] != b[idx] {
+                return false
+            }
         }
         return true
     }
     for _, source := range store {
         count++
         //if count > 16 { break }
         fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
         sourceKey := source.core.DEBUG_getBoxPub()
         payload := sourceKey[:]
         sourceAddr := source.core.DEBUG_getAddr()[:]
-        sendTo := func (bs []byte, destAddr []byte) {
+        sendTo := func(bs []byte, destAddr []byte) {
             packet := make([]byte, 40+len(bs))
             copy(packet[8:24], sourceAddr)
             copy(packet[24:40], destAddr)
             copy(packet[40:], bs)
-            source.send<-packet
+            source.send <- packet
         }
         destCount := 0
         for _, dest := range store {
             destCount += 1
             fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount)
             if dest == source {
                 fmt.Println("Skipping self")
                 continue
             }
             destAddr := dest.core.DEBUG_getAddr()[:]
-            ticker := time.NewTicker(150*time.Millisecond)
+            ticker := time.NewTicker(150 * time.Millisecond)
             ch := make(chan bool, 1)
-            ch<-true
-            doTicker := func () {
-                for _ = range ticker.C {
+            ch <- true
+            doTicker := func() {
+                for range ticker.C {
                     select {
-                    case ch<-true:
+                    case ch <- true:
                     default:
                     }
                 }
             }
             go doTicker()
-            for loop := true ; loop ; {
+            for loop := true; loop; {
                 select {
-                case packet := <-dest.recv: {
-                    if equiv(payload, packet[len(packet)-len(payload):]) {
-                        loop = false
+                case packet := <-dest.recv:
+                    {
+                        if equiv(payload, packet[len(packet)-len(payload):]) {
+                            loop = false
+                        }
                     }
-                }
-                case <-ch: sendTo(payload, destAddr)
+                case <-ch:
+                    sendTo(payload, destAddr)
                 }
             }
             ticker.Stop()
         }
         //break // Only try sending pings from 1 node
         // This is because, for some reason, stopTun() doesn't always close it
         // And if two tuns are up, bad things happen (sends via wrong interface)
     }
     fmt.Println("Finished pinging nodes")
 }

 func pingBench(store map[[32]byte]*Node) {
     fmt.Println("Benchmarking pings...")
     nPings := 0
     payload := make([]byte, 1280+40) // MTU + ipv6 header
     var timed time.Duration
     //nNodes := len(store)
     count := 0
     for _, source := range store {
         count++
         //fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
-        getPing := func (key [32]byte, decodedCoords []byte) []byte {
+        getPing := func(key [32]byte, decodedCoords []byte) []byte {
             // TODO write some function to do this the right way, put... somewhere...
             coords := DEBUG_wire_encode_coords(decodedCoords)
             packet := make([]byte, 0, len(key)+len(coords)+len(payload))
             packet = append(packet, key[:]...)
             packet = append(packet, coords...)
             packet = append(packet, payload[:]...)
             return packet
         }
         for _, dest := range store {
             key := dest.core.DEBUG_getBoxPub()
             loc := dest.core.DEBUG_getLocator()
             coords := loc.DEBUG_getCoords()
             ping := getPing(key, coords)
             // TODO make sure the session is open first
             start := time.Now()
-            for i := 0 ; i < 1000000 ; i++{ source.send<-ping ; nPings++ }
+            for i := 0; i < 1000000; i++ {
+                source.send <- ping
+                nPings++
+            }
             timed += time.Since(start)
             break
         }
         break
     }
     fmt.Printf("Sent %d pings in %s (%f per second)\n",
         nPings,
         timed,
         float64(nPings)/timed.Seconds())
 }

 func dumpStore(store map[NodeID]*Node) {
     for _, node := range store {
         fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator())
         node.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
     }
 }

 func dumpDHTSize(store map[[32]byte]*Node) {
     var min, max, sum int
     for _, node := range store {
         num := node.core.DEBUG_getDHTSize()
         min = num
         max = num
         break
     }
     for _, node := range store {
         num := node.core.DEBUG_getDHTSize()
-        if num < min { min = num }
-        if num > max { max = num }
+        if num < min {
+            min = num
+        }
+        if num > max {
+            max = num
+        }
         sum += num
     }
-    avg := float64(sum)/float64(len(store))
+    avg := float64(sum) / float64(len(store))
     fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
 }

 ////////////////////////////////////////////////////////////////////////////////

@@ -364,47 +383,48 @@ var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
 var memprofile = flag.String("memprofile", "", "write memory profile to this file")

 func main() {
     flag.Parse()
     if *cpuprofile != "" {
         f, err := os.Create(*cpuprofile)
         if err != nil {
             panic(fmt.Sprintf("could not create CPU profile: %v", err))
         }
         if err := pprof.StartCPUProfile(f); err != nil {
             panic(fmt.Sprintf("could not start CPU profile: %v", err))
         }
         defer pprof.StopCPUProfile()
     }
     if *memprofile != "" {
         f, err := os.Create(*memprofile)
         if err != nil {
             panic(fmt.Sprintf("could not create memory profile: %v", err))
         }
-        defer func () { pprof.WriteHeapProfile(f) ; f.Close() }()
+        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
     }
     fmt.Println("Test")
     Util_testAddrIDMask()
     idxstore := makeStoreSquareGrid(4)
     //idxstore := makeStoreStar(256)
     //idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
     //idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
     //idxstore := loadGraph("skitter")
     kstore := getKeyedStore(idxstore)
     /*
         for _, n := range kstore {
             log := n.core.DEBUG_getLogger()
             log.SetOutput(os.Stderr)
         }
     */
     startNetwork(kstore)
     //time.Sleep(10*time.Second)
     // Note that testPaths only works if pressure is turned off
     // Otherwise congestion can lead to routing loops?
-    for finished := false; !finished ; { finished = testPaths(kstore) }
+    for finished := false; !finished; {
+        finished = testPaths(kstore)
+    }
     pingNodes(kstore)
     //pingBench(kstore) // Only after disabling debug output
     //stressTest(kstore)
     //time.Sleep(120*time.Second)
     dumpDHTSize(kstore) // note that this uses racy functions to read things...
 }
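The testPaths loop above walks a packet hop by hop, using the TTL returned by each switch lookup as a guard: if the TTL reaches zero before the destination is reached, the path is treated as a drop rather than looping forever. A toy model of that guard (illustrative names, not the real API):

    package main

    import "fmt"

    // walk follows next-hop pointers from 'from' toward 'to', giving up when ttl hits zero.
    func walk(next map[int]int, from, to int, ttl uint64) bool {
        for here := from; here != to; {
            if ttl == 0 {
                fmt.Println("Drop at node", here)
                return false
            }
            ttl--
            here = next[here]
        }
        return true
    }

    func main() {
        next := map[int]int{1: 2, 2: 3, 3: 1} // a deliberate routing loop: 1 -> 2 -> 3 -> 1
        fmt.Println(walk(next, 1, 4, 8))      // false: the TTL expires instead of spinning forever
    }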
@@ -7,16 +7,16 @@ import "runtime"

 func main() {

     var ops uint64 = 0
-    for i := 0 ; i < 4 ; i++ {
-        go func () {
+    for i := 0; i < 4; i++ {
+        go func() {
             for {
                 atomic.AddUint64(&ops, 1)
                 runtime.Gosched()
             }
         }()
     }
-    time.Sleep(1*time.Second)
+    time.Sleep(1 * time.Second)
     opsFinal := atomic.LoadUint64(&ops)
     fmt.Println("ops:", opsFinal)
 }
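For comparison, on Go 1.19 and newer the same benchmark can be written with the typed atomics from sync/atomic (an assumption about toolchain version; the behavior is identical):

    package main

    import (
        "fmt"
        "runtime"
        "sync/atomic"
        "time"
    )

    func main() {
        var ops atomic.Uint64
        for i := 0; i < 4; i++ {
            go func() {
                for {
                    ops.Add(1)
                    runtime.Gosched()
                }
            }()
        }
        time.Sleep(1 * time.Second)
        fmt.Println("ops:", ops.Load())
    }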
@@ -4,39 +4,50 @@ import "fmt"
 import "net"
 import "time"

-func main () {
+func main() {
     addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     listener, err := net.ListenTCP("tcp", addr)
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     defer listener.Close()

     packetSize := 65535
     numPackets := 65535

-    go func () {
+    go func() {
         send, err := net.DialTCP("tcp", nil, addr)
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         defer send.Close()
         msg := make([]byte, packetSize)
-        for idx := 0 ; idx < numPackets ; idx++ { send.Write(msg) }
+        for idx := 0; idx < numPackets; idx++ {
+            send.Write(msg)
+        }
     }()

     start := time.Now()
     //msg := make([]byte, 1280)
     sock, err := listener.AcceptTCP()
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     defer sock.Close()
     read := 0
     buf := make([]byte, packetSize)
     for {
         n, err := sock.Read(buf)
         read += n
-        if err != nil { break }
+        if err != nil {
+            break
+        }
     }
     timed := time.Since(start)

     fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
     fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds())
 }
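The two Printf lines report throughput as packets per second and bits per second; the bits figure is just 8 * bytes read / elapsed seconds. A standalone check of that arithmetic (made-up numbers):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        read := 1048576 // bytes received
        timed := 500 * time.Millisecond
        fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds()) // 16777216.000000
    }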
@@ -5,32 +5,32 @@ import "fmt"
 import "sync"

 func main() {
     fmt.Println("Testing speed of recv+send loop")
     const count = 10000000
     c := make(chan []byte, 1)
-    c<-[]byte{}
+    c <- []byte{}
     var wg sync.WaitGroup
-    worker := func () {
-        for idx := 0 ; idx < count ; idx++ {
+    worker := func() {
+        for idx := 0; idx < count; idx++ {
             p := <-c
             select {
-            case c<-p:
+            case c <- p:
             default:
             }
         }
         wg.Done()
     }
     nIter := 0
     start := time.Now()
-    for idx := 0 ; idx < 1 ; idx++ {
+    for idx := 0; idx < 1; idx++ {
         go worker()
         nIter += count
         wg.Add(1)
     }
     wg.Wait()
     stop := time.Now()
     timed := stop.Sub(start)
     fmt.Printf("%d iterations in %s\n", nIter, timed)
     fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds())
     fmt.Printf("%s per iteration\n", timed/time.Duration(nIter))
 }
@@ -6,47 +6,51 @@ import "time"
 import "fmt"

 type testStruct struct {
     First  uint64
     Second float64
     Third  []byte
 }

 func testFunc(tickerDuration time.Duration) {
     chn := make(chan []byte)
     ticker := time.NewTicker(tickerDuration)
     defer ticker.Stop()
     send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}}
     buf := bytes.NewBuffer(nil)
     enc := gob.NewEncoder(buf)
     dec := gob.NewDecoder(buf)
-    sendCall := func () {
+    sendCall := func() {
         err := enc.EncodeValue(&send)
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         bs := make([]byte, buf.Len())
         buf.Read(bs)
         fmt.Println("send:", bs)
-        go func() { chn<-bs }()
+        go func() { chn <- bs }()
     }
-    recvCall := func (bs []byte) {
+    recvCall := func(bs []byte) {
         buf.Write(bs)
         recv := testStruct{}
         err := dec.DecodeValue(&recv)
         fmt.Println("recv:", bs)
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
     }
     for {
         select {
-        case bs := <-chn : recvCall(bs)
-        case <-ticker.C : sendCall()
+        case bs := <-chn:
+            recvCall(bs)
+        case <-ticker.C:
+            sendCall()
         }
     }
 }

 func main() {
-    go testFunc(100*time.Millisecond) // Does not crash
+    go testFunc(100 * time.Millisecond) // Does not crash
     time.Sleep(time.Second)
     go testFunc(time.Nanosecond) // Does crash
     time.Sleep(time.Second)
 }
@@ -4,19 +4,19 @@ import "sync"
 import "time"
 import "fmt"

-func main () {
+func main() {
     const reqs = 1000000
     var wg sync.WaitGroup
     start := time.Now()
-    for idx := 0 ; idx < reqs ; idx++ {
+    for idx := 0; idx < reqs; idx++ {
         wg.Add(1)
-        go func () { wg.Done() } ()
+        go func() { wg.Done() }()
     }
     wg.Wait()
     stop := time.Now()
     timed := stop.Sub(start)
     fmt.Printf("%d goroutines in %s (%f per second)\n",
         reqs,
         timed,
         reqs/timed.Seconds())
 }
@@ -8,42 +8,50 @@ import "time"

 func basic_test() {

     // TODO need a way to look up who our link-local neighbors are for each iface!
     //addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001")
     addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001")
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     sock, err := net.ListenMulticastUDP("udp", nil, addr)
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     defer sock.Close()

-    go func () {
+    go func() {
         saddr, err := net.ResolveUDPAddr("udp", "[::]:0")
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         send, err := net.ListenUDP("udp", saddr)
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         defer send.Close()
         msg := make([]byte, 1280)
         for {
             //fmt.Println("Sending...")
             send.WriteTo(msg, addr)
         }
     }()

     numPackets := 1000
     start := time.Now()
     msg := make([]byte, 2000)
-    for i := 0 ; i < numPackets ; i++ {
+    for i := 0; i < numPackets; i++ {
         //fmt.Println("Reading:", i)
         sock.ReadFromUDP(msg)
     }
     timed := time.Since(start)

     fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

 }

-func main () {
+func main() {

     basic_test()

 }
@@ -4,75 +4,89 @@ import "fmt"
 import "net"
 import "time"

 // TODO look into netmap + libpcap to bypass the kernel as much as possible

 func basic_test() {

     // TODO need a way to look up who our link-local neighbors are for each iface!

     var ip *net.IP
     ifaces, err := net.Interfaces()
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     var zone string
     for _, iface := range ifaces {
         addrs, err := iface.Addrs()
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         for _, addr := range addrs {
             addrIP, _, _ := net.ParseCIDR(addr.String())
-            if addrIP.To4() != nil { continue } // IPv6 only
-            if !addrIP.IsLinkLocalUnicast() { continue }
+            if addrIP.To4() != nil {
+                continue
+            } // IPv6 only
+            if !addrIP.IsLinkLocalUnicast() {
+                continue
+            }
             zone = iface.Name
             ip = &addrIP
         }
         addrs, err = iface.MulticastAddrs()
-        if err != nil { panic(err) }
+        if err != nil {
+            panic(err)
+        }
         for _, addr := range addrs {
             fmt.Println(addr.String())
         }
     }
-    if ip == nil { panic("No link-local IPv6 found") }
+    if ip == nil {
+        panic("No link-local IPv6 found")
+    }
     fmt.Println("Using address:", *ip)

     addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

     saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone}
     send, err := net.ListenUDP("udp", &saddr)
     defer send.Close()
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }
     sock, err := net.ListenUDP("udp", &addr)
     defer sock.Close()
-    if err != nil { panic(err) }
+    if err != nil {
+        panic(err)
+    }

-    const buffSize = 1048576*100
+    const buffSize = 1048576 * 100

     send.SetWriteBuffer(buffSize)
     sock.SetReadBuffer(buffSize)
     sock.SetWriteBuffer(buffSize)

-    go func () {
+    go func() {
         msg := make([]byte, 1280)
         for {
             send.WriteTo(msg, &addr)
         }
     }()

     numPackets := 100000
     start := time.Now()
     msg := make([]byte, 2000)
-    for i := 0 ; i < numPackets ; i++ {
+    for i := 0; i < numPackets; i++ {
         _, addr, _ := sock.ReadFrom(msg)
         sock.WriteTo(msg, addr)
     }
     timed := time.Since(start)

     fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

 }

-func main () {
+func main() {

     basic_test()

 }
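A note on the zone handling above: link-local IPv6 addresses are only meaningful relative to a specific interface, which is why the scan records iface.Name and passes it as the Zone of each net.UDPAddr. A minimal illustration (the interface name "eth0" is hypothetical):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        addr := net.UDPAddr{IP: net.ParseIP("fe80::1"), Port: 9001, Zone: "eth0"}
        fmt.Println(addr.String()) // [fe80::1%eth0]:9001
    }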
@@ -1,83 +1,89 @@
 package main

 import "fmt"

 //import "net"
 import "time"
 import "runtime"
 import "sync/atomic"

 func poolbench() {
     nWorkers := runtime.GOMAXPROCS(0)
     work := make(chan func(), 1)
     workers := make(chan chan<- func(), nWorkers)
     makeWorker := func() chan<- func() {
         ch := make(chan func())
         go func() {
             for {
                 f := <-ch
                 f()
                 select {
-                case workers<-(ch):
-                default: return
+                case workers <- (ch):
+                default:
+                    return
                 }
             }
         }()
         return ch
     }
     getWorker := func() chan<- func() {
         select {
-        case ch := <-workers: return ch
-        default: return makeWorker()
+        case ch := <-workers:
+            return ch
+        default:
+            return makeWorker()
         }
     }
     dispatcher := func() {
         for {
             w := <-work
             ch := getWorker()
-            ch<-w
+            ch <- w
         }
     }
     go dispatcher()
     var count uint64
     const nCounts = 1000000
-    for idx := 0 ; idx < nCounts ; idx++ {
+    for idx := 0; idx < nCounts; idx++ {
         f := func() { atomic.AddUint64(&count, 1) }
         work <- f
     }
-    for atomic.LoadUint64(&count) < nCounts {}
+    for atomic.LoadUint64(&count) < nCounts {
+    }
 }

 func normalbench() {
     var count uint64
     const nCounts = 1000000
     ch := make(chan struct{}, 1)
-    ch<-struct{}{}
-    for idx := 0 ; idx < nCounts ; idx++ {
+    ch <- struct{}{}
+    for idx := 0; idx < nCounts; idx++ {
         f := func() { atomic.AddUint64(&count, 1) }
         f()
         <-ch
-        ch<-struct{}{}
+        ch <- struct{}{}
     }
 }

 func gobench() {
     var count uint64
     const nCounts = 1000000
-    for idx := 0 ; idx < nCounts ; idx++ {
+    for idx := 0; idx < nCounts; idx++ {
         f := func() { atomic.AddUint64(&count, 1) }
         go f()
     }
-    for atomic.LoadUint64(&count) < nCounts {}
+    for atomic.LoadUint64(&count) < nCounts {
+    }
 }

 func main() {
     start := time.Now()
     poolbench()
     fmt.Println(time.Since(start))
     start = time.Now()
     normalbench()
     fmt.Println(time.Since(start))
     start = time.Now()
     gobench()
     fmt.Println(time.Since(start))
 }
@ -1,64 +1,76 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"bytes"
|
||||
"sync"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"math/big"
|
||||
quic "github.com/lucas-clemente/quic-go"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
quic "github.com/lucas-clemente/quic-go"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
)

const addr = "[::1]:9001"

func main() {
	go run_server()
	run_client()
}

func run_server() {
	listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)
	if err != nil {
		panic(err)
	}
	ses, err := listener.Accept()
	if err != nil {
		panic(err)
	}
	for {
		stream, err := ses.AcceptStream()
		if err != nil {
			panic(err)
		}
		go func() {
			defer stream.Close()
			bs := bytes.Buffer{}
			_, err := bs.ReadFrom(stream)
			if err != nil {
				panic(err)
			} //<-- TooManyOpenStreams
		}()
	}
}

func run_client() {
	msgSize := 1048576
	msgCount := 128
	ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil)
	if err != nil {
		panic(err)
	}
	bs := make([]byte, msgSize)
	wg := sync.WaitGroup{}
	start := time.Now()
	for idx := 0; idx < msgCount; idx++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			stream, err := ses.OpenStreamSync()
			if err != nil {
				panic(err)
			}
			defer stream.Close()
			stream.Write(bs)
		}() // "go" this later
	}
	wg.Wait()
	timed := time.Since(start)
	fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds()))
}
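
The `//<-- TooManyOpenStreams` note in the server suggests it errors out when the client opens all 128 streams at once. One generic way to bound that kind of concurrency is a buffered-channel semaphore; this is a sketch of the pattern under that assumption, not part of the benchmark (runLimited is a hypothetical helper):

package main

import "sync"

// runLimited executes the tasks with at most limit of them in flight.
func runLimited(limit int, tasks []func()) {
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, task := range tasks {
		wg.Add(1)
		sem <- struct{}{} // blocks while `limit` tasks are already running
		go func(task func()) {
			defer wg.Done()
			defer func() { <-sem }()
			task()
		}(task)
	}
	wg.Wait()
}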

// Set up a bare-bones TLS config for the server
@ -81,4 +93,3 @@ func generateTLSConfig() *tls.Config {
	}
	return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
}

@ -11,59 +11,64 @@ import "runtime/pprof"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFrom(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}
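
With the flags above, a profile can be captured and inspected using only standard tooling: run the binary with -cpuprofile cpu.prof and then open the result with `go tool pprof cpu.prof`. The same main() scaffolding repeats in each of the benchmark files below.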

@ -11,67 +11,74 @@ import "runtime/pprof"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		for {
			for len(bss) < 1024 {
				bss = append(bss, msg)
			}
			bss.WriteTo(send)
			//bss = bss[:0]
			//send.Write(msg)
		}
	}()

	numPackets := 1000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}
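
net.Buffers turns the batched send into a single vectored write (writev) where the platform supports it, and WriteTo consumes the buffers it writes, which is why the sender loop above can refill bss on every iteration without the commented-out reset. A small self-contained illustration of the consuming behaviour:

package main

import (
	"bytes"
	"fmt"
	"net"
)

func main() {
	var sink bytes.Buffer
	bss := net.Buffers{[]byte("hello "), []byte("world")}
	bss.WriteTo(&sink)                   // drains bss as it writes
	fmt.Println(len(bss), sink.String()) // 0 hello world
}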

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -11,89 +11,106 @@ import "time"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	var ip *net.IP
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	var zone string
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			panic(err)
		}
		for _, addr := range addrs {
			addrIP, _, _ := net.ParseCIDR(addr.String())
			if addrIP.To4() != nil {
				continue
			} // IPv6 only
			if !addrIP.IsLinkLocalUnicast() {
				continue
			}
			fmt.Println(iface.Name, addrIP)
			zone = iface.Name
			ip = &addrIP
		}
		if ip != nil {
			break
		}
		/*
			addrs, err = iface.MulticastAddrs()
			if err != nil { panic(err) }
			for _, addr := range addrs {
				fmt.Println(addr.String())
			}
		*/
	}
	if ip == nil {
		panic("No link-local IPv6 found")
	}
	fmt.Println("Using address:", *ip)
	addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

	laddr, err := net.ResolveUDPAddr("udp", "[::]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", laddr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, &addr)
		//send, err := net.ListenUDP("udp", nil)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
			//send.WriteToUDP(msg, &addr)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}
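
Link-local IPv6 addresses are only usable together with a zone (interface) identifier, which is why the scan above records iface.Name alongside the address; textually the zone appears as a %suffix. A sketch with placeholder values:

package main

import (
	"fmt"
	"net"
)

func main() {
	addr := net.UDPAddr{
		IP:   net.ParseIP("fe80::1"), // placeholder link-local address
		Port: 9001,
		Zone: "eth0", // placeholder interface name from a scan like the one above
	}
	fmt.Println(addr.String()) // [fe80::1%eth0]:9001
}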

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -13,82 +13,91 @@ const buffSize = 32

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	go func() {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		for {
			for len(bss) < 1 { //buffSize {
				bss = append(bss, msg)
			}
			bss := net.Buffers{[]byte{0, 1, 2, 3}, []byte{0, 1}, msg}
			bss.WriteTo(send)
			//send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	for i := 0; i < numPackets; i++ {
		msg := make([]byte, 1280*buffSize)
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		msg = msg[:n]
		for len(msg) > 1286 {
			// handle message
			i++
			msg = msg[1286:]
		}
		// handle remaining fragment of message
		//fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

	_ = func(in chan<- int) {
		close(in)
	}

}
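
The reader above splits the TCP stream on a fixed 1286-byte boundary, which only works because every message happens to be that size. On a stream transport the usual fix is an explicit length prefix; a minimal sketch of that approach (not the project's actual framing):

package framing

import (
	"encoding/binary"
	"io"
)

// WriteFrame prefixes msg with its length as a big-endian uint16.
func WriteFrame(w io.Writer, msg []byte) error {
	var hdr [2]byte
	binary.BigEndian.PutUint16(hdr[:], uint16(len(msg)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(msg)
	return err
}

// ReadFrame reads back one length-prefixed message.
func ReadFrame(r io.Reader) ([]byte, error) {
	var hdr [2]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	msg := make([]byte, binary.BigEndian.Uint16(hdr[:]))
	_, err := io.ReadFull(r, msg)
	return msg, err
}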

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -11,62 +11,67 @@ import "runtime/pprof"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:0")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		send, err := net.DialUDP("udp", nil, raddr)
		//send, err := net.ListenUDP("udp", addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
			//send.WriteToUDP(msg, raddr)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -11,62 +11,69 @@ import "runtime/pprof"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002")
	if err != nil {
		panic(err)
	}

	send, err := net.DialUDP("udp", saddr, raddr)
	if err != nil {
		panic(err)
	}
	defer send.Close()

	recv, err := net.DialUDP("udp", raddr, saddr)
	if err != nil {
		panic(err)
	}
	defer recv.Close()

	go func() {
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		recv.Read(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -11,78 +11,82 @@ import "runtime/pprof"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	sock, err := net.ListenUDP("udp", nil)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	ch := make(chan []byte, 1)

	writer := func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		//send, err := net.ListenUDP("udp", nil)
		//if err != nil { panic(err) }
		//defer send.Close()
		for {
			select {
			case <-ch:
			default:
			}
			msg := make([]byte, 1280)
			sock.WriteToUDP(msg, raddr)
			//send.WriteToUDP(msg, raddr)
		}
	}
	go writer()
	//go writer()
	//go writer()
	//go writer()

	numPackets := 65536
	size := 0
	start := time.Now()
	success := 0
	for i := 0; i < numPackets; i++ {
		msg := make([]byte, 2048)
		n, _, err := sock.ReadFromUDP(msg)
		if err != nil {
			panic(err)
		}
		size += n
		select {
		case ch <- msg:
			success += 1
		default:
		}
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
	fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds())
	fmt.Println("Success:", success, "/", numPackets)
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -13,105 +13,112 @@ import "golang.org/x/net/ipv6"

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	writer := func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		send, err := net.ListenUDP("udp", nil)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		conn := ipv6.NewPacketConn(send)
		defer conn.Close()
		var msgs []ipv6.Message
		for idx := 0; idx < 1024; idx++ {
			msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
			msgs = append(msgs, msg)
		}
		for {
			/*
				var msgs []ipv6.Message
				for idx := 0 ; idx < 1024 ; idx++ {
					msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
					msgs = append(msgs, msg)
				}
			*/
			conn.WriteBatch(msgs, 0)
		}

	}
	go writer()
	//go writer()
	//go writer()
	//go writer()

	numPackets := 65536
	size := 0
	count := 0
	start := time.Now()
	/*
		conn := ipv6.NewPacketConn(sock)
		defer conn.Close()
		for ; count < numPackets ; count++ {
			msgs := make([]ipv6.Message, 1024)
			for _, msg := range msgs {
				msg.Buffers = append(msg.Buffers, make([]byte, 2048))
			}
			n, err := conn.ReadBatch(msgs, 0)
			if err != nil { panic(err) }
			fmt.Println("DEBUG: n", n)
			for _, msg := range msgs[:n] {
				fmt.Println("DEBUG: msg", msg)
				size += msg.N
				//for _, bs := range msg.Buffers {
				//	size += len(bs)
				//}
				count++
			}
		}
	//*/
	//*
	for ; count < numPackets; count++ {
		msg := make([]byte, 2048)
		n, _, err := sock.ReadFromUDP(msg)
		if err != nil {
			panic(err)
		}
		size += n
	}
	//*/
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds())
	fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds())
}
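
The commented-out ReadBatch attempt above has a subtle bug: `for _, msg := range msgs` iterates over copies, so the appended buffers never reach the slice, which is likely why it was abandoned for plain ReadFromUDP. A sketch of batch receiving that indexes the slice directly (ReadBatch maps to recvmmsg on Linux; the helper name is illustrative):

package batchread

import (
	"net"

	"golang.org/x/net/ipv6"
)

// readBatch receives up to 1024 datagrams in as few syscalls as the
// platform allows, returning how many messages and payload bytes arrived.
func readBatch(sock *net.UDPConn) (packets, size int, err error) {
	conn := ipv6.NewPacketConn(sock)
	msgs := make([]ipv6.Message, 1024)
	for i := range msgs {
		msgs[i].Buffers = [][]byte{make([]byte, 2048)} // index, don't range-copy
	}
	n, err := conn.ReadBatch(msgs, 0)
	if err != nil {
		return 0, 0, err
	}
	for _, msg := range msgs[:n] {
		size += msg.N
	}
	return n, size, nil
}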

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -13,84 +13,93 @@ const buffSize = 32

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	go func() {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		count := 0
		for {
			time.Sleep(100 * time.Millisecond)
			for len(bss) < count {
				bss = append(bss, msg)
			}
			bss.WriteTo(send)
			count++
			//send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	for {
		msg := make([]byte, 1280*buffSize)
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		msg = msg[:n]
		fmt.Println("Read:", n)
		for len(msg) > 1280 {
			// handle message
			msg = msg[1280:]
		}
		// handle remaining fragment of message
		//fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

	_ = func(in chan<- int) {
		close(in)
	}

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}

@ -1,11 +1,11 @@
package main

import (
	"fmt"
	"log"
	"net"
	"os/exec"
	"time"

	"github.com/songgao/water"
)

@ -17,54 +17,56 @@ func setup_dev() *water.Interface {
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect(ifce *water.Interface) {
	conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second)
	if err != nil {
		panic(err)
	}
	sock := conn.(*net.TCPConn)
	_ = sock // silence "declared and not used" until the TODO below is done
	// TODO go a worker to move packets to/from the tun
}

func bench() {
}

func main() {
	ifce := setup_dev1()
	connect(ifce)
	bench()
	fmt.Println("Done?")
	return
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())
@ -73,10 +75,9 @@ func main() {
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}
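
Note that creating the TUN device and running the ip commands above both require root (CAP_NET_ADMIN), so this test has to run as a privileged user; the hard-coded fc00::2/8 assignment and the 192.168.2.2:9001 dial target look like scratch values for a two-machine setup rather than anything configurable.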

@ -1,10 +1,10 @@
package main

import (
	"fmt"
	"log"
	"net"
	"os/exec"

	"github.com/songgao/water"
)

@ -17,84 +17,84 @@ func setup_dev() *water.Interface {
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::1/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func addNS(name string) {
	cmd := exec.Command("ip", "netns", "add", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to setup netns")
	}
}

func delNS(name string) {
	cmd := exec.Command("ip", "netns", "delete", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to setup netns")
	}
}

func doInNetNS(comm ...string) *exec.Cmd {
	// exec.Command can't mix fixed arguments with a slice expansion,
	// so build the full argument list first.
	args := append([]string{"netns", "exec", netnsName}, comm...)
	return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
	ifce := setup_dev()
	addNS(netnsName)
	cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to move tun to netns")
	}
	cmd = doInNetNS("ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = doInNetNS("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect() {
@ -109,7 +109,7 @@ func main() {
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())
@ -118,10 +118,9 @@ func main() {
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}

@ -1,10 +1,10 @@
package main

import (
	"fmt"
	"log"
	"net"
	"os/exec"

	"github.com/songgao/water"
)

@ -17,87 +17,86 @@ func setup_dev() *water.Interface {
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::1/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func addNS(name string) {
	cmd := exec.Command("ip", "netns", "add", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to setup netns")
	}
}

func delNS(name string) {
	cmd := exec.Command("ip", "netns", "delete", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to setup netns")
	}
}

func doInNetNS(comm ...string) *exec.Cmd {
	// exec.Command can't mix fixed arguments with a slice expansion,
	// so build the full argument list first.
	args := append([]string{"netns", "exec", netnsName}, comm...)
	return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
	ifce := setup_dev()
	addNS(netnsName)
	cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to move tun to netns")
	}
	cmd = exec.Command(
		"ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = exec.Command(
		"ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect() {
@ -112,7 +111,7 @@ func main() {
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())
@ -121,10 +120,9 @@ func main() {
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}

@ -27,14 +27,18 @@ func main() {
	}
	}()

	address := net.ParseIP("fc00::1")
	tuntap, err := tun.OpenTun(address)
	if err != nil {
		panic(err)
	}
	defer tuntap.Close()
	// read data from tun into rCh channel.
	wg.Add(1)
	go func() {
		if err := tuntap.Read(rCh); err != nil {
			panic(err)
		}
		wg.Done()
	}()
	wg.Wait()

@ -6,34 +6,35 @@ import "fmt"
import "time"

func main() {
	for idx := 0; idx < 64; idx++ {
		num := uint64(1) << uint(idx)
		encoded := make([]byte, 10)
		length := wire.Encode_uint64(num, encoded)
		decoded, _ := wire.Decode_uint64(encoded[:length])
		if decoded != num {
			panic(fmt.Sprintf("%d != %d", decoded, num))
		}
	}
	const count = 1000000
	start := time.Now()
	encoded := make([]byte, 10)
	//num := ^uint64(0) // Longest possible value for full uint64 range
	num := ^uint64(0) >> 1 // Largest positive int64 (real use case)
	//num := uint64(0) // Shortest possible value, most will be of this length
	length := wire.Encode_uint64(num, encoded)
	for idx := 0; idx < count; idx++ {
		wire.Encode_uint64(num, encoded)
	}
	timed := time.Since(start)
	fmt.Println("Ops:", count/timed.Seconds())
	fmt.Println("Time:", timed.Nanoseconds()/count)

	encoded = encoded[:length]
	start = time.Now()
	for idx := 0; idx < count; idx++ {
		wire.Decode_uint64(encoded)
	}
	timed = time.Since(start)
	fmt.Println("Ops:", count/timed.Seconds())
	fmt.Println("Time:", timed.Nanoseconds()/count)
}
|
||||
|
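The benchmark above exercises wire.Encode_uint64 and wire.Decode_uint64, whose implementation is not part of this diff. A minimal standalone sketch of one plausible encoding, assuming a big-endian base-128 varint with a continuation bit on every byte except the last (the real wire functions may differ):

package main

import "fmt"

// Sketch of a big-endian base-128 varint: 7 payload bits per byte,
// high bit set on every byte except the last. (Assumed layout; the
// real wire.Encode_uint64 is not shown in this diff.)
func encodeUint64(num uint64, out []byte) int {
	length := 1
	for n := num >> 7; n != 0; n >>= 7 {
		length++
	}
	for idx := length - 1; idx >= 0; idx-- {
		out[idx] = byte(num & 0x7f)
		if idx != length-1 {
			out[idx] |= 0x80 // continuation bit on all but the final byte
		}
		num >>= 7
	}
	return length
}

func decodeUint64(in []byte) (uint64, int) {
	var num uint64
	for idx, b := range in {
		num = (num << 7) | uint64(b&0x7f)
		if b&0x80 == 0 {
			return num, idx + 1 // final byte has no continuation bit
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := make([]byte, 10)
	n := encodeUint64(^uint64(0)>>1, buf)
	decoded, _ := decodeUint64(buf[:n])
	fmt.Println(n, decoded == ^uint64(0)>>1) // 9 true
}

Under this scheme the largest positive int64 used by the benchmark needs 9 bytes and a full uint64 needs 10, which is consistent with the scratch buffer being sized at 10.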
@@ -1,108 +1,120 @@
package yggdrasil

type address [16]byte // IPv6 address within the network
type subnet [8]byte   // It's a /64

var address_prefix = [...]byte{0xfd} // For node addresses + local subnets

func (a *address) isValid() bool {
	for idx := range address_prefix {
		if (*a)[idx] != address_prefix[idx] {
			return false
		}
	}
	return (*a)[len(address_prefix)]&0x80 == 0
}

func (s *subnet) isValid() bool {
	for idx := range address_prefix {
		if (*s)[idx] != address_prefix[idx] {
			return false
		}
	}
	return (*s)[len(address_prefix)]&0x80 != 0
}

func address_addrForNodeID(nid *NodeID) *address {
	// 128 bit address
	// Begins with prefix
	// Next bit is a 0
	// Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID
	// Leading 1s and first leading 0 of the NodeID are truncated off
	// The rest is appended to the IPv6 address (truncated to 128 bits total)
	var addr address
	var temp []byte
	done := false
	ones := byte(0)
	bits := byte(0)
	nBits := 0
	for idx := 0; idx < 8*len(nid); idx++ {
		bit := (nid[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
		if !done && bit != 0 {
			ones++
			continue
		}
		if !done && bit == 0 {
			done = true
			continue // FIXME this assumes that ones <= 127
		}
		bits = (bits << 1) | bit
		nBits++
		if nBits == 8 {
			nBits = 0
			temp = append(temp, bits)
		}
	}
	copy(addr[:], address_prefix[:])
	addr[len(address_prefix)] = ones & 0x7f
	copy(addr[len(address_prefix)+1:], temp)
	return &addr
}

func address_subnetForNodeID(nid *NodeID) *subnet {
	// Exactly as the address version, with two exceptions:
	// 1) The first bit after the fixed prefix is a 1 instead of a 0
	// 2) It's truncated to a subnet prefix length instead of 128 bits
	addr := *address_addrForNodeID(nid)
	var snet subnet
	copy(snet[:], addr[:])
	snet[len(address_prefix)] |= 0x80
	return &snet
}

func (a *address) getNodeIDandMask() (*NodeID, *NodeID) {
	// Mask is a bitmask to mark the bits visible from the address
	// This means truncated leading 1s, first leading 0, and visible part of addr
	var nid NodeID
	var mask NodeID
	ones := int(a[len(address_prefix)] & 0x7f)
	for idx := 0; idx < ones; idx++ {
		nid[idx/8] |= 0x80 >> byte(idx%8)
	}
	nidOffset := ones + 1
	addrOffset := 8*len(address_prefix) + 8
	for idx := addrOffset; idx < 8*len(a); idx++ {
		bits := a[idx/8] & (0x80 >> byte(idx%8))
		bits <<= byte(idx % 8)
		nidIdx := nidOffset + (idx - addrOffset)
		bits >>= byte(nidIdx % 8)
		nid[nidIdx/8] |= bits
	}
	maxMask := 8*(len(a)-len(address_prefix)-1) + ones + 1
	for idx := 0; idx < maxMask; idx++ {
		mask[idx/8] |= 0x80 >> byte(idx%8)
	}
	return &nid, &mask
}

func (s *subnet) getNodeIDandMask() (*NodeID, *NodeID) {
	// As with the address version, but visible parts of the subnet prefix instead
	var nid NodeID
	var mask NodeID
	ones := int(s[len(address_prefix)] & 0x7f)
	for idx := 0; idx < ones; idx++ {
		nid[idx/8] |= 0x80 >> byte(idx%8)
	}
	nidOffset := ones + 1
	addrOffset := 8*len(address_prefix) + 8
	for idx := addrOffset; idx < 8*len(s); idx++ {
		bits := s[idx/8] & (0x80 >> byte(idx%8))
		bits <<= byte(idx % 8)
		nidIdx := nidOffset + (idx - addrOffset)
		bits >>= byte(nidIdx % 8)
		nid[nidIdx/8] |= bits
	}
	maxMask := 8*(len(s)-len(address_prefix)-1) + ones + 1
	for idx := 0; idx < maxMask; idx++ {
		mask[idx/8] |= 0x80 >> byte(idx%8)
	}
	return &nid, &mask
}
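A worked, standalone illustration of the layout the comments above describe, using plain byte arrays in place of the package's NodeID/address types and a made-up NodeID: prefix byte, then a 7-bit count of leading 1s, then the NodeID bits that follow the first 0, truncated to fill 16 bytes.

package main

import "fmt"

func addrFor(nid [64]byte) [16]byte {
	var addr [16]byte
	addr[0] = 0xfd // the address_prefix byte
	idx, ones := 0, 0
	for ; idx < 8*len(nid); idx++ {
		if nid[idx/8]&(0x80>>uint(idx%8)) == 0 {
			break // first leading 0
		}
		ones++
	}
	addr[1] = byte(ones) & 0x7f // 0 bit + 7-bit count of leading 1s
	idx++                       // the first 0 is skipped as well
	for outBit := 0; outBit < 8*14 && idx < 8*len(nid); outBit, idx = outBit+1, idx+1 {
		bit := (nid[idx/8] >> uint(7-idx%8)) & 1
		addr[2+outBit/8] |= bit << uint(7-outBit%8)
	}
	return addr
}

func main() {
	var nid [64]byte
	nid[0] = 0xe5 // 11100101: three leading 1s
	fmt.Printf("%x\n", addrFor(nid)) // fd035000... (count 3, then remaining bits)
}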
@@ -4,61 +4,60 @@ import "io/ioutil"
import "log"

type Core struct {
	// This is the main data structure that holds everything else for a node
	// TODO? move keys out of core and into something more appropriate
	// e.g. box keys live in sessions
	// sig keys live in peers or sigs (or wherever signing/validating logic is)
	boxPub      boxPubKey
	boxPriv     boxPrivKey
	sigPub      sigPubKey
	sigPriv     sigPrivKey
	switchTable switchTable
	peers       peers
	sigs        sigManager
	sessions    sessions
	router      router
	dht         dht
	tun         tunDevice
	searches    searches
	tcp         *tcpInterface
	udp         *udpInterface
	log         *log.Logger
}

func (c *Core) Init() {
	// Only called by the simulator, to set up nodes with random keys
	bpub, bpriv := newBoxKeys()
	spub, spriv := newSigKeys()
	c.init(bpub, bpriv, spub, spriv)
}

func (c *Core) init(bpub *boxPubKey,
	bpriv *boxPrivKey,
	spub *sigPubKey,
	spriv *sigPrivKey) {
	// TODO separate init and start functions
	// Init sets up structs
	// Start launches goroutines that depend on structs being set up
	// This is pretty much required to avoid race conditions
	util_initByteStore()
	c.log = log.New(ioutil.Discard, "", 0)
	c.boxPub, c.boxPriv = *bpub, *bpriv
	c.sigPub, c.sigPriv = *spub, *spriv
	c.sigs.init()
	c.searches.init(c)
	c.dht.init(c)
	c.sessions.init(c)
	c.peers.init(c)
	c.router.init(c)
	c.switchTable.init(c, c.sigPub) // TODO move before peers? before router?
	c.tun.init(c)
}

func (c *Core) GetNodeID() *NodeID {
	return getNodeID(&c.boxPub)
}

func (c *Core) GetTreeID() *TreeID {
	return getTreeID(&c.sigPub)
}
@@ -28,20 +28,22 @@ type TreeID [TreeIDLen]byte
type handle [handleLen]byte

func getNodeID(pub *boxPubKey) *NodeID {
	h := sha512.Sum512(pub[:])
	return (*NodeID)(&h)
}

func getTreeID(pub *sigPubKey) *TreeID {
	h := sha512.Sum512(pub[:])
	return (*TreeID)(&h)
}

func newHandle() *handle {
	var h handle
	_, err := rand.Read(h[:])
	if err != nil {
		panic(err)
	}
	return &h
}

////////////////////////////////////////////////////////////////////////////////
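Since getNodeID and getTreeID are plain SHA-512 digests of the respective public keys, the derivation can be reproduced with the standard library alone (the zero-valued key below is a placeholder):

package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

func main() {
	var pub [32]byte            // placeholder box public key
	id := sha512.Sum512(pub[:]) // a NodeID is the 64-byte digest of the key
	fmt.Println(hex.EncodeToString(id[:]))
}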
@@ -57,26 +59,28 @@ type sigPrivKey [sigPrivKeyLen]byte
type sigBytes [sigLen]byte

func newSigKeys() (*sigPubKey, *sigPrivKey) {
	var pub sigPubKey
	var priv sigPrivKey
	pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	copy(pub[:], pubSlice)
	copy(priv[:], privSlice)
	return &pub, &priv
}

func sign(priv *sigPrivKey, msg []byte) *sigBytes {
	var sig sigBytes
	sigSlice := ed25519.Sign(priv[:], msg)
	copy(sig[:], sigSlice)
	return &sig
}

func verify(pub *sigPubKey, msg []byte, sig *sigBytes) bool {
	// Should sig be an array instead of a slice?...
	// It's fixed size, but
	return ed25519.Verify(pub[:], msg, sig[:])
}

////////////////////////////////////////////////////////////////////////////////
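The sign/verify wrappers above delegate directly to an ed25519 package with the x/crypto-style call shapes; the standard library's crypto/ed25519 matches them, so a self-contained roundtrip looks like this:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("switch announcement")
	sig := ed25519.Sign(priv, msg)             // 64-byte signature
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}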
@@ -94,61 +98,68 @@ type boxSharedKey [boxSharedKeyLen]byte
type boxNonce [boxNonceLen]byte

func newBoxKeys() (*boxPubKey, *boxPrivKey) {
	pubBytes, privBytes, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	pub := (*boxPubKey)(pubBytes)
	priv := (*boxPrivKey)(privBytes)
	return pub, priv
}

func getSharedKey(myPrivKey *boxPrivKey,
	othersPubKey *boxPubKey) *boxSharedKey {
	var shared [boxSharedKeyLen]byte
	priv := (*[boxPrivKeyLen]byte)(myPrivKey)
	pub := (*[boxPubKeyLen]byte)(othersPubKey)
	box.Precompute(&shared, pub, priv)
	return (*boxSharedKey)(&shared)
}

func boxOpen(shared *boxSharedKey,
	boxed []byte,
	nonce *boxNonce) ([]byte, bool) {
	out := util_getBytes()
	//return append(out, boxed...), true // XXX HACK to test without encryption
	s := (*[boxSharedKeyLen]byte)(shared)
	n := (*[boxNonceLen]byte)(nonce)
	unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s)
	return unboxed, success
}

func boxSeal(shared *boxSharedKey, unboxed []byte, nonce *boxNonce) ([]byte, *boxNonce) {
	if nonce == nil {
		nonce = newBoxNonce()
	}
	nonce.update()
	out := util_getBytes()
	//return append(out, unboxed...), nonce // XXX HACK to test without encryption
	s := (*[boxSharedKeyLen]byte)(shared)
	n := (*[boxNonceLen]byte)(nonce)
	boxed := box.SealAfterPrecomputation(out, unboxed, n, s)
	return boxed, nonce
}

func newBoxNonce() *boxNonce {
	var nonce boxNonce
	_, err := rand.Read(nonce[:])
	for ; err == nil && nonce[0] == 0xff; _, err = rand.Read(nonce[:]) {
		// Make sure nonce isn't too high
		// This is just to make rollover unlikely to happen
		// Rollover is fine, but it may kill the session and force it to reopen
	}
	if err != nil {
		panic(err)
	}
	return &nonce
}

func (n *boxNonce) update() {
	oldNonce := *n
	n[len(n)-1] += 2
	for i := len(n) - 2; i >= 0; i-- {
		if n[i+1] < oldNonce[i+1] {
			n[i] += 1
		}
	}
}
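update steps the big-endian nonce by 2 with manual carry propagation; stepping by 2 preserves parity, which would let the two sides of a session keep to odd and even nonces respectively (an inference; the comments above don't say). Below, a standalone check of the carry logic on a short counter, plus a roundtrip through the golang.org/x/crypto/nacl/box primitives that boxSeal/boxOpen wrap:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

// Same carry logic as boxNonce.update, on a 4-byte counter for brevity.
func updateNonce(n *[4]byte) {
	old := *n
	n[len(n)-1] += 2
	for i := len(n) - 2; i >= 0; i-- {
		if n[i+1] < old[i+1] { // the byte below wrapped: carry
			n[i]++
		}
	}
}

func main() {
	// Carry propagation when the low byte wraps:
	n := [4]byte{0x00, 0x00, 0x01, 0xff}
	updateNonce(&n)
	fmt.Printf("% x\n", n) // 00 00 02 01

	// Seal/open roundtrip with precomputed shared keys:
	ourPub, ourPriv, _ := box.GenerateKey(rand.Reader)
	theirPub, theirPriv, _ := box.GenerateKey(rand.Reader)
	var sharedA, sharedB [32]byte
	box.Precompute(&sharedA, theirPub, ourPriv) // our side
	box.Precompute(&sharedB, ourPub, theirPriv) // their side: same key
	var nonce [24]byte
	rand.Read(nonce[:])
	sealed := box.SealAfterPrecomputation(nil, []byte("hello"), &nonce, &sharedA)
	opened, ok := box.OpenAfterPrecomputation(nil, sealed, &nonce, &sharedB)
	fmt.Println(ok, string(opened)) // true hello
}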
@@ -15,32 +15,32 @@ import "log"
// Core

func (c *Core) DEBUG_getSigPub() sigPubKey {
	return (sigPubKey)(c.sigPub)
}

func (c *Core) DEBUG_getBoxPub() boxPubKey {
	return (boxPubKey)(c.boxPub)
}

func (c *Core) DEBUG_getSend() chan<- []byte {
	return c.tun.send
}

func (c *Core) DEBUG_getRecv() <-chan []byte {
	return c.tun.recv
}

// Peer

func (c *Core) DEBUG_getPeers() *peers {
	return &c.peers
}

func (ps *peers) DEBUG_newPeer(box boxPubKey,
	sig sigPubKey) *peer {
	//in <-chan []byte,
	//out chan<- []byte) *peer {
	return ps.newPeer(&box, &sig) //, in, out)
}

/*
@@ -55,47 +55,51 @@ func (ps *peers) DEBUG_startPeers() {
*/

func (ps *peers) DEBUG_hasPeer(key sigPubKey) bool {
	ports := ps.ports.Load().(map[switchPort]*peer)
	for _, p := range ports {
		if p == nil {
			continue
		}
		if p.sig == key {
			return true
		}
	}
	return false
}

func (ps *peers) DEBUG_getPorts() map[switchPort]*peer {
	ports := ps.ports.Load().(map[switchPort]*peer)
	newPeers := make(map[switchPort]*peer)
	for port, p := range ports {
		newPeers[port] = p
	}
	return newPeers
}

func (p *peer) DEBUG_getSigKey() sigPubKey {
	return p.sig
}

func (p *peer) DEBUG_getPort() switchPort {
	return p.port
}

// Router

func (c *Core) DEBUG_getSwitchTable() *switchTable {
	return &c.switchTable
}

func (c *Core) DEBUG_getLocator() switchLocator {
	return c.switchTable.getLocator()
}

func (l *switchLocator) DEBUG_getCoords() []byte {
	return l.getCoords()
}

func (c *Core) DEBUG_switchLookup(dest []byte, ttl uint64) (switchPort, uint64) {
	return c.switchTable.lookup(dest, ttl)
}

/*
@@ -109,45 +113,49 @@ func (t *switchTable) DEBUG_isDirty() bool {
*/

func (t *switchTable) DEBUG_dumpTable() {
	//data := t.data.Load().(*tabledata)
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	data := t.data
	for _, peer := range data.peers {
		//fmt.Println("DUMPTABLE:", t.treeID, peer.treeID, peer.port,
		//	peer.locator.Root, peer.coords,
		//	peer.reverse.Root, peer.reverse.Coords, peer.forward)
		fmt.Println("DUMPTABLE:", t.key, peer.key, peer.locator.coords, peer.port /*, peer.forward*/)
	}
}

func (t *switchTable) DEBUG_getReversePort(port switchPort) switchPort {
	// Returns Port(0) if it cannot get the reverse peer for any reason
	//data := t.data.Load().(*tabledata)
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	data := t.data
	if port >= switchPort(len(data.peers)) {
		return switchPort(0)
	}
	pinfo := data.peers[port]
	if len(pinfo.locator.coords) < 1 {
		return switchPort(0)
	}
	return pinfo.locator.coords[len(pinfo.locator.coords)-1]
}

// Wire

func DEBUG_wire_encode_coords(coords []byte) []byte {
	return wire_encode_coords(coords)
}

// DHT, via core

func (c *Core) DEBUG_getDHTSize() int {
	total := 0
	for bidx := 0; bidx < c.dht.nBuckets(); bidx++ {
		b := c.dht.getBucket(bidx)
		total += len(b.infos)
	}
	return total
}

// udpInterface
@@ -193,99 +201,104 @@ func (c *Core) DEBUG_startLoopbackUDPInterface() {
////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_getAddr() *address {
	return address_addrForNodeID(&c.dht.nodeID)
}

func (c *Core) DEBUG_startTun(ifname string) {
	c.DEBUG_startTunWithMTU(ifname, 1280)
}

func (c *Core) DEBUG_startTunWithMTU(ifname string, mtu int) {
	addr := c.DEBUG_getAddr()
	straddr := fmt.Sprintf("%s/%v", net.IP(addr[:]).String(), 8*len(address_prefix))
	err := c.tun.setup(ifname, straddr, mtu)
	if err != nil {
		panic(err)
	}
	go c.tun.read()
	go c.tun.write()
}

func (c *Core) DEBUG_stopTun() {
	c.tun.close()
}

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_newBoxKeys() (*boxPubKey, *boxPrivKey) {
	return newBoxKeys()
}

func (c *Core) DEBUG_newSigKeys() (*sigPubKey, *sigPrivKey) {
	return newSigKeys()
}

func (c *Core) DEBUG_getNodeID(pub *boxPubKey) *NodeID {
	return getNodeID(pub)
}

func (c *Core) DEBUG_getTreeID(pub *sigPubKey) *TreeID {
	return getTreeID(pub)
}

func (c *Core) DEBUG_addrForNodeID(nodeID *NodeID) string {
	return net.IP(address_addrForNodeID(nodeID)[:]).String()
}

func (c *Core) DEBUG_init(bpub []byte,
	bpriv []byte,
	spub []byte,
	spriv []byte) {
	var boxPub boxPubKey
	var boxPriv boxPrivKey
	var sigPub sigPubKey
	var sigPriv sigPrivKey
	copy(boxPub[:], bpub)
	copy(boxPriv[:], bpriv)
	copy(sigPub[:], spub)
	copy(sigPriv[:], spriv)
	c.init(&boxPub, &boxPriv, &sigPub, &sigPriv)
}

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_setupAndStartGlobalUDPInterface(addrport string) {
	iface := udpInterface{}
	iface.init(c, addrport)
	c.udp = &iface
}

func (c *Core) DEBUG_getGlobalUDPAddr() net.Addr {
	return c.udp.sock.LocalAddr()
}

func (c *Core) DEBUG_maybeSendUDPKeys(saddr string) {
	addr := connAddr(saddr)
	c.udp.mutex.RLock()
	_, isIn := c.udp.conns[connAddr(addr)]
	c.udp.mutex.RUnlock()
	if !isIn {
		c.udp.sendKeys(addr)
	}
}

////////////////////////////////////////////////////////////////////////////////

//*
func (c *Core) DEBUG_setupAndStartGlobalTCPInterface(addrport string) {
	iface := tcpInterface{}
	iface.init(c, addrport)
	c.tcp = &iface
}

func (c *Core) DEBUG_getGlobalTCPAddr() *net.TCPAddr {
	return c.tcp.serv.Addr().(*net.TCPAddr)
}

func (c *Core) DEBUG_addTCPConn(saddr string) {
	c.tcp.call(saddr)
}

//*/

/*
@@ -318,22 +331,21 @@ func (c *Core) DEBUG_addKCPConn(saddr string) {
////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_setLogger(log *log.Logger) {
	c.log = log
}

////////////////////////////////////////////////////////////////////////////////

func DEBUG_simLinkPeers(p, q *peer) {
	// Sets q.out() to point to p and starts p.linkLoop()
	plinkIn := make(chan []byte, 1)
	qlinkIn := make(chan []byte, 1)
	p.out = func(bs []byte) {
		go q.handlePacket(bs, qlinkIn)
	}
	q.out = func(bs []byte) {
		go p.handlePacket(bs, plinkIn)
	}
	go p.linkLoop(plinkIn)
	go q.linkLoop(qlinkIn)
}
@@ -24,360 +24,420 @@ Slight changes *do* make it blackhole hard, bootstrapping isn't an easy problem

import "sort"
import "time"

//import "fmt"

// Maximum size for buckets and lookups
// Exception for buckets if the next one is non-full
const dht_bucket_size = 2               // This should be at least 2
const dht_lookup_size = 2               // This should be at least 1, below 2 is impractical
const dht_bucket_number = 8 * NodeIDLen // This shouldn't be changed

type dhtInfo struct {
	// TODO save their nodeID so we don't need to rehash if we need it again
	nodeID_hidden *NodeID
	key           boxPubKey
	coords        []byte
	send          time.Time // When we last sent a message
	recv          time.Time // When we last received a message
	pings         int       // Decide when to drop
}

func (info *dhtInfo) getNodeID() *NodeID {
	if info.nodeID_hidden == nil {
		info.nodeID_hidden = getNodeID(&info.key)
	}
	return info.nodeID_hidden
}

type bucket struct {
	infos []*dhtInfo
}

type dhtReq struct {
	key    boxPubKey // Key of whoever asked
	coords []byte    // Coords of whoever asked
	dest   NodeID    // NodeID they're asking about
}

type dhtRes struct {
	key    boxPubKey // key to respond to
	coords []byte    // coords to respond to
	dest   NodeID
	infos  []*dhtInfo // response
}

type dht struct {
	core           *Core
	nodeID         NodeID
	buckets_hidden [dht_bucket_number]bucket // Extra is for the self-bucket
	peers          chan *dhtInfo             // other goroutines put incoming dht updates here
	reqs           map[boxPubKey]map[NodeID]time.Time
	offset         int
}

func (t *dht) init(c *Core) {
	t.core = c
	t.nodeID = *t.core.GetNodeID()
	t.peers = make(chan *dhtInfo, 1)
	t.reqs = make(map[boxPubKey]map[NodeID]time.Time)
}

func (t *dht) handleReq(req *dhtReq) {
	// Send them what they asked for
	loc := t.core.switchTable.getLocator()
	coords := loc.getCoords()
	res := dhtRes{
		key:    t.core.boxPub,
		coords: coords,
		dest:   req.dest,
		infos:  t.lookup(&req.dest),
	}
	t.sendRes(&res, req)
	// Also (possibly) add them to our DHT
	info := dhtInfo{
		key:    req.key,
		coords: req.coords,
	}
	t.insertIfNew(&info) // This seems DoSable (we just trust their coords...)
	//if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam...
}

func (t *dht) handleRes(res *dhtRes) {
	reqs, isIn := t.reqs[res.key]
	if !isIn {
		return
	}
	_, isIn = reqs[res.dest]
	if !isIn {
		return
	}
	rinfo := dhtInfo{
		key:    res.key,
		coords: res.coords,
		send:   time.Now(), // Technically wrong but should be OK... FIXME or not
		recv:   time.Now(),
	}
	// If they're already in the table, then keep the correct send time
	bidx, isOK := t.getBucketIndex(rinfo.getNodeID())
	if !isOK {
		return
	}
	b := t.getBucket(bidx)
	for _, oldinfo := range b.infos {
		if oldinfo.key == rinfo.key {
			rinfo.send = oldinfo.send
		}
	}
	// Insert into table
	t.insert(&rinfo)
	if res.dest == *rinfo.getNodeID() {
		return
	} // No infinite recursions
	// ping the nodes we were told about
	if len(res.infos) > dht_lookup_size {
		// Ignore any "extra" lookup results
		res.infos = res.infos[:dht_lookup_size]
	}
	for _, info := range res.infos {
		bidx, isOK := t.getBucketIndex(info.getNodeID())
		if !isOK {
			continue
		}
		b := t.getBucket(bidx)
		if b.contains(info) {
			continue
		} // wait for maintenance cycle to get them
		t.ping(info, info.getNodeID())
	}
}

func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
	// FIXME this allocates a bunch, sorts, and keeps the part it likes
	// It would be better to only track the part it likes to begin with
	addInfos := func(res []*dhtInfo, infos []*dhtInfo) []*dhtInfo {
		for _, info := range infos {
			if info == nil {
				panic("Should never happen!")
			}
			if true || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) {
				res = append(res, info)
			}
		}
		return res
	}
	var res []*dhtInfo
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		b := t.getBucket(bidx)
		res = addInfos(res, b.infos)
	}
	doSort := func(infos []*dhtInfo) {
		less := func(i, j int) bool {
			return dht_firstCloserThanThird(infos[i].getNodeID(),
				nodeID,
				infos[j].getNodeID())
		}
		sort.SliceStable(infos, less)
	}
	doSort(res)
	if len(res) > dht_lookup_size {
		res = res[:dht_lookup_size]
	}
	return res
}

func (t *dht) getBucket(bidx int) *bucket {
	return &t.buckets_hidden[bidx]
}

func (t *dht) nBuckets() int {
	return len(t.buckets_hidden)
}

func (t *dht) insertIfNew(info *dhtInfo) {
	//fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords)
	// Insert a peer if and only if the bucket doesn't already contain it
	nodeID := info.getNodeID()
	bidx, isOK := t.getBucketIndex(nodeID)
	if !isOK {
		return
	}
	b := t.getBucket(bidx)
	if !b.contains(info) {
		// We've never heard this node before
		// TODO is there a better time than "now" to set send/recv to?
		// (Is there another "natural" choice that bootstraps faster?)
		info.send = time.Now()
		info.recv = info.send
		t.insert(info)
	}
}

func (t *dht) insert(info *dhtInfo) {
	//fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords)
	// First update the time on this info
	info.recv = time.Now()
	// Get the bucket for this node
	nodeID := info.getNodeID()
	bidx, isOK := t.getBucketIndex(nodeID)
	if !isOK {
		return
	}
	b := t.getBucket(bidx)
	// First drop any existing entry from the bucket
	b.drop(&info.key)
	// Now add to the *end* of the bucket
	b.infos = append(b.infos, info)
	// Check if the next bucket is non-full and return early if it is
	if bidx+1 == t.nBuckets() {
		return
	}
	bnext := t.getBucket(bidx + 1)
	if len(bnext.infos) < dht_bucket_size {
		return
	}
	// Shrink from the *front* to required size
	for len(b.infos) > dht_bucket_size {
		b.infos = b.infos[1:]
	}
}

func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		them := nodeID[bidx/8] & (0x80 >> byte(bidx%8))
		me := t.nodeID[bidx/8] & (0x80 >> byte(bidx%8))
		if them != me {
			return bidx, true
		}
	}
	return t.nBuckets(), false
}

func (b *bucket) contains(ninfo *dhtInfo) bool {
	// Compares if key and coords match
	for _, info := range b.infos {
		if info == nil {
			panic("Should never happen")
		}
		if info.key == ninfo.key {
			if len(info.coords) != len(ninfo.coords) {
				return false
			}
			for idx := 0; idx < len(info.coords); idx++ {
				if info.coords[idx] != ninfo.coords[idx] {
					return false
				}
			}
			return true
		}
	}
	return false
}

func (b *bucket) drop(key *boxPubKey) {
	clean := func(infos []*dhtInfo) []*dhtInfo {
		cleaned := infos[:0]
		for _, info := range infos {
			if info.key == *key {
				continue
			}
			cleaned = append(cleaned, info)
		}
		return cleaned
	}
	b.infos = clean(b.infos)
}

func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
	// Send a dhtReq to the node in dhtInfo
	bs := req.encode()
	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  dest.coords,
		toKey:   dest.key,
		fromKey: t.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	t.core.router.out(packet)
	reqsToDest, isIn := t.reqs[dest.key]
	if !isIn {
		t.reqs[dest.key] = make(map[NodeID]time.Time)
		reqsToDest, isIn = t.reqs[dest.key]
		if !isIn {
			panic("This should never happen")
		}
	}
	reqsToDest[req.dest] = time.Now()
}

func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
	// Send a reply for a dhtReq
	bs := res.encode()
	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  req.coords,
		toKey:   req.key,
		fromKey: t.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	t.core.router.out(packet)
}

func (b *bucket) isEmpty() bool {
	return len(b.infos) == 0
}

func (b *bucket) nextToPing() *dhtInfo {
	// Check the nodes in the bucket
	// Return whichever one responded least recently
	// Delay of 6 seconds between pinging the same node
	// Gives them time to respond
	// And time between traffic loss from short term congestion in the network
	var toPing *dhtInfo
	for _, next := range b.infos {
		if time.Since(next.send) < 6*time.Second {
			continue
		}
		if toPing == nil || next.recv.Before(toPing.recv) {
			toPing = next
		}
	}
	return toPing
}

func (t *dht) getTarget(bidx int) *NodeID {
	targetID := t.nodeID
	targetID[bidx/8] ^= 0x80 >> byte(bidx%8)
	return &targetID
}

func (t *dht) ping(info *dhtInfo, target *NodeID) {
	if info.pings > 2 {
		bidx, isOK := t.getBucketIndex(info.getNodeID())
		if !isOK {
			panic("This should never happen")
		}
		b := t.getBucket(bidx)
		b.drop(&info.key)
		return
	}
	if target == nil {
		target = &t.nodeID
	}
	loc := t.core.switchTable.getLocator()
	coords := loc.getCoords()
	req := dhtReq{
		key:    t.core.boxPub,
		coords: coords,
		dest:   *target,
	}
	info.pings++
	info.send = time.Now()
	t.sendReq(&req, info)
}

func (t *dht) doMaintenance() {
	// First clean up reqs
	for key, reqs := range t.reqs {
		for target, timeout := range reqs {
			if time.Since(timeout) > time.Minute {
				delete(reqs, target)
			}
		}
		if len(reqs) == 0 {
			delete(t.reqs, key)
		}
	}
	// Ping the least recently contacted node
	// This is to make sure we eventually notice when someone times out
	var oldest *dhtInfo
	last := 0
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		b := t.getBucket(bidx)
		if !b.isEmpty() {
			last = bidx
			toPing := b.nextToPing()
			if toPing == nil {
				continue
			} // We've recently pinged everyone in b
			if oldest == nil || toPing.recv.Before(oldest.recv) {
				oldest = toPing
			}
		}
	}
	if oldest != nil {
		t.ping(oldest, nil)
	} // if the DHT isn't empty
	// Refresh buckets
	if t.offset > last {
		t.offset = 0
	}
	target := t.getTarget(t.offset)
	for _, info := range t.lookup(target) {
		t.ping(info, target)
		break
	}
	t.offset++
}

func dht_firstCloserThanThird(first *NodeID,
	second *NodeID,
	third *NodeID) bool {
	for idx := 0; idx < NodeIDLen; idx++ {
		f := first[idx] ^ second[idx]
		t := third[idx] ^ second[idx]
		if f == t {
			continue
		}
		return f < t
	}
	return false
}
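dht_firstCloserThanThird implements the Kademlia-style XOR metric: first is closer to second than third is exactly when first XOR second is smaller than third XOR second, compared as big-endian integers. A standalone check on short IDs (real NodeIDs are 64 bytes):

package main

import "fmt"

func firstCloserThanThird(first, second, third [2]byte) bool {
	for idx := range first {
		f := first[idx] ^ second[idx]
		t := third[idx] ^ second[idx]
		if f == t {
			continue
		}
		return f < t
	}
	return false
}

func main() {
	target := [2]byte{0x12, 0x34}
	a := [2]byte{0x12, 0x30} // differs from target only in low bits
	b := [2]byte{0x92, 0x34} // differs in the top bit
	fmt.Println(firstCloserThanThird(a, target, b)) // true: a is closer
}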
||||
|
@ -11,335 +11,424 @@ import "time"
|
||||
import "sync"
|
||||
import "sync/atomic"
|
||||
import "math"
|
||||
|
||||
//import "fmt"
|
||||
|
||||
type peers struct {
|
||||
core *Core
|
||||
mutex sync.Mutex // Synchronize writes to atomic
|
||||
ports atomic.Value //map[Port]*peer, use CoW semantics
|
||||
//ports map[Port]*peer
|
||||
core *Core
|
||||
mutex sync.Mutex // Synchronize writes to atomic
|
||||
ports atomic.Value //map[Port]*peer, use CoW semantics
|
||||
//ports map[Port]*peer
|
||||
}
|
||||
|
||||
func (ps *peers) init(c *Core) {
|
||||
ps.mutex.Lock()
|
||||
defer ps.mutex.Unlock()
|
||||
ps.putPorts(make(map[switchPort]*peer))
|
||||
ps.core = c
|
||||
ps.mutex.Lock()
|
||||
defer ps.mutex.Unlock()
|
||||
ps.putPorts(make(map[switchPort]*peer))
|
||||
ps.core = c
|
||||
}
|
||||
|
||||
func (ps *peers) getPorts() map[switchPort]*peer {
|
||||
return ps.ports.Load().(map[switchPort]*peer)
|
||||
return ps.ports.Load().(map[switchPort]*peer)
|
||||
}
|
||||
|
||||
func (ps *peers) putPorts(ports map[switchPort]*peer) {
|
||||
ps.ports.Store(ports)
|
||||
ps.ports.Store(ports)
|
||||
}
|
||||
|
||||
type peer struct {
|
||||
// Rolling approximation of bandwidth, in bps, used by switch, updated by tcp
|
||||
// use get/update methods only! (atomic accessors as float64)
|
||||
bandwidth uint64
|
||||
// BUG: sync/atomic, 32 bit platforms need the above to be the first element
|
||||
box boxPubKey
|
||||
sig sigPubKey
|
||||
shared boxSharedKey
|
||||
//in <-chan []byte
|
||||
//out chan<- []byte
|
||||
//in func([]byte)
|
||||
out func([]byte)
|
||||
core *Core
|
||||
port switchPort
|
||||
msgAnc *msgAnnounce
|
||||
msgHops []*msgHop
|
||||
myMsg *switchMessage
|
||||
mySigs []sigInfo
|
||||
// This is used to limit how often we perform expensive operations
|
||||
// Specifically, processing switch messages, signing, and verifying sigs
|
||||
// Resets at the start of each tick
|
||||
throttle uint8
|
||||
// Rolling approximation of bandwidth, in bps, used by switch, updated by tcp
|
||||
// use get/update methods only! (atomic accessors as float64)
|
||||
bandwidth uint64
|
||||
// BUG: sync/atomic, 32 bit platforms need the above to be the first element
|
||||
box boxPubKey
|
||||
sig sigPubKey
|
||||
shared boxSharedKey
|
||||
//in <-chan []byte
|
||||
//out chan<- []byte
|
||||
//in func([]byte)
|
||||
out func([]byte)
|
||||
core *Core
|
||||
port switchPort
|
||||
msgAnc *msgAnnounce
|
||||
msgHops []*msgHop
|
||||
myMsg *switchMessage
|
||||
mySigs []sigInfo
|
||||
// This is used to limit how often we perform expensive operations
|
||||
// Specifically, processing switch messages, signing, and verifying sigs
|
||||
// Resets at the start of each tick
|
||||
throttle uint8
|
||||
}
|
||||
|
||||
const peer_Throttle = 1
|
||||
|
||||
func (p *peer) getBandwidth() float64 {
|
||||
bits := atomic.LoadUint64(&p.bandwidth)
|
||||
return math.Float64frombits(bits)
|
||||
bits := atomic.LoadUint64(&p.bandwidth)
|
||||
return math.Float64frombits(bits)
|
||||
}
|
||||
|
||||
func (p *peer) updateBandwidth(bytes int, duration time.Duration) {
|
||||
if p == nil { return }
|
||||
for ok := false ; !ok ; {
|
||||
oldBits := atomic.LoadUint64(&p.bandwidth)
|
||||
oldBandwidth := math.Float64frombits(oldBits)
|
||||
bandwidth := oldBandwidth * 7 / 8 + float64(bytes)/duration.Seconds()
|
||||
bits := math.Float64bits(bandwidth)
|
||||
ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
|
||||
}
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
for ok := false; !ok; {
|
||||
oldBits := atomic.LoadUint64(&p.bandwidth)
|
||||
oldBandwidth := math.Float64frombits(oldBits)
|
||||
bandwidth := oldBandwidth*7/8 + float64(bytes)/duration.Seconds()
|
||||
bits := math.Float64bits(bandwidth)
|
||||
ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
|
||||
}
|
||||
}
|

func (ps *peers) newPeer(box *boxPubKey,
	sig *sigPubKey) *peer {
	//in <-chan []byte,
	//out chan<- []byte) *peer {
	p := peer{box: *box,
		sig:    *sig,
		shared: *getSharedKey(&ps.core.boxPriv, box),
		//in: in,
		//out: out,
		core: ps.core}
	ps.mutex.Lock()
	defer ps.mutex.Unlock()
	oldPorts := ps.getPorts()
	newPorts := make(map[switchPort]*peer)
	for k, v := range oldPorts {
		newPorts[k] = v
	}
	for idx := switchPort(0); true; idx++ {
		if _, isIn := newPorts[idx]; !isIn {
			p.port = switchPort(idx)
			newPorts[p.port] = &p
			break
		}
	}
	ps.putPorts(newPorts)
	return &p
}

func (p *peer) linkLoop(in <-chan []byte) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case packet, ok := <-in:
			if !ok {
				return
			}
			p.handleLinkTraffic(packet)
		case <-ticker.C:
			{
				p.throttle = 0
				if p.port == 0 {
					continue
				} // Don't send announces on selfInterface
				// Maybe we shouldn't time out, and instead wait for a kill signal?
				p.myMsg, p.mySigs = p.core.switchTable.createMessage(p.port)
				p.sendSwitchAnnounce()
			}
		}
	}
}

func (p *peer) handlePacket(packet []byte, linkIn chan<- []byte) {
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_Traffic:
		p.handleTraffic(packet, pTypeLen)
	case wire_ProtocolTraffic:
		p.handleTraffic(packet, pTypeLen)
	case wire_LinkProtocolTraffic:
		{
			select {
			case linkIn <- packet:
			default:
			}
		}
	default: /*panic(pType) ;*/
		return
	}
}

func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
	ttl, ttlLen := wire_decode_uint64(packet[pTypeLen:])
	ttlBegin := pTypeLen
	ttlEnd := pTypeLen + ttlLen
	coords, coordLen := wire_decode_coords(packet[ttlEnd:])
	coordEnd := ttlEnd + coordLen
	if coordEnd == len(packet) {
		return
	} // No payload
	toPort, newTTL := p.core.switchTable.lookup(coords, ttl)
	if toPort == p.port {
		return
	} // FIXME? shouldn't happen, does it? would loop
	to := p.core.peers.getPorts()[toPort]
	if to == nil {
		return
	}
	newTTLSlice := wire_encode_uint64(newTTL)
	// This mutates the packet in-place if the length of the TTL changes!
	shift := ttlLen - len(newTTLSlice)
	copy(packet[ttlBegin+shift:], newTTLSlice)
	copy(packet[shift:], packet[:pTypeLen])
	packet = packet[shift:]
	to.sendPacket(packet)
}
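
The in-place mutation comment is worth unpacking: wire_encode_uint64 is variable-length, so the decremented TTL can need fewer bytes than the one it replaces. Rather than reallocating, handleTraffic writes the new TTL at the tail of the old TTL's slot, slides the type prefix right by the difference, and reslices. A worked sketch on raw bytes (hypothetical values: a one-byte type followed by a two-byte varint TTL that re-encodes to one byte):

	packet := []byte{0x00, 0x81, 0x00, 0xaa, 0xbb} // [type][ttl][payload...]
	newTTL := []byte{0x7f}                         // re-encoded TTL is shorter
	shift := 2 - len(newTTL)                       // header shrinks by one byte
	copy(packet[1+shift:], newTTL)                 // TTL into its new slot
	copy(packet[shift:], packet[:1])               // slide the type prefix right
	packet = packet[shift:]                        // [0x00, 0x7f, 0xaa, 0xbb]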

func (p *peer) sendPacket(packet []byte) {
	// Is there ever a case where something more complicated is needed?
	// What if p.out blocks?
	p.out(packet)
}

func (p *peer) sendLinkPacket(packet []byte) {
	bs, nonce := boxSeal(&p.shared, packet, nil)
	linkPacket := wire_linkProtoTrafficPacket{
		toKey:   p.box,
		fromKey: p.core.boxPub,
		nonce:   *nonce,
		payload: bs,
	}
	packet = linkPacket.encode()
	p.sendPacket(packet)
}

func (p *peer) handleLinkTraffic(bs []byte) {
	packet := wire_linkProtoTrafficPacket{}
	// TODO throttle on returns?
	if !packet.decode(bs) {
		return
	}
	if packet.toKey != p.core.boxPub {
		return
	}
	if packet.fromKey != p.box {
		return
	}
	payload, isOK := boxOpen(&p.shared, packet.payload, &packet.nonce)
	if !isOK {
		return
	}
	pType, pTypeLen := wire_decode_uint64(payload)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_SwitchAnnounce:
		p.handleSwitchAnnounce(payload)
	case wire_SwitchHopRequest:
		p.handleSwitchHopRequest(payload)
	case wire_SwitchHop:
		p.handleSwitchHop(payload)
	}
}

func (p *peer) handleSwitchAnnounce(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchAnnounce")
	anc := msgAnnounce{}
	//err := wire_decode_struct(packet, &anc)
	//if err != nil { return }
	if !anc.decode(packet) {
		return
	}
	//if p.msgAnc != nil && anc.Seq != p.msgAnc.Seq { p.msgHops = nil }
	if p.msgAnc == nil ||
		anc.root != p.msgAnc.root ||
		anc.tstamp != p.msgAnc.tstamp ||
		anc.seq != p.msgAnc.seq {
		p.msgHops = nil
	}
	p.msgAnc = &anc
	p.processSwitchMessage()
}

func (p *peer) requestHop(hop uint64) {
	//p.core.log.Println("DEBUG requestHop")
	req := msgHopReq{}
	req.root = p.msgAnc.root
	req.tstamp = p.msgAnc.tstamp
	req.seq = p.msgAnc.seq
	req.hop = hop
	packet := req.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) handleSwitchHopRequest(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchHopRequest")
	if p.throttle > peer_Throttle {
		return
	}
	if p.myMsg == nil {
		return
	}
	req := msgHopReq{}
	if !req.decode(packet) {
		return
	}
	if req.root != p.myMsg.locator.root {
		return
	}
	if req.tstamp != p.myMsg.locator.tstamp {
		return
	}
	if req.seq != p.myMsg.seq {
		return
	}
	if uint64(len(p.myMsg.locator.coords)) <= req.hop {
		return
	}
	res := msgHop{}
	res.root = p.myMsg.locator.root
	res.tstamp = p.myMsg.locator.tstamp
	res.seq = p.myMsg.seq
	res.hop = req.hop
	res.port = p.myMsg.locator.coords[res.hop]
	sinfo := p.getSig(res.hop)
	//p.core.log.Println("DEBUG sig:", sinfo)
	res.next = sinfo.next
	res.sig = sinfo.sig
	packet = res.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) handleSwitchHop(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchHop")
	if p.throttle > peer_Throttle {
		return
	}
	if p.msgAnc == nil {
		return
	}
	res := msgHop{}
	if !res.decode(packet) {
		return
	}
	if res.root != p.msgAnc.root {
		return
	}
	if res.tstamp != p.msgAnc.tstamp {
		return
	}
	if res.seq != p.msgAnc.seq {
		return
	}
	if res.hop != uint64(len(p.msgHops)) {
		return
	} // always process in order
	loc := switchLocator{coords: make([]switchPort, 0, len(p.msgHops)+1)}
	loc.root = res.root
	loc.tstamp = res.tstamp
	for _, hop := range p.msgHops {
		loc.coords = append(loc.coords, hop.port)
	}
	loc.coords = append(loc.coords, res.port)
	thisHopKey := &res.root
	if res.hop != 0 {
		thisHopKey = &p.msgHops[res.hop-1].next
	}
	bs := getBytesForSig(&res.next, &loc)
	if p.core.sigs.check(thisHopKey, &res.sig, bs) {
		p.msgHops = append(p.msgHops, &res)
		p.processSwitchMessage()
	} else {
		p.throttle++
	}
}
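
The signature check here is inductive: hop 0 must be signed by the root's key, and every later hop must be signed by the key named as "next" in the hop before it, with each signature covering the locator as built up to that hop. A verified chain therefore proves the whole path back to the root. A condensed sketch of the invariant, assuming a verify(key, sig, msg) helper with the obvious meaning and that signatures are stored as sigBytes:

	func verifyChain(root sigPubKey, tstamp int64, hops []*msgHop,
		verify func(*sigPubKey, *sigBytes, []byte) bool) bool {
		key := &root // hop 0 is signed by the root itself
		loc := switchLocator{root: root, tstamp: tstamp}
		for _, hop := range hops {
			loc.coords = append(loc.coords, hop.port) // one port per verified hop
			if !verify(key, &hop.sig, getBytesForSig(&hop.next, &loc)) {
				return false
			}
			key = &hop.next // the following hop must be signed by this key
		}
		return true
	}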

func (p *peer) processSwitchMessage() {
	//p.core.log.Println("DEBUG: processSwitchMessage")
	if p.throttle > peer_Throttle {
		return
	}
	if p.msgAnc == nil {
		return
	}
	if uint64(len(p.msgHops)) < p.msgAnc.len {
		p.requestHop(uint64(len(p.msgHops)))
		return
	}
	p.throttle++
	if p.msgAnc.len != uint64(len(p.msgHops)) {
		return
	}
	msg := switchMessage{}
	coords := make([]switchPort, 0, len(p.msgHops))
	sigs := make([]sigInfo, 0, len(p.msgHops))
	for idx, hop := range p.msgHops {
		// Consistency checks, should be redundant (already checked these...)
		if hop.root != p.msgAnc.root {
			return
		}
		if hop.tstamp != p.msgAnc.tstamp {
			return
		}
		if hop.seq != p.msgAnc.seq {
			return
		}
		if hop.hop != uint64(idx) {
			return
		}
		coords = append(coords, hop.port)
		sigs = append(sigs, sigInfo{next: hop.next, sig: hop.sig})
	}
	msg.from = p.sig
	msg.locator.root = p.msgAnc.root
	msg.locator.tstamp = p.msgAnc.tstamp
	msg.locator.coords = coords
	msg.seq = p.msgAnc.seq
	//msg.RSeq = p.msgAnc.RSeq
	//msg.Degree = p.msgAnc.Deg
	p.core.switchTable.handleMessage(&msg, p.port, sigs)
	if len(coords) == 0 {
		return
	}
	// Reuse locator, set the coords to the peer's coords, to use in dht
	msg.locator.coords = coords[:len(coords)-1]
	// Pass a message to the dht informing it that this peer (still) exists
	dinfo := dhtInfo{
		key:    p.box,
		coords: msg.locator.getCoords(),
	}
	p.core.dht.peers <- &dinfo
}

func (p *peer) sendSwitchAnnounce() {
	anc := msgAnnounce{}
	anc.root = p.myMsg.locator.root
	anc.tstamp = p.myMsg.locator.tstamp
	anc.seq = p.myMsg.seq
	anc.len = uint64(len(p.myMsg.locator.coords))
	//anc.Deg = p.myMsg.Degree
	//anc.RSeq = p.myMsg.RSeq
	packet := anc.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) getSig(hop uint64) sigInfo {
	//p.core.log.Println("DEBUG getSig:", len(p.mySigs), hop)
	if hop < uint64(len(p.mySigs)) {
		return p.mySigs[hop]
	}
	bs := getBytesForSig(&p.sig, &p.myMsg.locator)
	sig := sigInfo{}
	sig.next = p.sig
	sig.sig = *sign(&p.core.sigPriv, bs)
	p.mySigs = append(p.mySigs, sig)
	//p.core.log.Println("DEBUG sig bs:", bs)
	return sig
}

func getBytesForSig(next *sigPubKey, loc *switchLocator) []byte {
	//bs, err := wire_encode_locator(loc)
	//if err != nil { panic(err) }
	bs := append([]byte(nil), next[:]...)
	bs = append(bs, wire_encode_locator(loc)...)
	//bs := wire_encode_locator(loc)
	//bs = append(next[:], bs...)
	return bs
}

@@ -23,198 +23,267 @@ package yggdrasil

// The router then runs some sanity checks before passing it to the tun

import "time"

//import "fmt"
//import "net"

type router struct {
	core  *Core
	addr  address
	in    <-chan []byte // packets we received from the network, link to peer's "out"
	out   func([]byte)  // packets we're sending to the network, link to peer's "in"
	recv  chan<- []byte // place where the tun pulls received packets from
	send  <-chan []byte // place where the tun puts outgoing packets
	reset chan struct{} // signal that coords changed (re-init sessions/dht)
}

func (r *router) init(core *Core) {
	r.core = core
	r.addr = *address_addrForNodeID(&r.core.dht.nodeID)
	in := make(chan []byte, 1) // TODO something better than this...
	p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub) //, out, in)
	// TODO set in/out functions on the new peer...
	p.out = func(packet []byte) { in <- packet } // FIXME in theory it blocks...
	r.in = in
	// TODO? make caller responsible for go-ing if it needs to not block
	r.out = func(packet []byte) { p.handlePacket(packet, nil) }
	// TODO attach these to the tun
	// Maybe that's the core's job...
	// It creates tun, creates the router, creates channels, sets them?
	recv := make(chan []byte, 1)
	send := make(chan []byte, 1)
	r.recv = recv
	r.send = send
	r.core.tun.recv = recv
	r.core.tun.send = send
	r.reset = make(chan struct{}, 1)
	go r.mainLoop()
}
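
init wires the router up as an ordinary peer (presumably taking port 0, the first free port, since it is created before any real links), so the rest of the code needs no special case for local traffic: whatever the router emits goes straight into that peer's handlePacket, and whatever the switch delivers to port 0 comes back on the in channel. Sketch of the loop, using the names above:

	// r.out(bs) --> p.handlePacket(bs, nil) --> switch lookup
	// p.out(bs) --> in --> r.mainLoop --> r.handleIn(bs)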

func (r *router) mainLoop() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case p := <-r.in:
			r.handleIn(p)
		case p := <-r.send:
			r.sendPacket(p)
		case info := <-r.core.dht.peers:
			r.core.dht.insert(info) //r.core.dht.insertIfNew(info)
		case <-r.reset:
			r.core.sessions.resetInits()
		case <-ticker.C:
			{
				// Any periodic maintenance stuff goes here
				r.core.dht.doMaintenance()
				util_getBytes() // To slowly drain things
			}
		}
	}
}

func (r *router) sendPacket(bs []byte) {
	if len(bs) < 40 {
		panic("Tried to send a packet shorter than a header...")
	}
	var sourceAddr address
	var sourceSubnet subnet
	copy(sourceAddr[:], bs[8:])
	copy(sourceSubnet[:], bs[8:])
	if !sourceAddr.isValid() && !sourceSubnet.isValid() {
		return
	}
	var dest address
	copy(dest[:], bs[24:])
	var snet subnet
	copy(snet[:], bs[24:])
	if !dest.isValid() && !snet.isValid() {
		return
	}
	doSearch := func(packet []byte) {
		var nodeID, mask *NodeID
		if dest.isValid() {
			nodeID, mask = dest.getNodeIDandMask()
		}
		if snet.isValid() {
			nodeID, mask = snet.getNodeIDandMask()
		}
		sinfo, isIn := r.core.searches.searches[*nodeID]
		if !isIn {
			sinfo = r.core.searches.createSearch(nodeID, mask)
		}
		if packet != nil {
			sinfo.packet = packet
		}
		r.core.searches.sendSearch(sinfo)
	}
	var sinfo *sessionInfo
	var isIn bool
	if dest.isValid() {
		sinfo, isIn = r.core.sessions.getByTheirAddr(&dest)
	}
	if snet.isValid() {
		sinfo, isIn = r.core.sessions.getByTheirSubnet(&snet)
	}
	switch {
	case !isIn || !sinfo.init:
		// No or uninitialized session, so we need to search first
		doSearch(bs)
	case time.Since(sinfo.time) > 6*time.Second:
		// We haven't heard from the dest in a while; they may have changed coords
		// Maybe the connection is idle, or maybe one of us changed coords
		// Try searching to either ping them (a little overhead) or fix the coords
		doSearch(nil)
		fallthrough
	//default: go func() { sinfo.send<-bs }()
	default:
		sinfo.send <- bs
	}
}
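
The stale-session case deliberately falls through: if nothing has been heard from the destination in six seconds, a search is kicked off to re-ping them or repair coords, and the packet is still sent on the session that already exists. Go's fallthrough transfers control into the next case body unconditionally, so the shape is (noSession and idleTooLong are placeholder names for the conditions above):

	switch {
	case noSession:
		doSearch(bs) // buffer the packet; send it when the search completes
	case idleTooLong:
		doSearch(nil) // refresh in the background...
		fallthrough   // ...but still use the session we have
	default:
		sinfo.send <- bs
	}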

func (r *router) recvPacket(bs []byte, theirAddr *address) {
	// TODO pass their NodeID, check *that* instead
	// Or store their address in the session?...
	//fmt.Println("Recv packet")
	if theirAddr == nil {
		panic("Should not happen ever")
	}
	if len(bs) < 24 {
		return
	}
	var source address
	copy(source[:], bs[8:])
	var snet subnet
	copy(snet[:], bs[8:])
	if !source.isValid() && !snet.isValid() {
		return
	}
	//go func() { r.recv<-bs }()
	r.recv <- bs
}
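
The offsets in sendPacket and recvPacket are the fixed IPv6 header layout: bytes 8..23 hold the source address and bytes 24..39 the destination, which is also why sendPacket panics on anything shorter than the 40-byte fixed header. For reference (hypothetical constant names):

	const (
		ipv6HeaderLen = 40 // fixed IPv6 header, hence the len(bs) < 40 check
		ipv6SrcOffset = 8  // bs[8:24] is the source address
		ipv6DstOffset = 24 // bs[24:40] is the destination address
	)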

func (r *router) handleIn(packet []byte) {
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_Traffic:
		r.handleTraffic(packet)
	case wire_ProtocolTraffic:
		r.handleProto(packet)
	default: /*panic("Should not happen in testing") ;*/
		return
	}
}

func (r *router) handleTraffic(packet []byte) {
	defer util_putBytes(packet)
	p := wire_trafficPacket{}
	if !p.decode(packet) {
		return
	}
	sinfo, isIn := r.core.sessions.getSessionForHandle(&p.handle)
	if !isIn {
		return
	}
	//go func () { sinfo.recv<-&p }()
	sinfo.recv <- &p
}

func (r *router) handleProto(packet []byte) {
	// First parse the packet
	p := wire_protoTrafficPacket{}
	if !p.decode(packet) {
		return
	}
	// Now try to open the payload
	var sharedKey *boxSharedKey
	//var theirPermPub *boxPubKey
	if p.toKey == r.core.boxPub {
		// Try to open using our permanent key
		sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.fromKey)
	} else {
		return
	}
	bs, isOK := boxOpen(sharedKey, p.payload, &p.nonce)
	if !isOK {
		return
	}
	// Now do something with the bytes in bs...
	// send dht messages to dht, sessionRefresh to sessions, data to tun...
	// For data, should check that key and IP match...
	bsType, bsTypeLen := wire_decode_uint64(bs)
	if bsTypeLen == 0 {
		return
	}
	//fmt.Println("RECV bytes:", bs)
	switch bsType {
	case wire_SessionPing:
		r.handlePing(bs, &p.fromKey)
	case wire_SessionPong:
		r.handlePong(bs, &p.fromKey)
	case wire_DHTLookupRequest:
		r.handleDHTReq(bs, &p.fromKey)
	case wire_DHTLookupResponse:
		r.handleDHTRes(bs, &p.fromKey)
	case wire_SearchRequest:
		r.handleSearchReq(bs)
	case wire_SearchResponse:
		r.handleSearchRes(bs)
	default: /*panic("Should not happen in testing") ;*/
		return
	}
}

func (r *router) handlePing(bs []byte, fromKey *boxPubKey) {
	ping := sessionPing{}
	if !ping.decode(bs) {
		return
	}
	ping.sendPermPub = *fromKey
	r.core.sessions.handlePing(&ping)
}

func (r *router) handlePong(bs []byte, fromKey *boxPubKey) {
	r.handlePing(bs, fromKey)
}

func (r *router) handleDHTReq(bs []byte, fromKey *boxPubKey) {
	req := dhtReq{}
	if !req.decode(bs) {
		return
	}
	if req.key != *fromKey {
		return
	}
	r.core.dht.handleReq(&req)
}

func (r *router) handleDHTRes(bs []byte, fromKey *boxPubKey) {
	res := dhtRes{}
	if !res.decode(bs) {
		return
	}
	if res.key != *fromKey {
		return
	}
	r.core.dht.handleRes(&res)
}

func (r *router) handleSearchReq(bs []byte) {
	req := searchReq{}
	if !req.decode(bs) {
		return
	}
	r.core.searches.handleSearchReq(&req)
}

func (r *router) handleSearchRes(bs []byte) {
	res := searchRes{}
	if !res.decode(bs) {
		return
	}
	r.core.searches.handleSearchRes(&res)
}

@@ -17,152 +17,162 @@ package yggdrasil

// This hides bugs, which I don't want to do right now

import "time"

//import "fmt"

type searchInfo struct {
	dest   *NodeID
	mask   *NodeID
	time   time.Time
	packet []byte
}

type searches struct {
	core     *Core
	searches map[NodeID]*searchInfo
}

func (s *searches) init(core *Core) {
	s.core = core
	s.searches = make(map[NodeID]*searchInfo)
}

func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
	now := time.Now()
	for dest, sinfo := range s.searches {
		if now.Sub(sinfo.time) > time.Minute {
			delete(s.searches, dest)
		}
	}
	info := searchInfo{
		dest: dest,
		mask: mask,
		time: now.Add(-time.Second),
	}
	s.searches[*dest] = &info
	return &info
}
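
Note the backdated timestamp: sendSearch (below) drops any search re-sent within one second, so createSearch sets time to one second in the past to let the first transmission through while still rate-limiting retries:

	info.time = now.Add(-time.Second)
	// immediately afterwards now.Sub(info.time) == time.Second,
	// so the one-second guard in sendSearch passes on the very first call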

////////////////////////////////////////////////////////////////////////////////

type searchReq struct {
	key    boxPubKey // Who I am
	coords []byte    // Where I am
	dest   NodeID    // Who I'm trying to connect to
}

type searchRes struct {
	key    boxPubKey // Who I am
	coords []byte    // Where I am
	dest   NodeID    // Who I was asked about
}

func (s *searches) sendSearch(info *searchInfo) {
	now := time.Now()
	if now.Sub(info.time) < time.Second {
		return
	}
	loc := s.core.switchTable.getLocator()
	coords := loc.getCoords()
	req := searchReq{
		key:    s.core.boxPub,
		coords: coords,
		dest:   *info.dest,
	}
	info.time = time.Now()
	s.handleSearchReq(&req)
}

func (s *searches) handleSearchReq(req *searchReq) {
	lookup := s.core.dht.lookup(&req.dest)
	sent := false
	//fmt.Println("DEBUG len:", len(lookup))
	for _, info := range lookup {
		//fmt.Println("DEBUG lup:", info.getNodeID())
		if dht_firstCloserThanThird(info.getNodeID(),
			&req.dest,
			&s.core.dht.nodeID) {
			s.forwardSearch(req, info)
			sent = true
			break
		}
	}
	if !sent {
		s.sendSearchRes(req)
	}
}
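
This is one step of greedy routing over the DHT: forward to the first known node strictly closer to the target than this node, and answer directly only when no such node exists, which terminates the walk at a local minimum. The step in isolation, with hypothetical closer/forward/respond helpers:

	sent := false
	for _, candidate := range lookup {
		if closer(candidate, target, self) { // strict progress toward the target
			forward(candidate)
			sent = true
			break
		}
	}
	if !sent {
		respond() // nobody closer is known; this node answers the search
	}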

func (s *searches) forwardSearch(req *searchReq, next *dhtInfo) {
	//fmt.Println("DEBUG fwd:", req.dest, next.getNodeID())
	bs := req.encode()
	shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &next.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  next.coords,
		toKey:   next.key,
		fromKey: s.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	s.core.router.out(packet)
}

func (s *searches) sendSearchRes(req *searchReq) {
	//fmt.Println("DEBUG res:", req.dest, s.core.dht.nodeID)
	loc := s.core.switchTable.getLocator()
	coords := loc.getCoords()
	res := searchRes{
		key:    s.core.boxPub,
		coords: coords,
		dest:   req.dest,
	}
	bs := res.encode()
	shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &req.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  req.coords,
		toKey:   req.key,
		fromKey: s.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	s.core.router.out(packet)
}

func (s *searches) handleSearchRes(res *searchRes) {
	info, isIn := s.searches[res.dest]
	if !isIn {
		return
	}
	them := getNodeID(&res.key)
	var destMasked NodeID
	var themMasked NodeID
	for idx := 0; idx < NodeIDLen; idx++ {
		destMasked[idx] = info.dest[idx] & info.mask[idx]
		themMasked[idx] = them[idx] & info.mask[idx]
	}
	//fmt.Println("DEBUG search res1:", themMasked, destMasked)
	//fmt.Println("DEBUG search res2:", *them, *info.dest, *info.mask)
	if themMasked != destMasked {
		return
	}
	// They match, so create a session and send a sessionRequest
	sinfo, isIn := s.core.sessions.getByTheirPerm(&res.key)
	if !isIn {
		sinfo = s.core.sessions.createSession(&res.key)
		_, isIn := s.core.sessions.getByTheirPerm(&res.key)
		if !isIn {
			panic("This should never happen")
		}
	}
	// FIXME replay attacks could mess with coords?
	sinfo.coords = res.coords
	sinfo.packet = info.packet
	s.core.sessions.ping(sinfo)
	// Cleanup
	delete(s.searches, res.dest)
}
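
The mask encodes how many NodeID bits the search actually pins down (an address constrains a longer prefix than a subnet), so a responder is accepted only if its NodeID agrees with the destination on every masked bit. The comparison above is equivalent to this per-byte check:

	match := true
	for idx := 0; idx < NodeIDLen; idx++ {
		if them[idx]&info.mask[idx] != info.dest[idx]&info.mask[idx] {
			match = false // a masked bit differs: the wrong node answered
			break
		}
	}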

@@ -7,281 +7,315 @@ package yggdrasil

import "time"

type sessionInfo struct {
	core         *Core
	theirAddr    address
	theirSubnet  subnet
	theirPermPub boxPubKey
	theirSesPub  boxPubKey
	mySesPub     boxPubKey
	mySesPriv    boxPrivKey
	sharedSesKey boxSharedKey // derived from session keys
	theirHandle  handle
	myHandle     handle
	theirNonce   boxNonce
	myNonce      boxNonce
	time         time.Time // Time we last received a packet
	coords       []byte    // coords of destination
	packet       []byte    // a buffered packet, sent immediately on ping/pong
	init         bool      // Reset if coords change
	send         chan []byte
	recv         chan *wire_trafficPacket
	nonceMask    uint64
	tstamp       int64 // tstamp from their last session ping, replay attack mitigation
}

// FIXME replay attacks (include nonce or some sequence number)
type sessionPing struct {
	sendPermPub boxPubKey // Sender's permanent key
	handle      handle    // Random number to ID session
	sendSesPub  boxPubKey // Session key to use
	coords      []byte
	tstamp      int64 // unix time, but the only real requirement is that it increases
	isPong      bool
}

// Returns true if the session was updated, false otherwise
func (s *sessionInfo) update(p *sessionPing) bool {
	if !(p.tstamp > s.tstamp) {
		return false
	}
	if p.sendPermPub != s.theirPermPub {
		return false
	} // Shouldn't happen
	if p.sendSesPub != s.theirSesPub {
		// FIXME need to protect against replay attacks
		// Put a sequence number or a timestamp or something in the pings?
		// Or just return false, make the session time out?
		s.theirSesPub = p.sendSesPub
		s.theirHandle = p.handle
		s.sharedSesKey = *getSharedKey(&s.mySesPriv, &s.theirSesPub)
		s.theirNonce = boxNonce{}
		s.nonceMask = 0
	}
	s.coords = append([]byte{}, p.coords...)
	s.time = time.Now()
	s.tstamp = p.tstamp
	s.init = true
	return true
}

func (s *sessionInfo) timedout() bool {
	return time.Since(s.time) > time.Minute
}
type sessions struct {
|
||||
core *Core
|
||||
// Maps known permanent keys to their shared key, used by DHT a lot
|
||||
permShared map[boxPubKey]*boxSharedKey
|
||||
// Maps (secret) handle onto session info
|
||||
sinfos map[handle]*sessionInfo
|
||||
// Maps mySesPub onto handle
|
||||
byMySes map[boxPubKey]*handle
|
||||
// Maps theirPermPub onto handle
|
||||
byTheirPerm map[boxPubKey]*handle
|
||||
addrToPerm map[address]*boxPubKey
|
||||
subnetToPerm map[subnet]*boxPubKey
|
||||
core *Core
|
||||
// Maps known permanent keys to their shared key, used by DHT a lot
|
||||
permShared map[boxPubKey]*boxSharedKey
|
||||
// Maps (secret) handle onto session info
|
||||
sinfos map[handle]*sessionInfo
|
||||
// Maps mySesPub onto handle
|
||||
byMySes map[boxPubKey]*handle
|
||||
// Maps theirPermPub onto handle
|
||||
byTheirPerm map[boxPubKey]*handle
|
||||
addrToPerm map[address]*boxPubKey
|
||||
subnetToPerm map[subnet]*boxPubKey
|
||||
}
|
||||
|
||||
func (ss *sessions) init(core *Core) {
|
||||
ss.core = core
|
||||
ss.permShared = make(map[boxPubKey]*boxSharedKey)
|
||||
ss.sinfos = make(map[handle]*sessionInfo)
|
||||
ss.byMySes = make(map[boxPubKey]*handle)
|
||||
ss.byTheirPerm = make(map[boxPubKey]*handle)
|
||||
ss.addrToPerm = make(map[address]*boxPubKey)
|
||||
ss.subnetToPerm = make(map[subnet]*boxPubKey)
|
||||
ss.core = core
|
||||
ss.permShared = make(map[boxPubKey]*boxSharedKey)
|
||||
ss.sinfos = make(map[handle]*sessionInfo)
|
||||
ss.byMySes = make(map[boxPubKey]*handle)
|
||||
ss.byTheirPerm = make(map[boxPubKey]*handle)
|
||||
ss.addrToPerm = make(map[address]*boxPubKey)
|
||||
ss.subnetToPerm = make(map[subnet]*boxPubKey)
|
||||
}
|
||||
|
||||
func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) {
|
||||
sinfo, isIn := ss.sinfos[*handle]
|
||||
if isIn && sinfo.timedout() {
|
||||
// We have a session, but it has timed out
|
||||
return nil, false
|
||||
}
|
||||
return sinfo, isIn
|
||||
sinfo, isIn := ss.sinfos[*handle]
|
||||
if isIn && sinfo.timedout() {
|
||||
// We have a session, but it has timed out
|
||||
return nil, false
|
||||
}
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) {
|
||||
h, isIn := ss.byMySes[*key]
|
||||
if !isIn { return nil, false }
|
||||
sinfo, isIn := ss.getSessionForHandle(h)
|
||||
return sinfo, isIn
|
||||
h, isIn := ss.byMySes[*key]
|
||||
if !isIn {
|
||||
return nil, false
|
||||
}
|
||||
sinfo, isIn := ss.getSessionForHandle(h)
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) {
|
||||
h, isIn := ss.byTheirPerm[*key]
|
||||
if !isIn { return nil, false }
|
||||
sinfo, isIn := ss.getSessionForHandle(h)
|
||||
return sinfo, isIn
|
||||
h, isIn := ss.byTheirPerm[*key]
|
||||
if !isIn {
|
||||
return nil, false
|
||||
}
|
||||
sinfo, isIn := ss.getSessionForHandle(h)
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) {
|
||||
p, isIn := ss.addrToPerm[*addr]
|
||||
if !isIn { return nil, false }
|
||||
sinfo, isIn := ss.getByTheirPerm(p)
|
||||
return sinfo, isIn
|
||||
p, isIn := ss.addrToPerm[*addr]
|
||||
if !isIn {
|
||||
return nil, false
|
||||
}
|
||||
sinfo, isIn := ss.getByTheirPerm(p)
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) {
|
||||
p, isIn := ss.subnetToPerm[*snet]
|
||||
if !isIn { return nil, false }
|
||||
sinfo, isIn := ss.getByTheirPerm(p)
|
||||
return sinfo, isIn
|
||||
p, isIn := ss.subnetToPerm[*snet]
|
||||
if !isIn {
|
||||
return nil, false
|
||||
}
|
||||
sinfo, isIn := ss.getByTheirPerm(p)
|
||||
return sinfo, isIn
|
||||
}
|
||||
|
||||
func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
|
||||
sinfo := sessionInfo{}
|
||||
sinfo.core = ss.core
|
||||
sinfo.theirPermPub = *theirPermKey
|
||||
pub, priv := newBoxKeys()
|
||||
sinfo.mySesPub = *pub
|
||||
sinfo.mySesPriv = *priv
|
||||
sinfo.myNonce = *newBoxNonce() // TODO make sure nonceIsOK tolerates this
|
||||
higher := false
|
||||
for idx := range ss.core.boxPub {
|
||||
if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] {
|
||||
higher = true
|
||||
break
|
||||
} else if ss.core.boxPub[idx] < sinfo.theirPermPub[idx] {
|
||||
break
|
||||
}
|
||||
}
|
||||
if higher {
|
||||
// higher => odd nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
|
||||
} else {
|
||||
// lower => even nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
|
||||
}
|
||||
sinfo.myHandle = *newHandle()
|
||||
sinfo.theirAddr = *address_addrForNodeID(getNodeID(&sinfo.theirPermPub))
|
||||
sinfo.theirSubnet = *address_subnetForNodeID(getNodeID(&sinfo.theirPermPub))
|
||||
sinfo.send = make(chan []byte, 1)
|
||||
sinfo.recv = make(chan *wire_trafficPacket, 1)
|
||||
go sinfo.doWorker()
|
||||
sinfo.time = time.Now()
|
||||
// Do some cleanup
|
||||
// Time thresholds almost certainly could use some adjusting
|
||||
for _, s := range ss.sinfos {
|
||||
if s.timedout() { s.close() }
|
||||
}
|
||||
ss.sinfos[sinfo.myHandle] = &sinfo
|
||||
ss.byMySes[sinfo.mySesPub] = &sinfo.myHandle
|
||||
ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
|
||||
ss.addrToPerm[sinfo.theirAddr] = &sinfo.theirPermPub
|
||||
ss.subnetToPerm[sinfo.theirSubnet] = &sinfo.theirPermPub
|
||||
return &sinfo
|
||||
sinfo := sessionInfo{}
|
||||
sinfo.core = ss.core
|
||||
sinfo.theirPermPub = *theirPermKey
|
||||
pub, priv := newBoxKeys()
|
||||
sinfo.mySesPub = *pub
|
||||
sinfo.mySesPriv = *priv
|
||||
sinfo.myNonce = *newBoxNonce() // TODO make sure nonceIsOK tolerates this
|
||||
higher := false
|
||||
for idx := range ss.core.boxPub {
|
||||
if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] {
|
||||
higher = true
|
||||
break
|
||||
} else if ss.core.boxPub[idx] < sinfo.theirPermPub[idx] {
|
||||
break
|
||||
}
|
||||
}
|
||||
if higher {
|
||||
// higher => odd nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
|
||||
} else {
|
||||
// lower => even nonce
|
||||
sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
|
||||
}
|
||||
sinfo.myHandle = *newHandle()
|
||||
sinfo.theirAddr = *address_addrForNodeID(getNodeID(&sinfo.theirPermPub))
|
||||
sinfo.theirSubnet = *address_subnetForNodeID(getNodeID(&sinfo.theirPermPub))
|
||||
sinfo.send = make(chan []byte, 1)
|
||||
sinfo.recv = make(chan *wire_trafficPacket, 1)
|
||||
go sinfo.doWorker()
|
||||
sinfo.time = time.Now()
|
||||
// Do some cleanup
|
||||
// Time thresholds almost certainly could use some adjusting
|
||||
for _, s := range ss.sinfos {
|
||||
if s.timedout() {
|
||||
s.close()
|
||||
}
|
||||
}
|
||||
ss.sinfos[sinfo.myHandle] = &sinfo
|
||||
ss.byMySes[sinfo.mySesPub] = &sinfo.myHandle
|
||||
ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
|
||||
ss.addrToPerm[sinfo.theirAddr] = &sinfo.theirPermPub
|
||||
ss.subnetToPerm[sinfo.theirSubnet] = &sinfo.theirPermPub
|
||||
return &sinfo
|
||||
}
|
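
The parity trick in createSession avoids nonce reuse on the shared session key: the side whose permanent key compares lexicographically higher takes odd nonces and the other side even ones, so as long as each side advances its own nonce in steps of two (preserving parity), the two directions can never collide. A standalone sketch using the standard bytes package (hypothetical helper name):

	func setNonceParity(mine, theirs *boxPubKey, nonce *boxNonce) {
		if bytes.Compare(mine[:], theirs[:]) > 0 {
			nonce[len(nonce)-1] |= 0x01 // higher key => odd nonces
		} else {
			nonce[len(nonce)-1] &= 0xfe // lower key => even nonces
		}
	}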

func (sinfo *sessionInfo) close() {
	delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
	delete(sinfo.core.sessions.byMySes, sinfo.mySesPub)
	delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub)
	delete(sinfo.core.sessions.addrToPerm, sinfo.theirAddr)
	delete(sinfo.core.sessions.subnetToPerm, sinfo.theirSubnet)
	close(sinfo.send)
	close(sinfo.recv)
}

func (ss *sessions) getPing(sinfo *sessionInfo) sessionPing {
	loc := ss.core.switchTable.getLocator()
	coords := loc.getCoords()
	ref := sessionPing{
		sendPermPub: ss.core.boxPub,
		handle:      sinfo.myHandle,
		sendSesPub:  sinfo.mySesPub,
		tstamp:      time.Now().Unix(),
		coords:      coords,
	}
	sinfo.myNonce.update()
	return ref
}

func (ss *sessions) getSharedKey(myPriv *boxPrivKey,
	theirPub *boxPubKey) *boxSharedKey {
	if skey, isIn := ss.permShared[*theirPub]; isIn {
		return skey
	}
	// First do some cleanup
	const maxKeys = dht_bucket_number * dht_bucket_size
	for key := range ss.permShared {
		// Remove a random key until the store is small enough
		if len(ss.permShared) < maxKeys {
			break
		}
		delete(ss.permShared, key)
	}
	ss.permShared[*theirPub] = getSharedKey(myPriv, theirPub)
	return ss.permShared[*theirPub]
}
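
The cleanup in getSharedKey leans on Go's unspecified map iteration order: ranging over a map visits keys in an effectively random order, so deleting until the size drops below the bound evicts arbitrary entries, a cheap stand-in for a real eviction policy. The same idiom on any map-backed cache (deleting during range is safe in Go):

	for key := range cache {
		if len(cache) < maxEntries {
			break // small enough again
		}
		delete(cache, key)
	}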

func (ss *sessions) ping(sinfo *sessionInfo) {
	ss.sendPingPong(sinfo, false)
}

func (ss *sessions) sendPingPong(sinfo *sessionInfo, isPong bool) {
	ping := ss.getPing(sinfo)
	ping.isPong = isPong
	bs := ping.encode()
	shared := ss.getSharedKey(&ss.core.boxPriv, &sinfo.theirPermPub)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  sinfo.coords,
		toKey:   sinfo.theirPermPub,
		fromKey: ss.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	ss.core.router.out(packet)
}

func (ss *sessions) handlePing(ping *sessionPing) {
	// Get the corresponding session (or create a new session)
	sinfo, isIn := ss.getByTheirPerm(&ping.sendPermPub)
	if !isIn || sinfo.timedout() {
		if isIn {
			sinfo.close()
		}
		ss.createSession(&ping.sendPermPub)
		sinfo, isIn = ss.getByTheirPerm(&ping.sendPermPub)
		if !isIn {
			panic("This should not happen")
		}
	}
	// Update the session
	if !sinfo.update(ping) { /*panic("Should not happen in testing")*/
		return
	}
	if !ping.isPong {
		ss.sendPingPong(sinfo, true)
	}
	if sinfo.packet != nil {
		// send
		var bs []byte
		bs, sinfo.packet = sinfo.packet, nil
		go func() { sinfo.send <- bs }()
	}
}
||||
|
||||
func (n *boxNonce) minus(m *boxNonce) int64 {
|
||||
diff := int64(0)
|
||||
for idx := range n {
|
||||
diff *= 256
|
||||
diff += int64(n[idx]) - int64(m[idx])
|
||||
if diff > 64 { diff = 64 }
|
||||
if diff < -64 { diff = -64 }
|
||||
}
|
||||
return diff
|
||||
diff := int64(0)
|
||||
for idx := range n {
|
||||
diff *= 256
|
||||
diff += int64(n[idx]) - int64(m[idx])
|
||||
if diff > 64 {
|
||||
diff = 64
|
||||
}
|
||||
if diff < -64 {
|
||||
diff = -64
|
||||
}
|
||||
}
|
||||
return diff
|
||||
}
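
minus treats each nonce as a big-endian integer and returns their difference clamped to ±64 at every step, which is all the window logic below needs and avoids any risk of overflow. A self-contained toy check of the same arithmetic (a sketch; the real type is a fixed-size array rather than a slice):

    package main

    import "fmt"

    // clampedDiff mirrors the shape of boxNonce.minus: big-endian
    // difference, clamped to the range [-64, 64] at every step.
    func clampedDiff(n, m []byte) int64 {
        diff := int64(0)
        for i := range n {
            diff *= 256
            diff += int64(n[i]) - int64(m[i])
            if diff > 64 {
                diff = 64
            }
            if diff < -64 {
                diff = -64
            }
        }
        return diff
    }

    func main() {
        fmt.Println(clampedDiff([]byte{0, 2}, []byte{0, 1})) // 1
        fmt.Println(clampedDiff([]byte{1, 0}, []byte{0, 0})) // 64 (clamped)
        fmt.Println(clampedDiff([]byte{0, 0}, []byte{9, 9})) // -64 (clamped)
    }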

func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool {
    // The bitmask is to allow for some non-duplicate out-of-order packets
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 { return true }
    return ^sinfo.nonceMask & (0x01 << uint64(-diff)) != 0
    // The bitmask is to allow for some non-duplicate out-of-order packets
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 {
        return true
    }
    return ^sinfo.nonceMask&(0x01<<uint64(-diff)) != 0
}

func (sinfo *sessionInfo) updateNonce(theirNonce *boxNonce) {
    // Shift nonce mask if needed
    // Set bit
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 {
        sinfo.nonceMask <<= uint64(diff)
        sinfo.nonceMask &= 0x01
    } else {
        sinfo.nonceMask &= 0x01 << uint64(-diff)
    }
    sinfo.theirNonce = *theirNonce
    // Shift nonce mask if needed
    // Set bit
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 {
        sinfo.nonceMask <<= uint64(diff)
        sinfo.nonceMask &= 0x01
    } else {
        sinfo.nonceMask &= 0x01 << uint64(-diff)
    }
    sinfo.theirNonce = *theirNonce
}
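
nonceIsOK and updateNonce together act as an anti-replay window: strictly newer nonces always pass, while slightly older ones pass only if their slot is still free in the mask. For comparison, a conventional 64-entry sliding-window replay filter looks like the sketch below; this shows the general technique, not a byte-for-byte restatement of the code above, and the names are hypothetical:

    // replayWindow tracks the highest accepted sequence number plus a
    // bitmask of recently accepted ones at or below it.
    type replayWindow struct {
        last uint64
        seen uint64
    }

    func (w *replayWindow) accept(seq uint64) bool {
        switch {
        case seq > w.last: // newer: slide the window forward
            w.seen <<= (seq - w.last) // shifts >= 64 zero the mask
            w.seen |= 1
            w.last = seq
            return true
        case w.last-seq >= 64: // too old to track
            return false
        case w.seen&(1<<(w.last-seq)) != 0: // duplicate
            return false
        default: // in the window and not yet seen
            w.seen |= 1 << (w.last - seq)
            return true
        }
    }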

func (ss *sessions) resetInits() {
    for _, sinfo := range ss.sinfos { sinfo.init = false }
    for _, sinfo := range ss.sinfos {
        sinfo.init = false
    }
}

////////////////////////////////////////////////////////////////////////////////
@ -291,37 +325,53 @@ func (ss *sessions) resetInits() {
// It's also responsible for keeping nonces consistent

func (sinfo *sessionInfo) doWorker() {
    for {
        select {
        case p, ok := <-sinfo.recv: if ok { sinfo.doRecv(p) } else { return }
        case bs, ok := <-sinfo.send: if ok { sinfo.doSend(bs) } else { return }
        }
    }
    for {
        select {
        case p, ok := <-sinfo.recv:
            if ok {
                sinfo.doRecv(p)
            } else {
                return
            }
        case bs, ok := <-sinfo.send:
            if ok {
                sinfo.doSend(bs)
            } else {
                return
            }
        }
    }
}

func (sinfo *sessionInfo) doSend(bs []byte) {
    defer util_putBytes(bs)
    if !sinfo.init { return } // To prevent using empty session keys
    payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce)
    defer util_putBytes(payload)
    p := wire_trafficPacket{
        ttl: ^uint64(0),
        coords: sinfo.coords,
        handle: sinfo.theirHandle,
        nonce: *nonce,
        payload: payload,
    }
    packet := p.encode()
    sinfo.core.router.out(packet)
    defer util_putBytes(bs)
    if !sinfo.init {
        return
    } // To prevent using empty session keys
    payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce)
    defer util_putBytes(payload)
    p := wire_trafficPacket{
        ttl:     ^uint64(0),
        coords:  sinfo.coords,
        handle:  sinfo.theirHandle,
        nonce:   *nonce,
        payload: payload,
    }
    packet := p.encode()
    sinfo.core.router.out(packet)
}

func (sinfo *sessionInfo) doRecv(p *wire_trafficPacket) {
    defer util_putBytes(p.payload)
    if !sinfo.nonceIsOK(&p.nonce) { return }
    bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce)
    if !isOK { util_putBytes(bs) ; return }
    sinfo.updateNonce(&p.nonce)
    sinfo.time = time.Now()
    sinfo.core.router.recvPacket(bs, &sinfo.theirAddr)
    defer util_putBytes(p.payload)
    if !sinfo.nonceIsOK(&p.nonce) {
        return
    }
    bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce)
    if !isOK {
        util_putBytes(bs)
        return
    }
    sinfo.updateNonce(&p.nonce)
    sinfo.time = time.Now()
    sinfo.core.router.recvPacket(bs, &sinfo.theirAddr)
}

@ -7,52 +7,63 @@ import "sync"
import "time"

type sigManager struct {
    mutex sync.RWMutex
    checked map[sigBytes]knownSig
    lastCleaned time.Time
    mutex       sync.RWMutex
    checked     map[sigBytes]knownSig
    lastCleaned time.Time
}

type knownSig struct {
    bs []byte
    time time.Time
    bs   []byte
    time time.Time
}

func (m *sigManager) init() {
    m.checked = make(map[sigBytes]knownSig)
    m.checked = make(map[sigBytes]knownSig)
}

func (m *sigManager) check(key *sigPubKey, sig *sigBytes, bs []byte) bool {
    if m.isChecked(sig, bs) { return true }
    verified := verify(key, bs, sig)
    if verified { m.putChecked(sig, bs) }
    return verified
    if m.isChecked(sig, bs) {
        return true
    }
    verified := verify(key, bs, sig)
    if verified {
        m.putChecked(sig, bs)
    }
    return verified
}

func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool {
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    k, isIn := m.checked[*sig]
    if !isIn { return false }
    if len(bs) != len(k.bs) { return false }
    for idx := 0 ; idx < len(bs) ; idx++ {
        if bs[idx] != k.bs[idx] { return false }
    }
    k.time = time.Now()
    return true
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    k, isIn := m.checked[*sig]
    if !isIn {
        return false
    }
    if len(bs) != len(k.bs) {
        return false
    }
    for idx := 0; idx < len(bs); idx++ {
        if bs[idx] != k.bs[idx] {
            return false
        }
    }
    k.time = time.Now()
    return true
}

func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    now := time.Now()
    if time.Since(m.lastCleaned) > 60*time.Second {
        // Since we have the write lock anyway, do some cleanup
        for s, k := range m.checked {
            if time.Since(k.time) > 60*time.Second { delete(m.checked, s) }
        }
        m.lastCleaned = now
    }
    k := knownSig{bs: bs, time: now}
    m.checked[*newsig] = k
    m.mutex.Lock()
    defer m.mutex.Unlock()
    now := time.Now()
    if time.Since(m.lastCleaned) > 60*time.Second {
        // Since we have the write lock anyway, do some cleanup
        for s, k := range m.checked {
            if time.Since(k.time) > 60*time.Second {
                delete(m.checked, s)
            }
        }
        m.lastCleaned = now
    }
    k := knownSig{bs: bs, time: now}
    m.checked[*newsig] = k
}
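
putChecked batches its cache expiry into writes: since it already holds the write lock, it sweeps out stale entries at most once per minute instead of running a separate janitor goroutine. The same pattern in isolation (illustrative types, not this codebase's):

    import (
        "sync"
        "time"
    )

    type entry struct {
        val  []byte
        seen time.Time
    }

    type throttledCache struct {
        mutex       sync.Mutex
        entries     map[string]entry
        lastCleaned time.Time
    }

    // put inserts a value and, at most once a minute, expires old
    // entries while the write lock is held anyway.
    func (c *throttledCache) put(key string, val []byte) {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        now := time.Now()
        if time.Since(c.lastCleaned) > time.Minute {
            for k, e := range c.entries {
                if now.Sub(e.seen) > time.Minute {
                    delete(c.entries, k)
                }
            }
            c.lastCleaned = now
        }
        c.entries[key] = entry{val: val, seen: now}
    }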

@ -23,366 +23,414 @@ const switch_timeout = time.Minute
// 1 signature per coord, from the *sender* to that coord
// E.g. A->B->C has sigA(A->B) and sigB(A->B->C)
type switchLocator struct {
    root sigPubKey
    tstamp int64
    coords []switchPort
    root   sigPubKey
    tstamp int64
    coords []switchPort
}

func firstIsBetter(first, second *sigPubKey) bool {
    // Higher TreeID is better
    ftid := getTreeID(first)
    stid := getTreeID(second)
    for idx := 0 ; idx < len(ftid) ; idx++ {
        if ftid[idx] == stid[idx] { continue }
        return ftid[idx] > stid[idx]
    }
    // Edge case, when comparing identical IDs
    return false
    // Higher TreeID is better
    ftid := getTreeID(first)
    stid := getTreeID(second)
    for idx := 0; idx < len(ftid); idx++ {
        if ftid[idx] == stid[idx] {
            continue
        }
        return ftid[idx] > stid[idx]
    }
    // Edge case, when comparing identical IDs
    return false
}
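
firstIsBetter is a lexicographic, big-endian comparison of the two TreeIDs, with ties counting as "not better". If the IDs are available as byte slices, the standard library expresses the same test in one line (a sketch, assuming a slice conversion):

    import "bytes"

    // firstIsBetterSketch: first wins iff its TreeID is lexicographically
    // greater; identical IDs are not "better".
    func firstIsBetterSketch(ftid, stid []byte) bool {
        return bytes.Compare(ftid, stid) > 0
    }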

func (l *switchLocator) clone() switchLocator {
    // Used to create a deep copy for use in messages
    // Copy required because we need to mutate coords before sending
    // (By appending the port from us to the destination)
    loc := *l
    loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
    copy(loc.coords, l.coords)
    return loc
    // Used to create a deep copy for use in messages
    // Copy required because we need to mutate coords before sending
    // (By appending the port from us to the destination)
    loc := *l
    loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
    copy(loc.coords, l.coords)
    return loc
}

func (l *switchLocator) dist(dest []byte) int {
    // Returns distance (on the tree) from these coords
    offset := 0
    fdc := 0
    for {
        if fdc >= len(l.coords) { break }
        coord, length := wire_decode_uint64(dest[offset:])
        if length == 0 { break }
        if l.coords[fdc] != switchPort(coord) { break }
        fdc++
        offset += length
    }
    dist := len(l.coords[fdc:])
    for {
        _, length := wire_decode_uint64(dest[offset:])
        if length == 0 { break }
        dist++
        offset += length
    }
    return dist
    // Returns distance (on the tree) from these coords
    offset := 0
    fdc := 0
    for {
        if fdc >= len(l.coords) {
            break
        }
        coord, length := wire_decode_uint64(dest[offset:])
        if length == 0 {
            break
        }
        if l.coords[fdc] != switchPort(coord) {
            break
        }
        fdc++
        offset += length
    }
    dist := len(l.coords[fdc:])
    for {
        _, length := wire_decode_uint64(dest[offset:])
        if length == 0 {
            break
        }
        dist++
        offset += length
    }
    return dist
}
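
dist walks the varint-encoded destination coords past the longest common prefix with our own coords, then charges one hop for every remaining coordinate on either side: hops up the tree from us, plus hops down to the destination. A runnable sketch of the same walk, using encoding/binary's Uvarint as a stand-in codec (an assumption; the actual wire_decode_uint64 format may differ):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // treeDist: hops up from self past the shared prefix, plus hops
    // down the remainder of dest.
    func treeDist(self []uint64, dest []byte) int {
        offset, fdc := 0, 0
        for fdc < len(self) {
            coord, n := binary.Uvarint(dest[offset:])
            if n <= 0 || coord != self[fdc] {
                break
            }
            fdc++
            offset += n
        }
        dist := len(self) - fdc
        for {
            _, n := binary.Uvarint(dest[offset:])
            if n <= 0 {
                break
            }
            dist++
            offset += n
        }
        return dist
    }

    func main() {
        dest := []byte{}
        for _, c := range []uint64{1, 2, 5} {
            dest = binary.AppendUvarint(dest, c)
        }
        fmt.Println(treeDist([]uint64{1, 2, 3}, dest)) // 1 up + 1 down = 2
    }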

func (l *switchLocator) getCoords() []byte {
    bs := make([]byte, 0, len(l.coords))
    for _, coord := range l.coords {
        c := wire_encode_uint64(uint64(coord))
        bs = append(bs, c...)
    }
    return bs
    bs := make([]byte, 0, len(l.coords))
    for _, coord := range l.coords {
        c := wire_encode_uint64(uint64(coord))
        bs = append(bs, c...)
    }
    return bs
}

func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
    if x.root != y.root { return false }
    if len(x.coords) > len(y.coords) { return false }
    for idx := range x.coords {
        if x.coords[idx] != y.coords[idx] { return false }
    }
    return true
    if x.root != y.root {
        return false
    }
    if len(x.coords) > len(y.coords) {
        return false
    }
    for idx := range x.coords {
        if x.coords[idx] != y.coords[idx] {
            return false
        }
    }
    return true
}

type peerInfo struct {
    key sigPubKey // ID of this peer
    locator switchLocator // Should be able to respond with signatures upon request
    degree uint64 // Self-reported degree
    coords []switchPort // Coords of this peer (taken from coords of the sent locator)
    time time.Time // Time this node was last seen
    firstSeen time.Time
    port switchPort // Interface number of this peer
    seq uint64 // Seq number we last saw this peer advertise
    key       sigPubKey     // ID of this peer
    locator   switchLocator // Should be able to respond with signatures upon request
    degree    uint64        // Self-reported degree
    coords    []switchPort  // Coords of this peer (taken from coords of the sent locator)
    time      time.Time     // Time this node was last seen
    firstSeen time.Time
    port      switchPort // Interface number of this peer
    seq       uint64     // Seq number we last saw this peer advertise
}

type switchMessage struct {
    from sigPubKey // key of the sender
    locator switchLocator // Locator advertised for the receiver, not the sender's loc!
    seq uint64
    from    sigPubKey     // key of the sender
    locator switchLocator // Locator advertised for the receiver, not the sender's loc!
    seq     uint64
}

type switchPort uint64
type tableElem struct {
    locator switchLocator
    firstSeen time.Time
    locator   switchLocator
    firstSeen time.Time
}

type lookupTable struct {
    self switchLocator
    elems map[switchPort]tableElem
    self  switchLocator
    elems map[switchPort]tableElem
}

type switchData struct {
    // All data that's mutable and used by exported Table methods
    // To be read/written with atomic.Value Store/Load calls
    locator switchLocator
    seq uint64 // Sequence number, reported to peers, so they know about changes
    peers map[switchPort]peerInfo
    sigs []sigInfo
    // All data that's mutable and used by exported Table methods
    // To be read/written with atomic.Value Store/Load calls
    locator switchLocator
    seq     uint64 // Sequence number, reported to peers, so they know about changes
    peers   map[switchPort]peerInfo
    sigs    []sigInfo
}

type switchTable struct {
    core *Core
    key sigPubKey // Our own key
    time time.Time // Time when locator.tstamp was last updated
    parent switchPort // Port of whatever peer is our parent, or self if we're root
    drop map[sigPubKey]int64 // Tstamp associated with a dropped root
    mutex sync.RWMutex // Lock for reads/writes of switchData
    data switchData
    updater atomic.Value //*sync.Once
    table atomic.Value //lookupTable
    core    *Core
    key     sigPubKey           // Our own key
    time    time.Time           // Time when locator.tstamp was last updated
    parent  switchPort          // Port of whatever peer is our parent, or self if we're root
    drop    map[sigPubKey]int64 // Tstamp associated with a dropped root
    mutex   sync.RWMutex        // Lock for reads/writes of switchData
    data    switchData
    updater atomic.Value //*sync.Once
    table   atomic.Value //lookupTable
}

func (t *switchTable) init(core *Core, key sigPubKey) {
    now := time.Now()
    t.core = core
    t.key = key
    locator := switchLocator{root: key, tstamp: now.Unix()}
    peers := make(map[switchPort]peerInfo)
    t.data = switchData{locator: locator, peers: peers}
    t.updater.Store(&sync.Once{})
    t.table.Store(lookupTable{elems: make(map[switchPort]tableElem)})
    t.drop = make(map[sigPubKey]int64)
    doTicker := func () {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for {
            <-ticker.C
            t.Tick()
        }
    }
    go doTicker()
    now := time.Now()
    t.core = core
    t.key = key
    locator := switchLocator{root: key, tstamp: now.Unix()}
    peers := make(map[switchPort]peerInfo)
    t.data = switchData{locator: locator, peers: peers}
    t.updater.Store(&sync.Once{})
    t.table.Store(lookupTable{elems: make(map[switchPort]tableElem)})
    t.drop = make(map[sigPubKey]int64)
    doTicker := func() {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for {
            <-ticker.C
            t.Tick()
        }
    }
    go doTicker()
}

func (t *switchTable) getLocator() switchLocator {
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    return t.data.locator.clone()
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    return t.data.locator.clone()
}

func (t *switchTable) Tick() {
    // Periodic maintenance work to keep things internally consistent
    t.mutex.Lock() // Write lock
    defer t.mutex.Unlock() // Release lock when we're done
    t.cleanRoot()
    t.cleanPeers()
    t.cleanDropped()
    // Periodic maintenance work to keep things internally consistent
    t.mutex.Lock()         // Write lock
    defer t.mutex.Unlock() // Release lock when we're done
    t.cleanRoot()
    t.cleanPeers()
    t.cleanDropped()
}

func (t *switchTable) cleanRoot() {
    // TODO rethink how this is done?...
    // Get rid of the root if it looks like it's timed out
    now := time.Now()
    doUpdate := false
    //fmt.Println("DEBUG clean root:", now.Sub(t.time))
    if now.Sub(t.time) > switch_timeout {
        //fmt.Println("root timed out", t.data.locator)
        dropped := t.data.peers[t.parent]
        dropped.time = t.time
        t.drop[t.data.locator.root] = t.data.locator.tstamp
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root timeout", len(t.drop))
    }
    // Or, if we're better than our root, root ourself
    if firstIsBetter(&t.key, &t.data.locator.root) {
        //fmt.Println("root is worse than us", t.data.locator.Root)
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root)
    }
    // Or, if we are the root, possibly update our timestamp
    if t.data.locator.root == t.key &&
        now.Sub(t.time) > switch_timeout/2 {
        //fmt.Println("root is self and old, updating", t.data.locator.Root)
        doUpdate = true
    }
    if doUpdate {
        t.parent = switchPort(0)
        t.time = now
        if t.data.locator.root != t.key {
            t.data.seq++
            t.updater.Store(&sync.Once{})
            select {
            case t.core.router.reset<-struct{}{}:
            default:
            }
        }
        t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
        t.data.sigs = nil
    }
    // TODO rethink how this is done?...
    // Get rid of the root if it looks like it's timed out
    now := time.Now()
    doUpdate := false
    //fmt.Println("DEBUG clean root:", now.Sub(t.time))
    if now.Sub(t.time) > switch_timeout {
        //fmt.Println("root timed out", t.data.locator)
        dropped := t.data.peers[t.parent]
        dropped.time = t.time
        t.drop[t.data.locator.root] = t.data.locator.tstamp
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root timeout", len(t.drop))
    }
    // Or, if we're better than our root, root ourself
    if firstIsBetter(&t.key, &t.data.locator.root) {
        //fmt.Println("root is worse than us", t.data.locator.Root)
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root)
    }
    // Or, if we are the root, possibly update our timestamp
    if t.data.locator.root == t.key &&
        now.Sub(t.time) > switch_timeout/2 {
        //fmt.Println("root is self and old, updating", t.data.locator.Root)
        doUpdate = true
    }
    if doUpdate {
        t.parent = switchPort(0)
        t.time = now
        if t.data.locator.root != t.key {
            t.data.seq++
            t.updater.Store(&sync.Once{})
            select {
            case t.core.router.reset <- struct{}{}:
            default:
            }
        }
        t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
        t.data.sigs = nil
    }
}

func (t *switchTable) cleanPeers() {
    now := time.Now()
    changed := false
    for idx, info := range t.data.peers {
        if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ {
            //fmt.Println("peer timed out", t.key, info.locator)
            delete(t.data.peers, idx)
            changed = true
        }
    }
    if changed { t.updater.Store(&sync.Once{}) }
    now := time.Now()
    changed := false
    for idx, info := range t.data.peers {
        if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ {
            //fmt.Println("peer timed out", t.key, info.locator)
            delete(t.data.peers, idx)
            changed = true
        }
    }
    if changed {
        t.updater.Store(&sync.Once{})
    }
}

func (t *switchTable) cleanDropped() {
    // TODO only call this after root changes, not periodically
    for root, _ := range t.drop {
        if !firstIsBetter(&root, &t.data.locator.root) { delete(t.drop, root) }
    }
    // TODO only call this after root changes, not periodically
    for root := range t.drop {
        if !firstIsBetter(&root, &t.data.locator.root) {
            delete(t.drop, root)
        }
    }
}

func (t *switchTable) createMessage(port switchPort) (*switchMessage, []sigInfo) {
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    msg := switchMessage{from: t.key, locator: t.data.locator.clone()}
    msg.locator.coords = append(msg.locator.coords, port)
    msg.seq = t.data.seq
    return &msg, t.data.sigs
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    msg := switchMessage{from: t.key, locator: t.data.locator.clone()}
    msg.locator.coords = append(msg.locator.coords, port)
    msg.seq = t.data.seq
    return &msg, t.data.sigs
}

func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sigs []sigInfo) {
    t.mutex.Lock()
    defer t.mutex.Unlock()
    now := time.Now()
    if len(msg.locator.coords) == 0 { return } // Should always have >=1 links
    oldSender, isIn := t.data.peers[fromPort]
    if !isIn { oldSender.firstSeen = now }
    sender := peerInfo{key: msg.from,
        locator: msg.locator,
        coords: msg.locator.coords[:len(msg.locator.coords)-1],
        time: now,
        firstSeen: oldSender.firstSeen,
        port: fromPort,
        seq: msg.seq}
    equiv := func (x *switchLocator, y *switchLocator) bool {
        if x.root != y.root { return false }
        if len(x.coords) != len(y.coords) { return false }
        for idx := range x.coords {
            if x.coords[idx] != y.coords[idx] { return false }
        }
        return true
    }
    doUpdate := false
    if !equiv(&msg.locator, &oldSender.locator) {
        doUpdate = true
        sender.firstSeen = now
    }
    t.data.peers[fromPort] = sender
    updateRoot := false
    oldParent, isIn := t.data.peers[t.parent]
    noParent := !isIn
    noLoop := func () bool {
        for idx := 0 ; idx < len(sigs)-1 ; idx++ {
            if sigs[idx].next == t.core.sigPub { return false }
        }
        if msg.locator.root == t.core.sigPub { return false }
        return true
    }()
    sTime := now.Sub(sender.firstSeen)
    pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout
    // Really want to compare sLen/sTime and pLen/pTime
    // Cross multiplied to avoid divide-by-zero
    cost := len(msg.locator.coords)*int(pTime.Seconds())
    pCost := len(t.data.locator.coords)*int(sTime.Seconds())
    dropTstamp, isIn := t.drop[msg.locator.root]
    // Here be dragons
    switch {
    case !noLoop: // do nothing
    case isIn && dropTstamp >= msg.locator.tstamp: // do nothing
    case firstIsBetter(&msg.locator.root, &t.data.locator.root): updateRoot = true
    case t.data.locator.root != msg.locator.root: // do nothing
    case t.data.locator.tstamp > msg.locator.tstamp: // do nothing
    case noParent: updateRoot = true
    case cost < pCost: updateRoot = true
    case sender.port == t.parent &&
        (msg.locator.tstamp > t.data.locator.tstamp ||
            !equiv(&msg.locator, &t.data.locator)): updateRoot = true
    }
    if updateRoot {
        if !equiv(&msg.locator, &t.data.locator) {
            doUpdate = true
            t.data.seq++
            select {
            case t.core.router.reset<-struct{}{}:
            default:
            }
            //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
            //fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
        }
        if t.data.locator.tstamp != msg.locator.tstamp { t.time = now }
        t.data.locator = msg.locator
        t.parent = sender.port
        t.data.sigs = sigs
        //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
    }
    if doUpdate { t.updater.Store(&sync.Once{}) }
    return
    t.mutex.Lock()
    defer t.mutex.Unlock()
    now := time.Now()
    if len(msg.locator.coords) == 0 {
        return
    } // Should always have >=1 links
    oldSender, isIn := t.data.peers[fromPort]
    if !isIn {
        oldSender.firstSeen = now
    }
    sender := peerInfo{key: msg.from,
        locator:   msg.locator,
        coords:    msg.locator.coords[:len(msg.locator.coords)-1],
        time:      now,
        firstSeen: oldSender.firstSeen,
        port:      fromPort,
        seq:       msg.seq}
    equiv := func(x *switchLocator, y *switchLocator) bool {
        if x.root != y.root {
            return false
        }
        if len(x.coords) != len(y.coords) {
            return false
        }
        for idx := range x.coords {
            if x.coords[idx] != y.coords[idx] {
                return false
            }
        }
        return true
    }
    doUpdate := false
    if !equiv(&msg.locator, &oldSender.locator) {
        doUpdate = true
        sender.firstSeen = now
    }
    t.data.peers[fromPort] = sender
    updateRoot := false
    oldParent, isIn := t.data.peers[t.parent]
    noParent := !isIn
    noLoop := func() bool {
        for idx := 0; idx < len(sigs)-1; idx++ {
            if sigs[idx].next == t.core.sigPub {
                return false
            }
        }
        if msg.locator.root == t.core.sigPub {
            return false
        }
        return true
    }()
    sTime := now.Sub(sender.firstSeen)
    pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout
    // Really want to compare sLen/sTime and pLen/pTime
    // Cross multiplied to avoid divide-by-zero
    cost := len(msg.locator.coords) * int(pTime.Seconds())
    pCost := len(t.data.locator.coords) * int(sTime.Seconds())
    dropTstamp, isIn := t.drop[msg.locator.root]
    // Here be dragons
    switch {
    case !noLoop: // do nothing
    case isIn && dropTstamp >= msg.locator.tstamp: // do nothing
    case firstIsBetter(&msg.locator.root, &t.data.locator.root):
        updateRoot = true
    case t.data.locator.root != msg.locator.root: // do nothing
    case t.data.locator.tstamp > msg.locator.tstamp: // do nothing
    case noParent:
        updateRoot = true
    case cost < pCost:
        updateRoot = true
    case sender.port == t.parent &&
        (msg.locator.tstamp > t.data.locator.tstamp ||
            !equiv(&msg.locator, &t.data.locator)):
        updateRoot = true
    }
    if updateRoot {
        if !equiv(&msg.locator, &t.data.locator) {
            doUpdate = true
            t.data.seq++
            select {
            case t.core.router.reset <- struct{}{}:
            default:
            }
            //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
            //fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
        }
        if t.data.locator.tstamp != msg.locator.tstamp {
            t.time = now
        }
        t.data.locator = msg.locator
        t.parent = sender.port
        t.data.sigs = sigs
        //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
    }
    if doUpdate {
        t.updater.Store(&sync.Once{})
    }
    return
}
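
The cost comparison in handleMessage wants sLen/sTime versus pLen/pTime (path length per unit of observed uptime), but either duration can round to zero seconds, so it cross-multiplies instead of dividing. The same test in isolation (hypothetical names):

    import "time"

    // senderIsCheaper: sLen/sTime < pLen/pTime, rewritten as
    // sLen*pTime < pLen*sTime so no division (by zero) ever happens.
    func senderIsCheaper(sLen int, sTime time.Duration, pLen int, pTime time.Duration) bool {
        return sLen*int(pTime.Seconds()) < pLen*int(sTime.Seconds())
    }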

func (t *switchTable) updateTable() {
    // WARNING this should only be called from within t.data.updater.Do()
    // It relies on the sync.Once for synchronization with messages and lookups
    // TODO use a pre-computed faster lookup table
    // Instead of checking distance for every destination every time
    // Array of structs, indexed by first coord that differs from self
    // Each struct stores the best port to forward to, and a next coord map
    // Move to struct, then iterate over coord maps until you dead end
    // The last port before the dead end should be the closest
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    newTable := lookupTable{
        self: t.data.locator.clone(),
        elems: make(map[switchPort]tableElem),
    }
    for _, pinfo := range t.data.peers {
        //if !pinfo.forward { continue }
        loc := pinfo.locator.clone()
        loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
        newTable.elems[pinfo.port] = tableElem {
            locator: loc,
            //degree: pinfo.degree,
            firstSeen: pinfo.firstSeen,
            //forward: pinfo.forward,
        }
    }
    t.table.Store(newTable)
    // WARNING this should only be called from within t.data.updater.Do()
    // It relies on the sync.Once for synchronization with messages and lookups
    // TODO use a pre-computed faster lookup table
    // Instead of checking distance for every destination every time
    // Array of structs, indexed by first coord that differs from self
    // Each struct stores the best port to forward to, and a next coord map
    // Move to struct, then iterate over coord maps until you dead end
    // The last port before the dead end should be the closest
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    newTable := lookupTable{
        self:  t.data.locator.clone(),
        elems: make(map[switchPort]tableElem),
    }
    for _, pinfo := range t.data.peers {
        //if !pinfo.forward { continue }
        loc := pinfo.locator.clone()
        loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
        newTable.elems[pinfo.port] = tableElem{
            locator: loc,
            //degree: pinfo.degree,
            firstSeen: pinfo.firstSeen,
            //forward: pinfo.forward,
        }
    }
    t.table.Store(newTable)
}

func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
    t.updater.Load().(*sync.Once).Do(t.updateTable)
    table := t.table.Load().(lookupTable)
    ports := t.core.peers.getPorts()
    getBandwidth := func (port switchPort) float64 {
        var bandwidth float64
        if p, isIn := ports[port]; isIn {
            bandwidth = p.getBandwidth()
        }
        return bandwidth
    }
    var best switchPort
    myDist := table.self.dist(dest) //getDist(table.self.coords)
    if !(uint64(myDist) < ttl) { return 0, 0 }
    // score is in units of bandwidth / distance
    bestScore := float64(-1)
    for port, info := range table.elems {
        if info.locator.root != table.self.root { continue }
        dist := info.locator.dist(dest) //getDist(info.locator.coords)
        if !(dist < myDist) { continue }
        score := getBandwidth(port)
        score /= float64(1+dist)
        if score > bestScore {
            best = port
            bestScore = score
        }
    }
    //t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
    return best, uint64(myDist)
    t.updater.Load().(*sync.Once).Do(t.updateTable)
    table := t.table.Load().(lookupTable)
    ports := t.core.peers.getPorts()
    getBandwidth := func(port switchPort) float64 {
        var bandwidth float64
        if p, isIn := ports[port]; isIn {
            bandwidth = p.getBandwidth()
        }
        return bandwidth
    }
    var best switchPort
    myDist := table.self.dist(dest) //getDist(table.self.coords)
    if !(uint64(myDist) < ttl) {
        return 0, 0
    }
    // score is in units of bandwidth / distance
    bestScore := float64(-1)
    for port, info := range table.elems {
        if info.locator.root != table.self.root {
            continue
        }
        dist := info.locator.dist(dest) //getDist(info.locator.coords)
        if !(dist < myDist) {
            continue
        }
        score := getBandwidth(port)
        score /= float64(1 + dist)
        if score > bestScore {
            best = port
            bestScore = score
        }
    }
    //t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
    return best, uint64(myDist)
}
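
lookup only ever considers next hops that strictly reduce the distance to the destination's coords (so packets cannot loop), and among those picks the one with the best bandwidth-per-remaining-distance score. The selection logic on its own (types are illustrative):

    type hopInfo struct {
        dist      int     // remaining tree distance via this port
        bandwidth float64 // observed bandwidth of the link
    }

    // pickPort mirrors lookup's greedy choice: only ports that strictly
    // shrink the remaining distance are candidates, scored by
    // bandwidth / (1 + dist); the highest score wins.
    func pickPort(myDist int, cands map[uint64]hopInfo) (best uint64, ok bool) {
        bestScore := -1.0
        for port, c := range cands {
            if c.dist >= myDist {
                continue // would not make progress toward the destination
            }
            if score := c.bandwidth / float64(1+c.dist); score > bestScore {
                best, bestScore, ok = port, score, true
            }
        }
        return best, ok
    }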

////////////////////////////////////////////////////////////////////////////////
@ -390,9 +438,8 @@ func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
//Signature stuff

type sigInfo struct {
    next sigPubKey
    sig sigBytes
    next sigPubKey
    sig  sigBytes
}

////////////////////////////////////////////////////////////////////////////////

@ -16,189 +16,226 @@ import "errors"
import "sync"
import "fmt"

const tcp_msgSize = 2048+65535 // TODO figure out what makes sense
const tcp_msgSize = 2048 + 65535 // TODO figure out what makes sense

type tcpInterface struct {
    core *Core
    serv *net.TCPListener
    mutex sync.Mutex // Protecting the below
    calls map[string]struct{}
    core  *Core
    serv  *net.TCPListener
    mutex sync.Mutex // Protecting the below
    calls map[string]struct{}
}

type tcpKeys struct {
    box boxPubKey
    sig sigPubKey
    box boxPubKey
    sig sigPubKey
}

func (iface *tcpInterface) init(core *Core, addr string) {
    iface.core = core
    tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil { panic(err) }
    iface.serv, err = net.ListenTCP("tcp", tcpAddr)
    if err != nil { panic(err) }
    iface.calls = make(map[string]struct{})
    go iface.listener()
    iface.core = core
    tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil {
        panic(err)
    }
    iface.serv, err = net.ListenTCP("tcp", tcpAddr)
    if err != nil {
        panic(err)
    }
    iface.calls = make(map[string]struct{})
    go iface.listener()
}

func (iface *tcpInterface) listener() {
    defer iface.serv.Close()
    iface.core.log.Println("Listening on:", iface.serv.Addr().String())
    for {
        sock, err := iface.serv.AcceptTCP()
        if err != nil { panic(err) }
        go iface.handler(sock)
    }
    defer iface.serv.Close()
    iface.core.log.Println("Listening on:", iface.serv.Addr().String())
    for {
        sock, err := iface.serv.AcceptTCP()
        if err != nil {
            panic(err)
        }
        go iface.handler(sock)
    }
}

func (iface *tcpInterface) call(saddr string) {
    go func() {
        quit := false
        iface.mutex.Lock()
        if _, isIn := iface.calls[saddr]; isIn {
            quit = true
        } else {
            iface.calls[saddr] = struct{}{}
            defer func() {
                iface.mutex.Lock()
                delete(iface.calls, saddr)
                iface.mutex.Unlock()
            }()
        }
        iface.mutex.Unlock()
        if !quit {
            conn, err := net.DialTimeout("tcp", saddr, 6*time.Second)
            if err != nil { return }
            sock := conn.(*net.TCPConn)
            iface.handler(sock)
        }
    }()
    go func() {
        quit := false
        iface.mutex.Lock()
        if _, isIn := iface.calls[saddr]; isIn {
            quit = true
        } else {
            iface.calls[saddr] = struct{}{}
            defer func() {
                iface.mutex.Lock()
                delete(iface.calls, saddr)
                iface.mutex.Unlock()
            }()
        }
        iface.mutex.Unlock()
        if !quit {
            conn, err := net.DialTimeout("tcp", saddr, 6*time.Second)
            if err != nil {
                return
            }
            sock := conn.(*net.TCPConn)
            iface.handler(sock)
        }
    }()
}

func (iface *tcpInterface) handler(sock *net.TCPConn) {
    defer sock.Close()
    // Get our keys
    keys := []byte{}
    keys = append(keys, tcp_key[:]...)
    keys = append(keys, iface.core.boxPub[:]...)
    keys = append(keys, iface.core.sigPub[:]...)
    _, err := sock.Write(keys)
    if err != nil { return }
    timeout := time.Now().Add(6*time.Second)
    sock.SetReadDeadline(timeout)
    n, err := sock.Read(keys)
    if err != nil { return }
    if n < len(keys) { /*panic("Partial key packet?") ;*/ return }
    ks := tcpKeys{}
    if !tcp_chop_keys(&ks.box, &ks.sig, &keys) { /*panic("Invalid key packet?") ;*/ return }
    // Quit the parent call if this is a connection to ourself
    equiv := func(k1, k2 []byte) bool {
        for idx := range k1 {
            if k1[idx] != k2[idx] { return false }
        }
        return true
    }
    if equiv(ks.box[:], iface.core.boxPub[:]) { return } // testing
    if equiv(ks.sig[:], iface.core.sigPub[:]) { return }
    // Note that multiple connections to the same node are allowed
    // E.g. over different interfaces
    linkIn := make(chan []byte, 1)
    p := iface.core.peers.newPeer(&ks.box, &ks.sig)//, in, out)
    in := func(bs []byte) {
        p.handlePacket(bs, linkIn)
    }
    out := make(chan []byte, 1024) // TODO? what size makes sense
    defer close(out)
    go func() {
        var stack [][]byte
        put := func(msg []byte) {
            stack = append(stack, msg)
            for len(stack) > 1024 {
                util_putBytes(stack[0])
                stack = stack[1:]
            }
        }
        send := func() {
            msg := stack[len(stack)-1]
            stack = stack[:len(stack)-1]
            buf := net.Buffers{tcp_msg[:],
                wire_encode_uint64(uint64(len(msg))),
                msg}
            size := 0
            for _, bs := range buf { size += len(bs) }
            start := time.Now()
            buf.WriteTo(sock)
            timed := time.Since(start)
            pType, _ := wire_decode_uint64(msg)
            if pType == wire_LinkProtocolTraffic {
                p.updateBandwidth(size, timed)
            }
            util_putBytes(msg)
        }
        for msg := range out {
            put(msg)
            for len(stack) > 0 {
                // Keep trying to fill the stack (LIFO order) while sending
                select {
                case msg, ok := <-out:
                    if !ok { return }
                    put(msg)
                default: send()
                }
            }
        }
    }()
    p.out = func(msg []byte) {
        defer func() { recover() }()
        for {
            select {
            case out<-msg: return
            default: util_putBytes(<-out)
            }
        }
    }
    sock.SetNoDelay(true)
    go p.linkLoop(linkIn)
    defer func() {
        // Put all of our cleanup here...
        p.core.peers.mutex.Lock()
        oldPorts := p.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k,v := range oldPorts{ newPorts[k] = v }
        delete(newPorts, p.port)
        p.core.peers.putPorts(newPorts)
        p.core.peers.mutex.Unlock()
        close(linkIn)
    }()
    them := sock.RemoteAddr()
    themNodeID := getNodeID(&ks.box)
    themAddr := address_addrForNodeID(themNodeID)
    themAddrString := net.IP(themAddr[:]).String()
    themString := fmt.Sprintf("%s@%s", themAddrString, them)
    iface.core.log.Println("Connected:", themString)
    iface.reader(sock, in) // In this goroutine, because of defers
    iface.core.log.Println("Disconnected:", themString)
    return
    defer sock.Close()
    // Get our keys
    keys := []byte{}
    keys = append(keys, tcp_key[:]...)
    keys = append(keys, iface.core.boxPub[:]...)
    keys = append(keys, iface.core.sigPub[:]...)
    _, err := sock.Write(keys)
    if err != nil {
        return
    }
    timeout := time.Now().Add(6 * time.Second)
    sock.SetReadDeadline(timeout)
    n, err := sock.Read(keys)
    if err != nil {
        return
    }
    if n < len(keys) { /*panic("Partial key packet?") ;*/
        return
    }
    ks := tcpKeys{}
    if !tcp_chop_keys(&ks.box, &ks.sig, &keys) { /*panic("Invalid key packet?") ;*/
        return
    }
    // Quit the parent call if this is a connection to ourself
    equiv := func(k1, k2 []byte) bool {
        for idx := range k1 {
            if k1[idx] != k2[idx] {
                return false
            }
        }
        return true
    }
    if equiv(ks.box[:], iface.core.boxPub[:]) {
        return
    } // testing
    if equiv(ks.sig[:], iface.core.sigPub[:]) {
        return
    }
    // Note that multiple connections to the same node are allowed
    // E.g. over different interfaces
    linkIn := make(chan []byte, 1)
    p := iface.core.peers.newPeer(&ks.box, &ks.sig) //, in, out)
    in := func(bs []byte) {
        p.handlePacket(bs, linkIn)
    }
    out := make(chan []byte, 1024) // TODO? what size makes sense
    defer close(out)
    go func() {
        var stack [][]byte
        put := func(msg []byte) {
            stack = append(stack, msg)
            for len(stack) > 1024 {
                util_putBytes(stack[0])
                stack = stack[1:]
            }
        }
        send := func() {
            msg := stack[len(stack)-1]
            stack = stack[:len(stack)-1]
            buf := net.Buffers{tcp_msg[:],
                wire_encode_uint64(uint64(len(msg))),
                msg}
            size := 0
            for _, bs := range buf {
                size += len(bs)
            }
            start := time.Now()
            buf.WriteTo(sock)
            timed := time.Since(start)
            pType, _ := wire_decode_uint64(msg)
            if pType == wire_LinkProtocolTraffic {
                p.updateBandwidth(size, timed)
            }
            util_putBytes(msg)
        }
        for msg := range out {
            put(msg)
            for len(stack) > 0 {
                // Keep trying to fill the stack (LIFO order) while sending
                select {
                case msg, ok := <-out:
                    if !ok {
                        return
                    }
                    put(msg)
                default:
                    send()
                }
            }
        }
    }()
    p.out = func(msg []byte) {
        defer func() { recover() }()
        for {
            select {
            case out <- msg:
                return
            default:
                util_putBytes(<-out)
            }
        }
    }
    sock.SetNoDelay(true)
    go p.linkLoop(linkIn)
    defer func() {
        // Put all of our cleanup here...
        p.core.peers.mutex.Lock()
        oldPorts := p.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k, v := range oldPorts {
            newPorts[k] = v
        }
        delete(newPorts, p.port)
        p.core.peers.putPorts(newPorts)
        p.core.peers.mutex.Unlock()
        close(linkIn)
    }()
    them := sock.RemoteAddr()
    themNodeID := getNodeID(&ks.box)
    themAddr := address_addrForNodeID(themNodeID)
    themAddrString := net.IP(themAddr[:]).String()
    themString := fmt.Sprintf("%s@%s", themAddrString, them)
    iface.core.log.Println("Connected:", themString)
    iface.reader(sock, in) // In this goroutine, because of defers
    iface.core.log.Println("Disconnected:", themString)
    return
}
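
The writer goroutine in handler drains the out channel into a slice used as a stack: sends go newest-first (LIFO), and anything beyond 1024 queued messages is dropped oldest-first. That keeps latency low for fresh traffic under backpressure, at the cost of reordering. The buffer policy on its own (a sketch):

    // lifoBuf keeps at most max messages, dropping the oldest first
    // and always handing back the newest.
    type lifoBuf struct {
        max   int
        stack [][]byte
    }

    func (b *lifoBuf) put(msg []byte) {
        b.stack = append(b.stack, msg)
        for len(b.stack) > b.max {
            b.stack = b.stack[1:] // oldest dropped first
        }
    }

    func (b *lifoBuf) pop() []byte {
        msg := b.stack[len(b.stack)-1]
        b.stack = b.stack[:len(b.stack)-1]
        return msg
    }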

func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) {
    bs := make([]byte, 2*tcp_msgSize)
    frag := bs[:0]
    for {
        timeout := time.Now().Add(6*time.Second)
        sock.SetReadDeadline(timeout)
        n, err := sock.Read(bs[len(frag):])
        if err != nil || n == 0 { break }
        frag = bs[:len(frag)+n]
        for {
            msg, ok, err := tcp_chop_msg(&frag)
            if err != nil { return }
            if !ok { break } // We didn't get the whole message yet
            newMsg := append(util_getBytes(), msg...)
            in(newMsg)
            util_yield()
        }
        frag = append(bs[:0], frag...)
    }
    bs := make([]byte, 2*tcp_msgSize)
    frag := bs[:0]
    for {
        timeout := time.Now().Add(6 * time.Second)
        sock.SetReadDeadline(timeout)
        n, err := sock.Read(bs[len(frag):])
        if err != nil || n == 0 {
            break
        }
        frag = bs[:len(frag)+n]
        for {
            msg, ok, err := tcp_chop_msg(&frag)
            if err != nil {
                return
            }
            if !ok {
                break
            } // We didn't get the whole message yet
            newMsg := append(util_getBytes(), msg...)
            in(newMsg)
            util_yield()
        }
        frag = append(bs[:0], frag...)
    }
}

////////////////////////////////////////////////////////////////////////////////
@ -208,39 +245,46 @@ var tcp_key = [...]byte{'k', 'e', 'y', 's'}
var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"

func tcp_chop_keys(box *boxPubKey, sig *sigPubKey, bs *[]byte) bool {
    // This one is pretty simple: we know how long the message should be
    // So don't call this with a message that's too short
    if len(*bs) < len(tcp_key) + len(*box) + len(*sig) { return false }
    for idx := range tcp_key {
        if (*bs)[idx] != tcp_key[idx] { return false }
    }
    (*bs) = (*bs)[len(tcp_key):]
    copy(box[:], *bs)
    (*bs) = (*bs)[len(box):]
    copy(sig[:], *bs)
    (*bs) = (*bs)[len(sig):]
    return true
    // This one is pretty simple: we know how long the message should be
    // So don't call this with a message that's too short
    if len(*bs) < len(tcp_key)+len(*box)+len(*sig) {
        return false
    }
    for idx := range tcp_key {
        if (*bs)[idx] != tcp_key[idx] {
            return false
        }
    }
    (*bs) = (*bs)[len(tcp_key):]
    copy(box[:], *bs)
    (*bs) = (*bs)[len(box):]
    copy(sig[:], *bs)
    (*bs) = (*bs)[len(sig):]
    return true
}

func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) {
    // Returns msg, ok, err
    if len(*bs) < len(tcp_msg) { return nil, false, nil }
    for idx := range tcp_msg {
        if (*bs)[idx] != tcp_msg[idx] {
            return nil, false, errors.New("Bad message!")
        }
    }
    msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):])
    if msgLen > tcp_msgSize { return nil, false, errors.New("Oversized message!") }
    msgBegin := len(tcp_msg) + msgLenLen
    msgEnd := msgBegin + int(msgLen)
    if msgLenLen == 0 || len(*bs) < msgEnd {
        // We don't have the full message
        // Need to buffer this and wait for the rest to come in
        return nil, false, nil
    }
    msg := (*bs)[msgBegin:msgEnd]
    (*bs) = (*bs)[msgEnd:]
    return msg, true, nil
    // Returns msg, ok, err
    if len(*bs) < len(tcp_msg) {
        return nil, false, nil
    }
    for idx := range tcp_msg {
        if (*bs)[idx] != tcp_msg[idx] {
            return nil, false, errors.New("Bad message!")
        }
    }
    msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):])
    if msgLen > tcp_msgSize {
        return nil, false, errors.New("Oversized message!")
    }
    msgBegin := len(tcp_msg) + msgLenLen
    msgEnd := msgBegin + int(msgLen)
    if msgLenLen == 0 || len(*bs) < msgEnd {
        // We don't have the full message
        // Need to buffer this and wait for the rest to come in
        return nil, false, nil
    }
    msg := (*bs)[msgBegin:msgEnd]
    (*bs) = (*bs)[msgEnd:]
    return msg, true, nil
}
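
tcp_chop_msg implements the stream framing: a 4-byte magic, a varint length, then the payload, with a nil-and-no-error result meaning "wait for more bytes". The same parse written against the standard library, with binary.Uvarint standing in for wire_decode_uint64 (an assumed equivalence, not a guarantee about the wire format):

    import (
        "bytes"
        "encoding/binary"
        "errors"
    )

    // chopFrame parses magic || uvarint(len) || payload, advancing *buf
    // only once a complete frame is present.
    func chopFrame(magic []byte, buf *[]byte, maxLen uint64) ([]byte, bool, error) {
        if len(*buf) < len(magic) {
            return nil, false, nil // need more data
        }
        if !bytes.HasPrefix(*buf, magic) {
            return nil, false, errors.New("bad frame header")
        }
        msgLen, n := binary.Uvarint((*buf)[len(magic):])
        if n == 0 {
            return nil, false, nil // length prefix incomplete
        }
        if n < 0 || msgLen > maxLen {
            return nil, false, errors.New("oversized frame")
        }
        end := len(magic) + n + int(msgLen)
        if len(*buf) < end {
            return nil, false, nil // payload incomplete; wait for more
        }
        msg := (*buf)[len(magic)+n : end]
        *buf = (*buf)[end:]
        return msg, true, nil
    }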

@ -7,50 +7,45 @@ import water "github.com/songgao/water"
const IPv6_HEADER_LENGTH = 40

type tunDevice struct {
    core *Core
    send chan<- []byte
    recv <-chan []byte
    mtu int
    iface *water.Interface
    core  *Core
    send  chan<- []byte
    recv  <-chan []byte
    mtu   int
    iface *water.Interface
}

func (tun *tunDevice) init(core *Core) {
    tun.core = core
}

func (tun *tunDevice) setup(addr string, mtu int) error {
    iface, err := water.New(water.Config{ DeviceType: water.TUN })
    if err != nil { panic(err) }
    tun.iface = iface
    tun.mtu = mtu //1280 // Let's default to the smallest thing allowed for now
    return tun.setupAddress(addr)
    tun.core = core
}

func (tun *tunDevice) write() error {
    for {
        data := <-tun.recv
        if _, err := tun.iface.Write(data); err != nil { return err }
        util_putBytes(data)
    }
    for {
        data := <-tun.recv
        if _, err := tun.iface.Write(data); err != nil {
            return err
        }
        util_putBytes(data)
    }
}

func (tun *tunDevice) read() error {
    buf := make([]byte, tun.mtu)
    for {
        n, err := tun.iface.Read(buf)
        if err != nil { return err }
        if buf[0] & 0xf0 != 0x60 ||
            n != 256*int(buf[4]) + int(buf[5]) + IPv6_HEADER_LENGTH {
            // Either not an IPv6 packet or not the complete packet for some reason
            //panic("Should not happen in testing")
            continue
        }
        packet := append(util_getBytes(), buf[:n]...)
        tun.send<-packet
    }
    buf := make([]byte, tun.mtu)
    for {
        n, err := tun.iface.Read(buf)
        if err != nil {
            return err
        }
        if buf[0]&0xf0 != 0x60 ||
            n != 256*int(buf[4])+int(buf[5])+IPv6_HEADER_LENGTH {
            // Either not an IPv6 packet or not the complete packet for some reason
            //panic("Should not happen in testing")
            continue
        }
        packet := append(util_getBytes(), buf[:n]...)
        tun.send <- packet
    }
}
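
read accepts a frame from the tun device only when the version nibble says IPv6 and the frame length equals the header's payload-length field (bytes 4 and 5, big-endian) plus the fixed 40-byte header. As a standalone predicate:

    // looksLikeFullIPv6 mirrors the check in read(): version nibble 6,
    // and frame length == payload length (bytes 4-5) + 40-byte header.
    func looksLikeFullIPv6(buf []byte, n int) bool {
        const ipv6HeaderLength = 40
        if n < ipv6HeaderLength || buf[0]&0xf0 != 0x60 {
            return false
        }
        payloadLen := 256*int(buf[4]) + int(buf[5])
        return n == payloadLen+ipv6HeaderLength
    }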
|
||||
|
||||
func (tun *tunDevice) close() error {
|
||||
return tun.iface.Close()
|
||||
return tun.iface.Close()
|
||||
}
|
||||
|
||||
|
@ -7,30 +7,45 @@ import "fmt"
|
||||
import "os/exec"
|
||||
import "strings"
|
||||
|
||||
func (tun *tunDevice) setupAddress(addr string) error {
|
||||
// Set address
|
||||
cmd := exec.Command("ip", "-f", "inet6",
|
||||
"addr", "add", addr,
|
||||
"dev", tun.iface.Name())
|
||||
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
tun.core.log.Printf("Linux ip failed: %v.", err)
|
||||
tun.core.log.Println(string(output))
|
||||
return err
|
||||
}
|
||||
// Set MTU and bring device up
|
||||
cmd = exec.Command("ip", "link", "set",
|
||||
"dev", tun.iface.Name(),
|
||||
"mtu", fmt.Sprintf("%d", tun.mtu),
|
||||
"up")
|
||||
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
tun.core.log.Printf("Linux ip failed: %v.", err)
|
||||
tun.core.log.Println(string(output))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
import water "github.com/songgao/water"
|
||||
|
||||
func (tun *tunDevice) setup(ifname string, addr string, mtu int) error {
|
||||
config := water.Config{DeviceType: water.TUN}
|
||||
if ifname != "" && ifname != "auto" {
|
||||
config.Name = ifname
|
||||
}
|
||||
iface, err := water.New(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tun.iface = iface
|
||||
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
|
||||
return tun.setupAddress(addr)
|
||||
}
|
||||
|
||||
func (tun *tunDevice) setupAddress(addr string) error {
|
||||
// Set address
|
||||
cmd := exec.Command("ip", "-f", "inet6",
|
||||
"addr", "add", addr,
|
||||
"dev", tun.iface.Name())
|
||||
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
tun.core.log.Printf("Linux ip failed: %v.", err)
|
||||
tun.core.log.Println(string(output))
|
||||
return err
|
||||
}
|
||||
// Set MTU and bring device up
|
||||
cmd = exec.Command("ip", "link", "set",
|
||||
"dev", tun.iface.Name(),
|
||||
"mtu", fmt.Sprintf("%d", tun.mtu),
|
||||
"up")
|
||||
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
tun.core.log.Printf("Linux ip failed: %v.", err)
|
||||
tun.core.log.Println(string(output))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -2,11 +2,23 @@
|
||||
|
||||
package yggdrasil
|
||||
|
||||
import water "github.com/songgao/water"
|
||||
|
||||
// This is to catch unsupported platforms
|
||||
// If your platform supports tun devices, you could try configuring it manually
|
||||
|
||||
func (tun *tunDevice) setupAddress(addr string) error {
|
||||
tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr)
|
||||
return nil
|
||||
func (tun *tunDevice) setup(ifname string, addr string, mtu int) error {
|
||||
config := water.Config{DeviceType: water.TUN}
|
||||
iface, err := water.New(config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tun.iface = iface
|
||||
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
|
||||
return tun.setupAddress(addr)
|
||||
}
|
||||
|
||||
func (tun *tunDevice) setupAddress(addr string) error {
|
||||
tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr)
|
||||
return nil
|
||||
}

@ -15,247 +15,277 @@ import "sync"
import "fmt"

type udpInterface struct {
    core *Core
    sock *net.UDPConn // Or more general PacketConn?
    mutex sync.RWMutex // each conn has an owner goroutine
    conns map[connAddr]*connInfo
    core  *Core
    sock  *net.UDPConn // Or more general PacketConn?
    mutex sync.RWMutex // each conn has an owner goroutine
    conns map[connAddr]*connInfo
}

type connAddr string // TODO something more efficient, but still a valid map key
type connInfo struct {
    addr connAddr
    peer *peer
    linkIn chan []byte
    keysIn chan *udpKeys
    timeout int // count of how many heartbeats have been missed
    in func([]byte)
    out chan []byte
    countIn uint8
    countOut uint8
    addr     connAddr
    peer     *peer
    linkIn   chan []byte
    keysIn   chan *udpKeys
    timeout  int // count of how many heartbeats have been missed
    in       func([]byte)
    out      chan []byte
    countIn  uint8
    countOut uint8
}

type udpKeys struct {
    box boxPubKey
    sig sigPubKey
    box boxPubKey
    sig sigPubKey
}

func (iface *udpInterface) init(core *Core, addr string) {
    iface.core = core
    udpAddr, err := net.ResolveUDPAddr("udp", addr)
    if err != nil { panic(err) }
    iface.sock, err = net.ListenUDP("udp", udpAddr)
    if err != nil { panic(err) }
    iface.conns = make(map[connAddr]*connInfo)
    go iface.reader()
    iface.core = core
    udpAddr, err := net.ResolveUDPAddr("udp", addr)
    if err != nil {
        panic(err)
    }
    iface.sock, err = net.ListenUDP("udp", udpAddr)
    if err != nil {
        panic(err)
    }
    iface.conns = make(map[connAddr]*connInfo)
    go iface.reader()
}

func (iface *udpInterface) sendKeys(addr connAddr) {
    udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
    if err != nil { panic(err) }
    msg := []byte{}
    msg = udp_encode(msg, 0, 0, 0, nil)
    msg = append(msg, iface.core.boxPub[:]...)
    msg = append(msg, iface.core.sigPub[:]...)
    iface.sock.WriteToUDP(msg, udpAddr)
    udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
    if err != nil {
        panic(err)
    }
    msg := []byte{}
    msg = udp_encode(msg, 0, 0, 0, nil)
    msg = append(msg, iface.core.boxPub[:]...)
    msg = append(msg, iface.core.sigPub[:]...)
    iface.sock.WriteToUDP(msg, udpAddr)
}

func udp_isKeys(msg []byte) bool {
    keyLen := 3 + boxPubKeyLen + sigPubKeyLen
    return len(msg) == keyLen && msg[0] == 0x00
    keyLen := 3 + boxPubKeyLen + sigPubKeyLen
    return len(msg) == keyLen && msg[0] == 0x00
}
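A key-exchange packet is simply the 3-byte chunk header (all zeros, which is why udp_isKeys can test msg[0] == 0x00) followed by the two public keys. Assuming 32-byte box and sig keys (an assumption from the NaCl-style key types, not stated in this hunk), that makes a 67-byte packet. Illustrative fragment:

// Illustrative only: build a keys packet the way sendKeys does.
// boxPub and sigPub are hypothetical local variables here.
msg := udp_encode(nil, 0, 0, 0, nil) // 3 zero bytes: chunks=0, chunk=0, count=0
msg = append(msg, boxPub[:]...)      // 32-byte box public key (assumed size)
msg = append(msg, sigPub[:]...)      // 32-byte sig public key (assumed size)
// len(msg) == 3+32+32 == 67 and msg[0] == 0, so udp_isKeys(msg) is true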

func (iface *udpInterface) startConn(info *connInfo) {
    ticker := time.NewTicker(6*time.Second)
    defer ticker.Stop()
    defer func () {
        // Cleanup
        // FIXME this still leaks a peer struct
        iface.mutex.Lock()
        delete(iface.conns, info.addr)
        iface.mutex.Unlock()
        iface.core.peers.mutex.Lock()
        oldPorts := iface.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k,v := range oldPorts{ newPorts[k] = v }
        delete(newPorts, info.peer.port)
        iface.core.peers.putPorts(newPorts)
        iface.core.peers.mutex.Unlock()
        close(info.linkIn)
        close(info.keysIn)
        close(info.out)
        iface.core.log.Println("Removing peer:", info.addr)
    }()
    for {
        select {
        case ks := <-info.keysIn: {
            // FIXME? need signatures/sequence-numbers or something
            // Spoofers could lock out a peer with fake/bad keys
            if ks.box == info.peer.box && ks.sig == info.peer.sig {
                info.timeout = 0
            }
        }
        case <-ticker.C: {
            if info.timeout > 10 { return }
            info.timeout++
            iface.sendKeys(info.addr)
        }
        }
    }
    ticker := time.NewTicker(6 * time.Second)
    defer ticker.Stop()
    defer func() {
        // Cleanup
        // FIXME this still leaks a peer struct
        iface.mutex.Lock()
        delete(iface.conns, info.addr)
        iface.mutex.Unlock()
        iface.core.peers.mutex.Lock()
        oldPorts := iface.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k, v := range oldPorts {
            newPorts[k] = v
        }
        delete(newPorts, info.peer.port)
        iface.core.peers.putPorts(newPorts)
        iface.core.peers.mutex.Unlock()
        close(info.linkIn)
        close(info.keysIn)
        close(info.out)
        iface.core.log.Println("Removing peer:", info.addr)
    }()
    for {
        select {
        case ks := <-info.keysIn:
            {
                // FIXME? need signatures/sequence-numbers or something
                // Spoofers could lock out a peer with fake/bad keys
                if ks.box == info.peer.box && ks.sig == info.peer.sig {
                    info.timeout = 0
                }
            }
        case <-ticker.C:
            {
                if info.timeout > 10 {
                    return
                }
                info.timeout++
                iface.sendKeys(info.addr)
            }
        }
    }
}

func (iface *udpInterface) handleKeys(msg []byte, addr connAddr) {
    //defer util_putBytes(msg)
    var ks udpKeys
    _, _, _, bs := udp_decode(msg)
    switch {
    case !wire_chop_slice(ks.box[:], &bs): return
    case !wire_chop_slice(ks.sig[:], &bs): return
    }
    if ks.box == iface.core.boxPub { return }
    if ks.sig == iface.core.sigPub { return }
    iface.mutex.RLock()
    conn, isIn := iface.conns[addr]
    iface.mutex.RUnlock() // TODO? keep the lock longer?...
    if !isIn {
        udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
        if err != nil { panic(err) }
        conn = &connInfo{
            addr: connAddr(addr),
            peer: iface.core.peers.newPeer(&ks.box, &ks.sig),
            linkIn: make(chan []byte, 1),
            keysIn: make(chan *udpKeys, 1),
            out: make(chan []byte, 1024),
        }
        /*
        conn.in = func (msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) }
        conn.peer.out = func (msg []byte) {
            start := time.Now()
            iface.sock.WriteToUDP(msg, udpAddr)
            timed := time.Since(start)
            conn.peer.updateBandwidth(len(msg), timed)
            util_putBytes(msg)
        } // Old version, always one syscall per packet
        //*/
        /*
        conn.peer.out = func (msg []byte) {
            defer func() { recover() }()
            select {
            case conn.out<-msg:
            default: util_putBytes(msg)
            }
        }
        go func () {
            for msg := range conn.out {
                start := time.Now()
                iface.sock.WriteToUDP(msg, udpAddr)
                timed := time.Since(start)
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            }
        }()
        //*/
        //*
        var inChunks uint8
        var inBuf []byte
        conn.in = func(bs []byte) {
            //defer util_putBytes(bs)
            chunks, chunk, count, payload := udp_decode(bs)
            //iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload))
            //iface.core.log.Println("DEBUG: payload:", payload)
            if count != conn.countIn {
                inChunks = 0
                inBuf = inBuf[:0]
                conn.countIn = count
            }
            if chunk <= chunks && chunk == inChunks + 1 {
                //iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload))
                inChunks += 1
                inBuf = append(inBuf, payload...)
                if chunks != chunk { return }
                msg := append(util_getBytes(), inBuf...)
                conn.peer.handlePacket(msg, conn.linkIn)
                //iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload))
            }
        }
        conn.peer.out = func (msg []byte) {
            defer func() { recover() }()
            select {
            case conn.out<-msg:
            default: util_putBytes(msg)
            }
        }
        go func () {
            //var chunks [][]byte
            var out []byte
            for msg := range conn.out {
                var chunks [][]byte
                bs := msg
                for len(bs) > udp_chunkSize {
                    chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:]
                }
                chunks = append(chunks, bs)
                //iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg))
                if len(chunks) > 255 { continue }
                start := time.Now()
                for idx,bs := range chunks {
                    nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut
                    out = udp_encode(out[:0], nChunks, nChunk, count, bs)
                    //iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs))
                    iface.sock.WriteToUDP(out, udpAddr)
                }
                timed := time.Since(start)
                conn.countOut += 1
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            }
        }()
        //*/
        iface.mutex.Lock()
        iface.conns[addr] = conn
        iface.mutex.Unlock()
        themNodeID := getNodeID(&ks.box)
        themAddr := address_addrForNodeID(themNodeID)
        themAddrString := net.IP(themAddr[:]).String()
        themString := fmt.Sprintf("%s@%s", themAddrString, addr)
        iface.core.log.Println("Adding peer:", themString)
        go iface.startConn(conn)
        go conn.peer.linkLoop(conn.linkIn)
        iface.sendKeys(conn.addr)
    }
    func() {
        defer func() { recover() }()
        select {
        case conn.keysIn<-&ks:
        default:
        }
    }()
    //defer util_putBytes(msg)
    var ks udpKeys
    _, _, _, bs := udp_decode(msg)
    switch {
    case !wire_chop_slice(ks.box[:], &bs):
        return
    case !wire_chop_slice(ks.sig[:], &bs):
        return
    }
    if ks.box == iface.core.boxPub {
        return
    }
    if ks.sig == iface.core.sigPub {
        return
    }
    iface.mutex.RLock()
    conn, isIn := iface.conns[addr]
    iface.mutex.RUnlock() // TODO? keep the lock longer?...
    if !isIn {
        udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
        if err != nil {
            panic(err)
        }
        conn = &connInfo{
            addr:   connAddr(addr),
            peer:   iface.core.peers.newPeer(&ks.box, &ks.sig),
            linkIn: make(chan []byte, 1),
            keysIn: make(chan *udpKeys, 1),
            out:    make(chan []byte, 1024),
        }
        /*
            conn.in = func (msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) }
            conn.peer.out = func (msg []byte) {
                start := time.Now()
                iface.sock.WriteToUDP(msg, udpAddr)
                timed := time.Since(start)
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            } // Old version, always one syscall per packet
            //*/
        /*
            conn.peer.out = func (msg []byte) {
                defer func() { recover() }()
                select {
                case conn.out<-msg:
                default: util_putBytes(msg)
                }
            }
            go func () {
                for msg := range conn.out {
                    start := time.Now()
                    iface.sock.WriteToUDP(msg, udpAddr)
                    timed := time.Since(start)
                    conn.peer.updateBandwidth(len(msg), timed)
                    util_putBytes(msg)
                }
            }()
            //*/
        //*
        var inChunks uint8
        var inBuf []byte
        conn.in = func(bs []byte) {
            //defer util_putBytes(bs)
            chunks, chunk, count, payload := udp_decode(bs)
            //iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload))
            //iface.core.log.Println("DEBUG: payload:", payload)
            if count != conn.countIn {
                inChunks = 0
                inBuf = inBuf[:0]
                conn.countIn = count
            }
            if chunk <= chunks && chunk == inChunks+1 {
                //iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload))
                inChunks += 1
                inBuf = append(inBuf, payload...)
                if chunks != chunk {
                    return
                }
                msg := append(util_getBytes(), inBuf...)
                conn.peer.handlePacket(msg, conn.linkIn)
                //iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload))
            }
        }
        conn.peer.out = func(msg []byte) {
            defer func() { recover() }()
            select {
            case conn.out <- msg:
            default:
                util_putBytes(msg)
            }
        }
        go func() {
            //var chunks [][]byte
            var out []byte
            for msg := range conn.out {
                var chunks [][]byte
                bs := msg
                for len(bs) > udp_chunkSize {
                    chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:]
                }
                chunks = append(chunks, bs)
                //iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg))
                if len(chunks) > 255 {
                    continue
                }
                start := time.Now()
                for idx, bs := range chunks {
                    nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut
                    out = udp_encode(out[:0], nChunks, nChunk, count, bs)
                    //iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs))
                    iface.sock.WriteToUDP(out, udpAddr)
                }
                timed := time.Since(start)
                conn.countOut += 1
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            }
        }()
        //*/
        iface.mutex.Lock()
        iface.conns[addr] = conn
        iface.mutex.Unlock()
        themNodeID := getNodeID(&ks.box)
        themAddr := address_addrForNodeID(themNodeID)
        themAddrString := net.IP(themAddr[:]).String()
        themString := fmt.Sprintf("%s@%s", themAddrString, addr)
        iface.core.log.Println("Adding peer:", themString)
        go iface.startConn(conn)
        go conn.peer.linkLoop(conn.linkIn)
        iface.sendKeys(conn.addr)
    }
    func() {
        defer func() { recover() }()
        select {
        case conn.keysIn <- &ks:
        default:
        }
    }()
}

func (iface *udpInterface) handlePacket(msg []byte, addr connAddr) {
    iface.mutex.RLock()
    if conn, isIn := iface.conns[addr]; isIn {
        conn.in(msg)
    }
    iface.mutex.RUnlock()
    iface.mutex.RLock()
    if conn, isIn := iface.conns[addr]; isIn {
        conn.in(msg)
    }
    iface.mutex.RUnlock()
}

func (iface *udpInterface) reader() {
    bs := make([]byte, 2048) // This needs to be large enough for everything...
    for {
        //iface.core.log.Println("Starting read")
        n, udpAddr, err := iface.sock.ReadFromUDP(bs)
        //iface.core.log.Println("Read", n, udpAddr.String(), err)
        if err != nil { panic(err) ; break }
        if n > 1500 { panic(n) }
        //msg := append(util_getBytes(), bs[:n]...)
        msg := bs[:n]
        addr := connAddr(udpAddr.String())
        if udp_isKeys(msg) {
            iface.handleKeys(msg, addr)
        } else {
            iface.handlePacket(msg, addr)
        }
    }
    bs := make([]byte, 2048) // This needs to be large enough for everything...
    for {
        //iface.core.log.Println("Starting read")
        n, udpAddr, err := iface.sock.ReadFromUDP(bs)
        //iface.core.log.Println("Read", n, udpAddr.String(), err)
        if err != nil {
            panic(err)
            break
        }
        if n > 1500 {
            panic(n)
        }
        //msg := append(util_getBytes(), bs[:n]...)
        msg := bs[:n]
        addr := connAddr(udpAddr.String())
        if udp_isKeys(msg) {
            iface.handleKeys(msg, addr)
        } else {
            iface.handlePacket(msg, addr)
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

@ -263,13 +293,12 @@ func (iface *udpInterface) reader() {
const udp_chunkSize = 65535

func udp_decode(bs []byte) (chunks, chunk, count uint8, payload []byte) {
    if len(bs) >= 3 {
        chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
    }
    return
    if len(bs) >= 3 {
        chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
    }
    return
}

func udp_encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte {
    return append(append(out, chunks, chunk, count), payload...)
    return append(append(out, chunks, chunk, count), payload...)
}
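Every datagram carries this 3-byte header: the total chunk count, the 1-based chunk index, and a per-message counter that conn.in uses to discard partial reassemblies when a new message starts. An illustrative round trip:

// Illustrative fragment: encode and decode the 3-byte chunk header.
pkt := udp_encode(nil, 2, 1, 7, []byte("hello")) // chunks=2, chunk=1, count=7
chunks, chunk, count, payload := udp_decode(pkt)
// chunks == 2, chunk == 1, count == 7, string(payload) == "hello"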

@ -4,41 +4,42 @@ package yggdrasil

import "fmt"
import "runtime"

//import "sync"

func Util_testAddrIDMask() {
    for idx := 0 ; idx < 16 ; idx++ {
        var orig NodeID
        orig[8] = 42
        for bidx := 0 ; bidx < idx ; bidx++ {
            orig[bidx/8] |= (0x80 >> uint8(bidx % 8))
        }
        addr := address_addrForNodeID(&orig)
        nid, mask := addr.getNodeIDandMask()
        for b := 0 ; b < len(mask) ; b++ {
            nid[b] &= mask[b]
            orig[b] &= mask[b]
        }
        if *nid != orig {
            fmt.Println(orig)
            fmt.Println(*addr)
            fmt.Println(*nid)
            fmt.Println(*mask)
            panic(idx)
        }
    }
    for idx := 0; idx < 16; idx++ {
        var orig NodeID
        orig[8] = 42
        for bidx := 0; bidx < idx; bidx++ {
            orig[bidx/8] |= (0x80 >> uint8(bidx%8))
        }
        addr := address_addrForNodeID(&orig)
        nid, mask := addr.getNodeIDandMask()
        for b := 0; b < len(mask); b++ {
            nid[b] &= mask[b]
            orig[b] &= mask[b]
        }
        if *nid != orig {
            fmt.Println(orig)
            fmt.Println(*addr)
            fmt.Println(*nid)
            fmt.Println(*mask)
            panic(idx)
        }
    }
}

func util_yield() {
    runtime.Gosched()
    runtime.Gosched()
}

func util_lockthread() {
    runtime.LockOSThread()
    runtime.LockOSThread()
}

func util_unlockthread() {
    runtime.UnlockOSThread()
    runtime.UnlockOSThread()
}

/*
@ -58,22 +59,23 @@ func util_putBytes(bs []byte) {
var byteStore chan []byte

func util_initByteStore() {
    if byteStore == nil {
        byteStore = make(chan []byte, 32)
    }
    if byteStore == nil {
        byteStore = make(chan []byte, 32)
    }
}

func util_getBytes() []byte {
    select {
    case bs := <-byteStore: return bs[:0]
    default: return nil
    }
    select {
    case bs := <-byteStore:
        return bs[:0]
    default:
        return nil
    }
}

func util_putBytes(bs []byte) {
    select {
    case byteStore<-bs:
    default:
    }
    select {
    case byteStore <- bs:
    default:
    }
}
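util_getBytes and util_putBytes form a small freelist: a 32-slot channel of recycled slices, where an empty pool yields nil and a full pool silently drops the returned slice. A minimal usage sketch (payload is a hypothetical variable, not from this diff; nil from util_getBytes is fine because append on a nil slice allocates):

// Illustrative use of the byte freelist.
util_initByteStore()
buf := append(util_getBytes(), payload...) // reuse a pooled slice if one is free
// ... use buf ...
util_putBytes(buf) // return it to the pool, or drop it if the pool is full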

@ -7,101 +7,107 @@ package yggdrasil
// TODO? make things still work after reordering (after things stabilize more?)
// Type safety would also be nice, `type wire_type uint64`, rewrite as needed?
const (
    wire_Traffic = iota // data being routed somewhere, handle for crypto
    wire_ProtocolTraffic // protocol traffic, pub keys for crypto
    wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
    wire_SwitchAnnounce // TODO put inside protocol traffic header
    wire_SwitchHopRequest // TODO put inside protocol traffic header
    wire_SwitchHop // TODO put inside protocol traffic header
    wire_SessionPing // inside protocol traffic header
    wire_SessionPong // inside protocol traffic header
    wire_DHTLookupRequest // inside protocol traffic header
    wire_DHTLookupResponse // inside protocol traffic header
    wire_SearchRequest // inside protocol traffic header
    wire_SearchResponse // inside protocol traffic header
    //wire_Keys // udp key packet (boxPub, sigPub)
    wire_Traffic             = iota // data being routed somewhere, handle for crypto
    wire_ProtocolTraffic            // protocol traffic, pub keys for crypto
    wire_LinkProtocolTraffic        // link proto traffic, pub keys for crypto
    wire_SwitchAnnounce             // TODO put inside protocol traffic header
    wire_SwitchHopRequest           // TODO put inside protocol traffic header
    wire_SwitchHop                  // TODO put inside protocol traffic header
    wire_SessionPing                // inside protocol traffic header
    wire_SessionPong                // inside protocol traffic header
    wire_DHTLookupRequest           // inside protocol traffic header
    wire_DHTLookupResponse          // inside protocol traffic header
    wire_SearchRequest              // inside protocol traffic header
    wire_SearchResponse             // inside protocol traffic header
    //wire_Keys // udp key packet (boxPub, sigPub)
)

// Encode uint64 using a variable length scheme
// Similar to binary.Uvarint, but big-endian
func wire_encode_uint64(elem uint64) []byte {
    return wire_put_uint64(elem, nil)
    return wire_put_uint64(elem, nil)
}

// Occasionally useful for appending to an existing slice (if there's room)
func wire_put_uint64(elem uint64, out []byte) []byte {
    bs := make([]byte, 0, 10)
    bs = append(bs, byte(elem & 0x7f))
    for e := elem >> 7 ; e > 0 ; e >>= 7 {
        bs = append(bs, byte(e | 0x80))
    }
    // Now reverse bytes, because we set them in the wrong order
    // TODO just put them in the right place the first time...
    last := len(bs)-1
    for idx := 0 ; idx < len(bs)/2 ; idx++ {
        bs[idx], bs[last-idx] = bs[last-idx], bs[idx]
    }
    return append(out, bs...)
    bs := make([]byte, 0, 10)
    bs = append(bs, byte(elem&0x7f))
    for e := elem >> 7; e > 0; e >>= 7 {
        bs = append(bs, byte(e|0x80))
    }
    // Now reverse bytes, because we set them in the wrong order
    // TODO just put them in the right place the first time...
    last := len(bs) - 1
    for idx := 0; idx < len(bs)/2; idx++ {
        bs[idx], bs[last-idx] = bs[last-idx], bs[idx]
    }
    return append(out, bs...)
}

// Decode uint64 from a []byte slice
// Returns the decoded uint64 and the number of bytes used
func wire_decode_uint64(bs []byte) (uint64, int) {
    length := 0
    elem := uint64(0)
    for _, b := range bs {
        elem <<= 7
        elem |= uint64(b & 0x7f)
        length++
        if b & 0x80 == 0 { break }
    }
    return elem, length
    length := 0
    elem := uint64(0)
    for _, b := range bs {
        elem <<= 7
        elem |= uint64(b & 0x7f)
        length++
        if b&0x80 == 0 {
            break
        }
    }
    return elem, length
}
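The scheme is a big-endian cousin of binary.Uvarint: seven payload bits per byte, with the high bit set on every byte except the last. A worked example (illustrative):

// 300 = 0b10_0101100, which splits into the 7-bit groups 0b10 and 0b0101100:
bs := wire_encode_uint64(300) // []byte{0x82, 0x2c}
elem, n := wire_decode_uint64(bs)
// elem == 300, n == 2 bytes consumed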

func wire_intToUint(i int64) uint64 {
    var u uint64
    if i < 0 {
        u = uint64(-i) << 1
        u |= 0x01 // sign bit
    } else {
        u = uint64(i) << 1
    }
    return u
    var u uint64
    if i < 0 {
        u = uint64(-i) << 1
        u |= 0x01 // sign bit
    } else {
        u = uint64(i) << 1
    }
    return u
}

func wire_intFromUint(u uint64) int64 {
    var i int64
    i = int64(u >> 1)
    if u & 0x01 != 0 { i *= -1 }
    return i
    var i int64
    i = int64(u >> 1)
    if u&0x01 != 0 {
        i *= -1
    }
    return i
}
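This is a sign-magnitude variant of zigzag encoding: the magnitude is shifted left one bit and the low bit carries the sign, so small negative values stay small on the wire and compose well with the varint above. For instance (illustrative):

// i:  0   1  -1   2  -2   maps to   u:  0   2   3   4   5
u := wire_intToUint(-2)  // 5
i := wire_intFromUint(u) // -2 round-trips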

////////////////////////////////////////////////////////////////////////////////

// Takes coords, returns coords prefixed with encoded coord length
func wire_encode_coords(coords []byte) ([]byte) {
    coordLen := wire_encode_uint64(uint64(len(coords)))
    bs := make([]byte, 0, len(coordLen)+len(coords))
    bs = append(bs, coordLen...)
    bs = append(bs, coords...)
    return bs
func wire_encode_coords(coords []byte) []byte {
    coordLen := wire_encode_uint64(uint64(len(coords)))
    bs := make([]byte, 0, len(coordLen)+len(coords))
    bs = append(bs, coordLen...)
    bs = append(bs, coords...)
    return bs
}

func wire_put_coords(coords []byte, bs []byte) ([]byte) {
    bs = wire_put_uint64(uint64(len(coords)), bs)
    bs = append(bs, coords...)
    return bs
func wire_put_coords(coords []byte, bs []byte) []byte {
    bs = wire_put_uint64(uint64(len(coords)), bs)
    bs = append(bs, coords...)
    return bs
}

// Takes a packet that begins with coords (starting with coord length)
// Returns a slice of coords and the number of bytes read
func wire_decode_coords(packet []byte) ([]byte, int) {
    coordLen, coordBegin := wire_decode_uint64(packet)
    coordEnd := coordBegin+int(coordLen)
    //if coordBegin == 0 { panic("No coords found") } // Testing
    //if coordEnd > len(packet) { panic("Packet too short") } // Testing
    if coordBegin == 0 || coordEnd > len(packet) { return nil, 0 }
    return packet[coordBegin:coordEnd], coordEnd
    coordLen, coordBegin := wire_decode_uint64(packet)
    coordEnd := coordBegin + int(coordLen)
    //if coordBegin == 0 { panic("No coords found") } // Testing
    //if coordEnd > len(packet) { panic("Packet too short") } // Testing
    if coordBegin == 0 || coordEnd > len(packet) {
        return nil, 0
    }
    return packet[coordBegin:coordEnd], coordEnd
}
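Coords travel as a varint length prefix followed by the raw bytes, so a decoder can skip them without interpreting them. Illustrative round trip:

coords := []byte{1, 2, 3}
bs := wire_encode_coords(coords) // []byte{3, 1, 2, 3}: length 3, then the coords
got, n := wire_decode_coords(bs)
// got == []byte{1, 2, 3}, n == 4 bytes consumed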

////////////////////////////////////////////////////////////////////////////////

@ -109,144 +115,171 @@ func wire_decode_coords(packet []byte) ([]byte, int) {

// Announces that we can send parts of a Message with a particular seq
type msgAnnounce struct {
    root sigPubKey
    tstamp int64
    seq uint64
    len uint64
    //Deg uint64
    //RSeq uint64
    root   sigPubKey
    tstamp int64
    seq    uint64
    len    uint64
    //Deg uint64
    //RSeq uint64
}

func (m *msgAnnounce) encode() []byte {
    bs := wire_encode_uint64(wire_SwitchAnnounce)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.len)...)
    //bs = append(bs, wire_encode_uint64(m.Deg)...)
    //bs = append(bs, wire_encode_uint64(m.RSeq)...)
    return bs
    bs := wire_encode_uint64(wire_SwitchAnnounce)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.len)...)
    //bs = append(bs, wire_encode_uint64(m.Deg)...)
    //bs = append(bs, wire_encode_uint64(m.RSeq)...)
    return bs
}

func (m *msgAnnounce) decode(bs []byte) bool {
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SwitchAnnounce: return false
    case !wire_chop_slice(m.root[:], &bs): return false
    case !wire_chop_uint64(&tstamp, &bs): return false
    case !wire_chop_uint64(&m.seq, &bs): return false
    case !wire_chop_uint64(&m.len, &bs): return false
    //case !wire_chop_uint64(&m.Deg, &bs): return false
    //case !wire_chop_uint64(&m.RSeq, &bs): return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SwitchAnnounce:
        return false
    case !wire_chop_slice(m.root[:], &bs):
        return false
    case !wire_chop_uint64(&tstamp, &bs):
        return false
    case !wire_chop_uint64(&m.seq, &bs):
        return false
    case !wire_chop_uint64(&m.len, &bs):
        return false
        //case !wire_chop_uint64(&m.Deg, &bs): return false
        //case !wire_chop_uint64(&m.RSeq, &bs): return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
}

type msgHopReq struct {
    root sigPubKey
    tstamp int64
    seq uint64
    hop uint64
    root   sigPubKey
    tstamp int64
    seq    uint64
    hop    uint64
}

func (m *msgHopReq) encode() []byte {
    bs := wire_encode_uint64(wire_SwitchHopRequest)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.hop)...)
    return bs
    bs := wire_encode_uint64(wire_SwitchHopRequest)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.hop)...)
    return bs
}

func (m *msgHopReq) decode(bs []byte) bool {
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SwitchHopRequest: return false
    case !wire_chop_slice(m.root[:], &bs): return false
    case !wire_chop_uint64(&tstamp, &bs): return false
    case !wire_chop_uint64(&m.seq, &bs): return false
    case !wire_chop_uint64(&m.hop, &bs): return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SwitchHopRequest:
        return false
    case !wire_chop_slice(m.root[:], &bs):
        return false
    case !wire_chop_uint64(&tstamp, &bs):
        return false
    case !wire_chop_uint64(&m.seq, &bs):
        return false
    case !wire_chop_uint64(&m.hop, &bs):
        return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
}

type msgHop struct {
    root sigPubKey
    tstamp int64
    seq uint64
    hop uint64
    port switchPort
    next sigPubKey
    sig sigBytes
    root   sigPubKey
    tstamp int64
    seq    uint64
    hop    uint64
    port   switchPort
    next   sigPubKey
    sig    sigBytes
}

func (m *msgHop) encode() []byte {
    bs := wire_encode_uint64(wire_SwitchHop)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.hop)...)
    bs = append(bs, wire_encode_uint64(uint64(m.port))...)
    bs = append(bs, m.next[:]...)
    bs = append(bs, m.sig[:]...)
    return bs
    bs := wire_encode_uint64(wire_SwitchHop)
    bs = append(bs, m.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
    bs = append(bs, wire_encode_uint64(m.seq)...)
    bs = append(bs, wire_encode_uint64(m.hop)...)
    bs = append(bs, wire_encode_uint64(uint64(m.port))...)
    bs = append(bs, m.next[:]...)
    bs = append(bs, m.sig[:]...)
    return bs
}

func (m *msgHop) decode(bs []byte) bool {
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SwitchHop: return false
    case !wire_chop_slice(m.root[:], &bs): return false
    case !wire_chop_uint64(&tstamp, &bs): return false
    case !wire_chop_uint64(&m.seq, &bs): return false
    case !wire_chop_uint64(&m.hop, &bs): return false
    case !wire_chop_uint64((*uint64)(&m.port), &bs): return false
    case !wire_chop_slice(m.next[:], &bs): return false
    case !wire_chop_slice(m.sig[:], &bs): return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SwitchHop:
        return false
    case !wire_chop_slice(m.root[:], &bs):
        return false
    case !wire_chop_uint64(&tstamp, &bs):
        return false
    case !wire_chop_uint64(&m.seq, &bs):
        return false
    case !wire_chop_uint64(&m.hop, &bs):
        return false
    case !wire_chop_uint64((*uint64)(&m.port), &bs):
        return false
    case !wire_chop_slice(m.next[:], &bs):
        return false
    case !wire_chop_slice(m.sig[:], &bs):
        return false
    }
    m.tstamp = wire_intFromUint(tstamp)
    return true
}

// Format used to check signatures only, so no need to also support decoding
func wire_encode_locator(loc *switchLocator) []byte {
    coords := wire_encode_coords(loc.getCoords())
    var bs []byte
    bs = append(bs, loc.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...)
    bs = append(bs, coords...)
    return bs
    coords := wire_encode_coords(loc.getCoords())
    var bs []byte
    bs = append(bs, loc.root[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...)
    bs = append(bs, coords...)
    return bs
}

func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
    if len(*fromSlice) < len(toSlice) { return false }
    copy(toSlice, *fromSlice)
    *fromSlice = (*fromSlice)[len(toSlice):]
    return true
    if len(*fromSlice) < len(toSlice) {
        return false
    }
    copy(toSlice, *fromSlice)
    *fromSlice = (*fromSlice)[len(toSlice):]
    return true
}

func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
    coords, coordLen := wire_decode_coords(*fromSlice)
    if coordLen == 0 { return false }
    *toCoords = append((*toCoords)[:0], coords...)
    *fromSlice = (*fromSlice)[coordLen:]
    return true
    coords, coordLen := wire_decode_coords(*fromSlice)
    if coordLen == 0 {
        return false
    }
    *toCoords = append((*toCoords)[:0], coords...)
    *fromSlice = (*fromSlice)[coordLen:]
    return true
}

func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
    dec, decLen := wire_decode_uint64(*fromSlice)
    if decLen == 0 { return false }
    *toUInt64 = dec
    *fromSlice = (*fromSlice)[decLen:]
    return true
    dec, decLen := wire_decode_uint64(*fromSlice)
    if decLen == 0 {
        return false
    }
    *toUInt64 = dec
    *fromSlice = (*fromSlice)[decLen:]
    return true
}

////////////////////////////////////////////////////////////////////////////////

@ -254,239 +287,289 @@ func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
// Wire traffic packets

type wire_trafficPacket struct {
    ttl uint64 // TODO? hide this as a wire format detail, not set by user
    coords []byte
    handle handle
    nonce boxNonce
    payload []byte
    ttl     uint64 // TODO? hide this as a wire format detail, not set by user
    coords  []byte
    handle  handle
    nonce   boxNonce
    payload []byte
}

// This is basically MarshalBinary, but decode doesn't allow that...
func (p *wire_trafficPacket) encode() []byte {
    bs := util_getBytes()
    bs = wire_put_uint64(wire_Traffic, bs)
    bs = wire_put_uint64(p.ttl, bs)
    bs = wire_put_coords(p.coords, bs)
    bs = append(bs, p.handle[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
    bs := util_getBytes()
    bs = wire_put_uint64(wire_Traffic, bs)
    bs = wire_put_uint64(p.ttl, bs)
    bs = wire_put_coords(p.coords, bs)
    bs = append(bs, p.handle[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
}

// Not just UnmarshalBinary because the original slice isn't always copied from
func (p *wire_trafficPacket) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_Traffic: return false
    case !wire_chop_uint64(&p.ttl, &bs): return false
    case !wire_chop_coords(&p.coords, &bs): return false
    case !wire_chop_slice(p.handle[:], &bs): return false
    case !wire_chop_slice(p.nonce[:], &bs): return false
    }
    p.payload = append(util_getBytes(), bs...)
    return true
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_Traffic:
        return false
    case !wire_chop_uint64(&p.ttl, &bs):
        return false
    case !wire_chop_coords(&p.coords, &bs):
        return false
    case !wire_chop_slice(p.handle[:], &bs):
        return false
    case !wire_chop_slice(p.nonce[:], &bs):
        return false
    }
    p.payload = append(util_getBytes(), bs...)
    return true
}

type wire_protoTrafficPacket struct {
    ttl uint64 // TODO? hide this as a wire format detail, not set by user
    coords []byte
    toKey boxPubKey
    fromKey boxPubKey
    nonce boxNonce
    payload []byte
    ttl     uint64 // TODO? hide this as a wire format detail, not set by user
    coords  []byte
    toKey   boxPubKey
    fromKey boxPubKey
    nonce   boxNonce
    payload []byte
}

func (p *wire_protoTrafficPacket) encode() []byte {
    coords := wire_encode_coords(p.coords)
    bs := wire_encode_uint64(wire_ProtocolTraffic)
    bs = append(bs, wire_encode_uint64(p.ttl)...)
    bs = append(bs, coords...)
    bs = append(bs, p.toKey[:]...)
    bs = append(bs, p.fromKey[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
    coords := wire_encode_coords(p.coords)
    bs := wire_encode_uint64(wire_ProtocolTraffic)
    bs = append(bs, wire_encode_uint64(p.ttl)...)
    bs = append(bs, coords...)
    bs = append(bs, p.toKey[:]...)
    bs = append(bs, p.fromKey[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
}

func(p *wire_protoTrafficPacket) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_ProtocolTraffic: return false
    case !wire_chop_uint64(&p.ttl, &bs): return false
    case !wire_chop_coords(&p.coords, &bs): return false
    case !wire_chop_slice(p.toKey[:], &bs): return false
    case !wire_chop_slice(p.fromKey[:], &bs): return false
    case !wire_chop_slice(p.nonce[:], &bs): return false
    }
    p.payload = bs
    return true
func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_ProtocolTraffic:
        return false
    case !wire_chop_uint64(&p.ttl, &bs):
        return false
    case !wire_chop_coords(&p.coords, &bs):
        return false
    case !wire_chop_slice(p.toKey[:], &bs):
        return false
    case !wire_chop_slice(p.fromKey[:], &bs):
        return false
    case !wire_chop_slice(p.nonce[:], &bs):
        return false
    }
    p.payload = bs
    return true
}

type wire_linkProtoTrafficPacket struct {
    toKey boxPubKey
    fromKey boxPubKey
    nonce boxNonce
    payload []byte
    toKey   boxPubKey
    fromKey boxPubKey
    nonce   boxNonce
    payload []byte
}

func (p *wire_linkProtoTrafficPacket) encode() []byte {
    bs := wire_encode_uint64(wire_LinkProtocolTraffic)
    bs = append(bs, p.toKey[:]...)
    bs = append(bs, p.fromKey[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
    bs := wire_encode_uint64(wire_LinkProtocolTraffic)
    bs = append(bs, p.toKey[:]...)
    bs = append(bs, p.fromKey[:]...)
    bs = append(bs, p.nonce[:]...)
    bs = append(bs, p.payload...)
    return bs
}

func(p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_LinkProtocolTraffic: return false
    case !wire_chop_slice(p.toKey[:], &bs): return false
    case !wire_chop_slice(p.fromKey[:], &bs): return false
    case !wire_chop_slice(p.nonce[:], &bs): return false
    }
    p.payload = bs
    return true
func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_LinkProtocolTraffic:
        return false
    case !wire_chop_slice(p.toKey[:], &bs):
        return false
    case !wire_chop_slice(p.fromKey[:], &bs):
        return false
    case !wire_chop_slice(p.nonce[:], &bs):
        return false
    }
    p.payload = bs
    return true
}

////////////////////////////////////////////////////////////////////////////////

func (p *sessionPing) encode() []byte {
    var pTypeVal uint64
    if p.isPong {
        pTypeVal = wire_SessionPong
    } else {
        pTypeVal = wire_SessionPing
    }
    bs := wire_encode_uint64(pTypeVal)
    //p.sendPermPub used in top level (crypto), so skipped here
    bs = append(bs, p.handle[:]...)
    bs = append(bs, p.sendSesPub[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...)
    coords := wire_encode_coords(p.coords)
    bs = append(bs, coords...)
    return bs
    var pTypeVal uint64
    if p.isPong {
        pTypeVal = wire_SessionPong
    } else {
        pTypeVal = wire_SessionPing
    }
    bs := wire_encode_uint64(pTypeVal)
    //p.sendPermPub used in top level (crypto), so skipped here
    bs = append(bs, p.handle[:]...)
    bs = append(bs, p.sendSesPub[:]...)
    bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...)
    coords := wire_encode_coords(p.coords)
    bs = append(bs, coords...)
    return bs
}

func (p *sessionPing) decode(bs []byte) bool {
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SessionPing && pType != wire_SessionPong: return false
    //p.sendPermPub used in top level (crypto), so skipped here
    case !wire_chop_slice(p.handle[:], &bs): return false
    case !wire_chop_slice(p.sendSesPub[:], &bs): return false
    case !wire_chop_uint64(&tstamp, &bs): return false
    case !wire_chop_coords(&p.coords, &bs): return false
    }
    p.tstamp = wire_intFromUint(tstamp)
    if pType == wire_SessionPong { p.isPong = true }
    return true
    var pType uint64
    var tstamp uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SessionPing && pType != wire_SessionPong:
        return false
    //p.sendPermPub used in top level (crypto), so skipped here
    case !wire_chop_slice(p.handle[:], &bs):
        return false
    case !wire_chop_slice(p.sendSesPub[:], &bs):
        return false
    case !wire_chop_uint64(&tstamp, &bs):
        return false
    case !wire_chop_coords(&p.coords, &bs):
        return false
    }
    p.tstamp = wire_intFromUint(tstamp)
    if pType == wire_SessionPong {
        p.isPong = true
    }
    return true
}

////////////////////////////////////////////////////////////////////////////////

func (r *dhtReq) encode() []byte {
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_DHTLookupRequest)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_DHTLookupRequest)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
}

func (r *dhtReq) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_DHTLookupRequest: return false
    case !wire_chop_slice(r.key[:], &bs): return false
    case !wire_chop_coords(&r.coords, &bs): return false
    case !wire_chop_slice(r.dest[:], &bs): return false
    default: return true
    }
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_DHTLookupRequest:
        return false
    case !wire_chop_slice(r.key[:], &bs):
        return false
    case !wire_chop_coords(&r.coords, &bs):
        return false
    case !wire_chop_slice(r.dest[:], &bs):
        return false
    default:
        return true
    }
}

func (r *dhtRes) encode() []byte {
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_DHTLookupResponse)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    for _, info := range r.infos {
        coords = wire_encode_coords(info.coords)
        bs = append(bs, info.key[:]...)
        bs = append(bs, coords...)
    }
    return bs
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_DHTLookupResponse)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    for _, info := range r.infos {
        coords = wire_encode_coords(info.coords)
        bs = append(bs, info.key[:]...)
        bs = append(bs, coords...)
    }
    return bs
}

func (r *dhtRes) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_DHTLookupResponse: return false
    case !wire_chop_slice(r.key[:], &bs): return false
    case !wire_chop_coords(&r.coords, &bs): return false
    case !wire_chop_slice(r.dest[:], &bs): return false
    }
    for len(bs) > 0 {
        info := dhtInfo{}
        switch {
        case !wire_chop_slice(info.key[:], &bs): return false
        case !wire_chop_coords(&info.coords, &bs): return false
        }
        r.infos = append(r.infos, &info)
    }
    return true
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_DHTLookupResponse:
        return false
    case !wire_chop_slice(r.key[:], &bs):
        return false
    case !wire_chop_coords(&r.coords, &bs):
        return false
    case !wire_chop_slice(r.dest[:], &bs):
        return false
    }
    for len(bs) > 0 {
        info := dhtInfo{}
        switch {
        case !wire_chop_slice(info.key[:], &bs):
            return false
        case !wire_chop_coords(&info.coords, &bs):
            return false
        }
        r.infos = append(r.infos, &info)
    }
    return true
}

////////////////////////////////////////////////////////////////////////////////

func (r *searchReq) encode() []byte {
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_SearchRequest)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_SearchRequest)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
}

func (r *searchReq) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SearchRequest: return false
    case !wire_chop_slice(r.key[:], &bs): return false
    case !wire_chop_coords(&r.coords, &bs): return false
    case !wire_chop_slice(r.dest[:], &bs): return false
    default: return true
    }
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SearchRequest:
        return false
    case !wire_chop_slice(r.key[:], &bs):
        return false
    case !wire_chop_coords(&r.coords, &bs):
        return false
    case !wire_chop_slice(r.dest[:], &bs):
        return false
    default:
        return true
    }
}

func (r *searchRes) encode() []byte {
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_SearchResponse)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
    coords := wire_encode_coords(r.coords)
    bs := wire_encode_uint64(wire_SearchResponse)
    bs = append(bs, r.key[:]...)
    bs = append(bs, coords...)
    bs = append(bs, r.dest[:]...)
    return bs
}

func (r *searchRes) decode(bs []byte) bool {
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs): return false
    case pType != wire_SearchResponse: return false
    case !wire_chop_slice(r.key[:], &bs): return false
    case !wire_chop_coords(&r.coords, &bs): return false
    case !wire_chop_slice(r.dest[:], &bs): return false
    default: return true
    }
    var pType uint64
    switch {
    case !wire_chop_uint64(&pType, &bs):
        return false
    case pType != wire_SearchResponse:
        return false
    case !wire_chop_slice(r.key[:], &bs):
        return false
    case !wire_chop_coords(&r.coords, &bs):
        return false
    case !wire_chop_slice(r.dest[:], &bs):
        return false
    default:
        return true
    }
}

yggdrasil.go
@ -25,131 +25,172 @@ import . "yggdrasil"
 * It can generate a new config (--genconf)
 * It can read a config from stdin (--useconf)
 * It can run with an automatic config (--autoconf)
*/
*/

type nodeConfig struct {
    Listen string
    Peers []string
    BoxPub string
    BoxPriv string
    SigPub string
    SigPriv string
    Multicast bool
    Listen    string
    Peers     []string
    BoxPub    string
    BoxPriv   string
    SigPub    string
    SigPriv   string
    Multicast bool
    IfName    string
}

type node struct {
    core Core
    sock *ipv6.PacketConn
    core Core
    sock *ipv6.PacketConn
}

func (n *node) init(cfg *nodeConfig, logger *log.Logger) {
    boxPub, err := hex.DecodeString(cfg.BoxPub)
    if err != nil { panic(err) }
    boxPriv, err := hex.DecodeString(cfg.BoxPriv)
    if err != nil { panic(err) }
    sigPub, err := hex.DecodeString(cfg.SigPub)
    if err != nil { panic(err) }
    sigPriv, err := hex.DecodeString(cfg.SigPriv)
    if err != nil { panic(err) }
    n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
    n.core.DEBUG_setLogger(logger)
    logger.Println("Starting interface...")
    n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen)
    logger.Println("Started interface")
    go func () {
        if len(cfg.Peers) == 0 { return }
        for {
            for _, p := range cfg.Peers {
                n.core.DEBUG_maybeSendUDPKeys(p)
                time.Sleep(time.Second)
            }
            time.Sleep(time.Minute)
        }
    }()
    boxPub, err := hex.DecodeString(cfg.BoxPub)
    if err != nil {
        panic(err)
    }
    boxPriv, err := hex.DecodeString(cfg.BoxPriv)
    if err != nil {
        panic(err)
    }
    sigPub, err := hex.DecodeString(cfg.SigPub)
    if err != nil {
        panic(err)
    }
    sigPriv, err := hex.DecodeString(cfg.SigPriv)
    if err != nil {
        panic(err)
    }
    n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
    n.core.DEBUG_setLogger(logger)
    logger.Println("Starting interface...")
    n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen)
    logger.Println("Started interface")
    go func() {
        if len(cfg.Peers) == 0 {
            return
        }
        for {
            for _, p := range cfg.Peers {
                n.core.DEBUG_maybeSendUDPKeys(p)
                time.Sleep(time.Second)
            }
            time.Sleep(time.Minute)
        }
    }()
}

func generateConfig() *nodeConfig {
    core := Core{}
    bpub, bpriv := core.DEBUG_newBoxKeys()
    spub, spriv := core.DEBUG_newSigKeys()
    cfg := nodeConfig{}
    cfg.Listen = "[::]:0"
    cfg.BoxPub = hex.EncodeToString(bpub[:])
    cfg.BoxPriv = hex.EncodeToString(bpriv[:])
    cfg.SigPub = hex.EncodeToString(spub[:])
    cfg.SigPriv = hex.EncodeToString(spriv[:])
    cfg.Peers = []string{}
    cfg.Multicast = true
    return &cfg
    core := Core{}
    bpub, bpriv := core.DEBUG_newBoxKeys()
    spub, spriv := core.DEBUG_newSigKeys()
    cfg := nodeConfig{}
    cfg.Listen = "[::]:0"
    cfg.BoxPub = hex.EncodeToString(bpub[:])
    cfg.BoxPriv = hex.EncodeToString(bpriv[:])
    cfg.SigPub = hex.EncodeToString(spub[:])
    cfg.SigPriv = hex.EncodeToString(spriv[:])
    cfg.Peers = []string{}
    cfg.Multicast = true
    cfg.IfName = "auto"
    return &cfg
}

func doGenconf() string {
    cfg := generateConfig()
    bs, err := json.MarshalIndent(cfg, "", " ")
    if err != nil { panic(err) }
    return string(bs)
    cfg := generateConfig()
    bs, err := json.MarshalIndent(cfg, "", " ")
    if err != nil {
        panic(err)
    }
    return string(bs)
}

var multicastAddr = "[ff02::114]:9001"

func (n *node) listen() {
    groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil { panic(err) }
    bs := make([]byte, 2048)
    for {
        nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
        if err != nil { panic(err) }
        //if rcm == nil { continue } // wat
        //fmt.Println("DEBUG:", "packet from:", fromAddr.String())
        if !rcm.Dst.IsLinkLocalMulticast() { continue }
        if !rcm.Dst.Equal(groupAddr.IP) { continue }
        anAddr := string(bs[:nBytes])
        addr, err := net.ResolveUDPAddr("udp6", anAddr)
        if err != nil { panic(err) ; continue } // Panic for testing, remove later
        from := fromAddr.(*net.UDPAddr)
        //fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
        if addr.IP.String() != from.IP.String() { continue }
        addr.Zone = from.Zone
        saddr := addr.String()
        //if _, isIn := n.peers[saddr]; isIn { continue }
        //n.peers[saddr] = struct{}{}
        n.core.DEBUG_maybeSendUDPKeys(saddr)
        //fmt.Println("DEBUG:", "added multicast peer:", saddr)
    }
    groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil {
        panic(err)
    }
    bs := make([]byte, 2048)
    for {
        nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
        if err != nil {
            panic(err)
        }
        //if rcm == nil { continue } // wat
        //fmt.Println("DEBUG:", "packet from:", fromAddr.String())
        if !rcm.Dst.IsLinkLocalMulticast() {
            continue
        }
        if !rcm.Dst.Equal(groupAddr.IP) {
            continue
        }
        anAddr := string(bs[:nBytes])
        addr, err := net.ResolveUDPAddr("udp6", anAddr)
        if err != nil {
            panic(err)
            continue
        } // Panic for testing, remove later
        from := fromAddr.(*net.UDPAddr)
        //fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
        if addr.IP.String() != from.IP.String() {
            continue
        }
        addr.Zone = from.Zone
        saddr := addr.String()
        //if _, isIn := n.peers[saddr]; isIn { continue }
        //n.peers[saddr] = struct{}{}
        n.core.DEBUG_maybeSendUDPKeys(saddr)
        //fmt.Println("DEBUG:", "added multicast peer:", saddr)
    }
}

func (n *node) announce() {
    groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil { panic(err) }
    udpaddr := n.core.DEBUG_getGlobalUDPAddr()
    anAddr, err := net.ResolveUDPAddr("udp6", udpaddr.String())
    if err != nil { panic(err) }
    destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil { panic(err) }
    for {
        ifaces, err := net.Interfaces()
        if err != nil { panic(err) }
        for _, iface := range ifaces {
            n.sock.JoinGroup(&iface, groupAddr)
            //err := n.sock.JoinGroup(&iface, groupAddr)
            //if err != nil { panic(err) }
            addrs, err := iface.Addrs()
            if err != nil { panic(err) }
            for _, addr := range addrs {
                addrIP, _, _ := net.ParseCIDR(addr.String())
                if addrIP.To4() != nil { continue } // IPv6 only
                if !addrIP.IsLinkLocalUnicast() { continue }
                anAddr.IP = addrIP
                anAddr.Zone = iface.Name
                destAddr.Zone = iface.Name
                msg := []byte(anAddr.String())
                n.sock.WriteTo(msg, nil, destAddr)
                break
            }
            time.Sleep(time.Second)
        }
        time.Sleep(time.Second)
    }
    groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil {
        panic(err)
    }
    udpaddr := n.core.DEBUG_getGlobalUDPAddr()
    anAddr, err := net.ResolveUDPAddr("udp6", udpaddr.String())
    if err != nil {
        panic(err)
    }
    destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil {
        panic(err)
    }
    for {
        ifaces, err := net.Interfaces()
        if err != nil {
            panic(err)
        }
        for _, iface := range ifaces {
            n.sock.JoinGroup(&iface, groupAddr)
            //err := n.sock.JoinGroup(&iface, groupAddr)
            //if err != nil { panic(err) }
            addrs, err := iface.Addrs()
            if err != nil {
                panic(err)
            }
            for _, addr := range addrs {
                addrIP, _, _ := net.ParseCIDR(addr.String())
                if addrIP.To4() != nil {
                    continue
                } // IPv6 only
                if !addrIP.IsLinkLocalUnicast() {
                    continue
                }
                anAddr.IP = addrIP
                anAddr.Zone = iface.Name
                destAddr.Zone = iface.Name
                msg := []byte(anAddr.String())
                n.sock.WriteTo(msg, nil, destAddr)
                break
            }
            time.Sleep(time.Second)
        }
        time.Sleep(time.Second)
    }
}

var pprof = flag.Bool("pprof", false, "Run pprof, see http://localhost:6060/debug/pprof/")
@ -158,53 +199,67 @@ var useconf = flag.Bool("useconf", false, "read config from stdin")
var autoconf = flag.Bool("autoconf", false, "automatic mode (dynamic IP, peer with IPv6 neighbors)")

func main() {
    flag.Parse()
    var cfg *nodeConfig
    switch {
    case *autoconf: cfg = generateConfig()
    case *useconf:
        config, err := ioutil.ReadAll(os.Stdin)
        if err != nil { panic(err) }
        decoder := json.NewDecoder(bytes.NewReader(config))
        err = decoder.Decode(&cfg)
        if err != nil { panic(err) }
    case *genconf: fmt.Println(doGenconf())
    default: flag.PrintDefaults()
    }
    if cfg == nil { return }
    logger := log.New(os.Stdout, "", log.Flags())
    if *pprof {
        runtime.SetBlockProfileRate(1)
        go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
    }
    // Setup
    logger.Println("Initializing...")
    n := node{}
    n.init(cfg, logger)
    logger.Println("Starting tun...")
    n.core.DEBUG_startTun() // 1280, the smallest supported MTU
    //n.core.DEBUG_startTunWithMTU(65535) // Largest supported MTU
    defer func() {
        logger.Println("Closing...")
        n.core.DEBUG_stopTun()
    }()
    logger.Println("Started...")
    if cfg.Multicast {
        addr, err := net.ResolveUDPAddr("udp", multicastAddr)
        if err != nil { panic(err) }
        listenString := fmt.Sprintf("[::]:%v", addr.Port)
        conn, err := net.ListenPacket("udp6", listenString)
        if err != nil { panic(err) }
        //defer conn.Close() // Let it close on its own when the application exits
        n.sock = ipv6.NewPacketConn(conn)
        if err = n.sock.SetControlMessage(ipv6.FlagDst, true) ; err != nil { panic(err) }
        go n.listen()
        go n.announce()
    }
    // Catch interrupt to exit gracefully
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    <-c
    logger.Println("Stopping...")
    flag.Parse()
    var cfg *nodeConfig
    switch {
    case *autoconf:
        cfg = generateConfig()
    case *useconf:
        config, err := ioutil.ReadAll(os.Stdin)
        if err != nil {
            panic(err)
        }
        decoder := json.NewDecoder(bytes.NewReader(config))
        err = decoder.Decode(&cfg)
        if err != nil {
            panic(err)
        }
    case *genconf:
        fmt.Println(doGenconf())
    default:
        flag.PrintDefaults()
    }
    if cfg == nil {
        return
    }
    logger := log.New(os.Stdout, "", log.Flags())
    if *pprof {
        runtime.SetBlockProfileRate(1)
        go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
    }
    // Setup
    logger.Println("Initializing...")
    n := node{}
    n.init(cfg, logger)
    logger.Println("Starting tun...")
    n.core.DEBUG_startTun(cfg.IfName) // 1280, the smallest supported MTU
    //n.core.DEBUG_startTunWithMTU(cfg.IfName, 65535) // Largest supported MTU
    defer func() {
        logger.Println("Closing...")
        n.core.DEBUG_stopTun()
    }()
    logger.Println("Started...")
    if cfg.Multicast {
        addr, err := net.ResolveUDPAddr("udp", multicastAddr)
        if err != nil {
            panic(err)
        }
        listenString := fmt.Sprintf("[::]:%v", addr.Port)
        conn, err := net.ListenPacket("udp6", listenString)
        if err != nil {
            panic(err)
        }
        //defer conn.Close() // Let it close on its own when the application exits
        n.sock = ipv6.NewPacketConn(conn)
        if err = n.sock.SetControlMessage(ipv6.FlagDst, true); err != nil {
            panic(err)
        }
        go n.listen()
        go n.announce()
    }
    // Catch interrupt to exit gracefully
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    <-c
    logger.Println("Stopping...")
}
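Typical invocations, assuming the built binary is named yggdrasil (the binary name is an assumption; the flags are the ones defined above):

./yggdrasil --genconf > yggdrasil.conf   # write a fresh config; IfName defaults to "auto"
./yggdrasil --useconf < yggdrasil.conf   # run with that config read from stdin
./yggdrasil --autoconf                   # run with an ephemeral automatic config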