Mirror of https://github.com/cwinfo/yggdrasil-go.git (synced 2024-11-25 20:41:36 +00:00)

cleanup of misc files

commit c74ec0e32f (parent c1fe7d271e)
@ -1,23 +0,0 @@
#!/bin/sh

ip netns add peerns
ip link add veth0 type veth peer name veth1
ifconfig veth0 192.168.2.1/24 up
echo "1"
#tc qdisc add dev veth0 root tbf rate 8mbit burst 8192 latency 1ms
#tc qdisc add dev veth0 root netem delay 50ms 5ms distribution normal
echo "2"
ip link set veth1 netns peerns
ip netns exec peerns ifconfig veth1 192.168.2.2/24 up
echo "3"
#ip netns exec peerns tc qdisc add dev veth1 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth1 root netem delay 50ms 5ms distribution normal
echo "4"
ip netns exec peerns ip addr list
#ip netns exec peerns ./run -useconf=conf2.json
ip netns exec peerns ip link set dev lo up
ip netns exec peerns ./run -autoconf -pprof
#GODEBUG=gctrace=1 ip netns exec peerns ./run -autoconf
#ip netns exec peerns ./run -useconf=conf2.json -cpuprofile=cpu2.prof -memprofile=mem2.prof
#ip netns delete peerns
@ -1,29 +0,0 @@
#!/bin/sh

ip netns add peerns3
ip link add veth23 type veth peer name veth32
ip link set veth23 netns peerns
ip netns exec peerns ifconfig veth23 192.168.3.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth32 netns peerns3
ip netns exec peerns3 ifconfig veth32 192.168.3.2/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

ip netns exec peerns3 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns3 ifconfig lo up
ip netns exec peerns3 ./run -autoconf
#ip netns delete peerns3
@ -1,28 +0,0 @@
#!/bin/sh

ip netns add peerns4
ip link add veth34 type veth peer name veth43
ip link set veth34 netns peerns3
ip netns exec peerns3 ifconfig veth34 192.168.4.3/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth43 netns peerns4
ip netns exec peerns4 ifconfig veth43 192.168.4.4/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns4 ip route add 192.168.3.0/24 via 192.168.4.3

#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

ip netns exec peerns4 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns4 ./run -autoconf
#ip netns delete peerns3
@ -1,60 +0,0 @@
import glob
inputDirPath = "out-skitter"

inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()

merged = dict()

stretches = []

total = 0
for inputFilePath in inputFilePaths:
    print "Processing file {}".format(inputFilePath)
    with open(inputFilePath, 'r') as f:
        inData = f.readlines()
    pathsChecked = 0.
    avgStretch = 0.
    for line in inData:
        dat = line.rstrip('\n').split(' ')
        eHops = int(dat[0])
        nHops = int(dat[1])
        count = int(dat[2])
        if eHops not in merged: merged[eHops] = dict()
        if nHops not in merged[eHops]: merged[eHops][nHops] = 0
        merged[eHops][nHops] += count
        total += count
        pathsChecked += count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
    finStretch = avgStretch / max(1, pathsChecked)
    stretches.append(str(finStretch))

hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
    for nHops in sorted(merged[eHops].keys()):
        count = merged[eHops][nHops]
        result = "{} {} {}".format(eHops, nHops, count)
        results.append(result)
        hopsUsed += nHops*count
        hopsNeeded += eHops*count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
        print result
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)

with open("results.txt", "w") as f:
    f.write('\n'.join(results))

with open("stretches.txt", "w") as f:
    f.write('\n'.join(stretches))

print "Total files processed: {}".format(len(inputFilePaths))
print "Total paths found: {}".format(total)
print "Bandwidth usage: {}".format(bandwidthUsage)
print "Average stretch: {}".format(avgStretch)
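For clarity, a minimal runnable sketch (Python 2, matching the script above) of the statistics derived from the merged eHops/nHops matrix; the counts here are made up for illustration:

# Hypothetical merged matrix: eHops -> nHops -> count of measured paths
merged = {2: {2: 20}, 4: {4: 30, 6: 10}}
hopsNeeded = sum(e*c for e, bucket in merged.items() for c in bucket.values())  # 2*20 + 4*40 = 200
hopsUsed = sum(n*c for e, bucket in merged.items() for n, c in bucket.items())  # 2*20 + 4*30 + 6*10 = 220
total = sum(c for bucket in merged.values() for c in bucket.values())           # 60 paths
avgStretch = sum((float(n)/e)*c for e, bucket in merged.items() for n, c in bucket.items())/total
print "Bandwidth usage: {}".format(hopsUsed/float(hopsNeeded))  # 220/200 = 1.1
print "Average stretch: {}".format(avgStretch)                  # 65/60 ~= 1.083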
@ -1,61 +0,0 @@
import glob
import sys
inputDirPath = sys.argv[1]

inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()

merged = dict()

stretches = []

total = 0
for inputFilePath in inputFilePaths:
    print "Processing file {}".format(inputFilePath)
    with open(inputFilePath, 'r') as f:
        inData = f.readlines()
    pathsChecked = 0.
    avgStretch = 0.
    for line in inData:
        dat = line.rstrip('\n').split(' ')
        eHops = int(dat[0])
        nHops = int(dat[1])
        count = int(dat[2])
        if eHops not in merged: merged[eHops] = dict()
        if nHops not in merged[eHops]: merged[eHops][nHops] = 0
        merged[eHops][nHops] += count
        total += count
        pathsChecked += count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
    finStretch = avgStretch / max(1, pathsChecked)
    stretches.append(str(finStretch))

hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
    for nHops in sorted(merged[eHops].keys()):
        count = merged[eHops][nHops]
        result = "{} {} {}".format(eHops, nHops, count)
        results.append(result)
        hopsUsed += nHops*count
        hopsNeeded += eHops*count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
        print result
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)

with open("results.txt", "w") as f:
    f.write('\n'.join(results))

with open("stretches.txt", "w") as f:
    f.write('\n'.join(stretches))

print "Total files processed: {}".format(len(inputFilePaths))
print "Total paths found: {}".format(total)
print "Bandwidth usage: {}".format(bandwidthUsage)
print "Average stretch: {}".format(avgStretch)
@ -1,197 +0,0 @@
package main

import "fmt"
import "bufio"
import "os"
import "strings"
import "strconv"
import "time"

import "runtime/pprof"
import "flag"

import "router"

////////////////////////////////////////////////////////////////////////////////

type Node struct {
    nodeID router.NodeID
    table  router.Table
    links  []*Node
}

func (n *Node) init(nodeID router.NodeID) {
    n.nodeID = nodeID
    n.table.Init(nodeID)
    n.links = append(n.links, n)
}

func linkNodes(m, n *Node) {
    for _, o := range m.links {
        if o.nodeID == n.nodeID {
            // Don't allow duplicates
            return
        }
    }
    m.links = append(m.links, n)
    n.links = append(n.links, m)
}

func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node {
    store := make(map[router.NodeID]*Node)
    nNodes := sideLength * sideLength
    nodeIDs := make([]router.NodeID, 0, nNodes)
    // TODO shuffle nodeIDs
    for nodeID := 1; nodeID <= nNodes; nodeID++ {
        nodeIDs = append(nodeIDs, router.NodeID(nodeID))
    }
    for _, nodeID := range nodeIDs {
        node := &Node{}
        node.init(nodeID)
        store[nodeID] = node
    }
    for idx := 0; idx < nNodes; idx++ {
        if (idx % sideLength) != 0 {
            linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]])
        }
        if idx >= sideLength {
            linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]])
        }
    }
    return store
}

func loadGraph(path string) map[router.NodeID]*Node {
    f, err := os.Open(path)
    if err != nil {
        panic(err)
    }
    defer f.Close()
    store := make(map[router.NodeID]*Node)
    s := bufio.NewScanner(f)
    for s.Scan() {
        line := s.Text()
        nodeIDstrs := strings.Split(line, " ")
        nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0])
        nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1])
        nodeID0 := router.NodeID(nodeIDi0)
        nodeID1 := router.NodeID(nodeIDi1)
        if store[nodeID0] == nil {
            node := &Node{}
            node.init(nodeID0)
            store[nodeID0] = node
        }
        if store[nodeID1] == nil {
            node := &Node{}
            node.init(nodeID1)
            store[nodeID1] = node
        }
        linkNodes(store[nodeID0], store[nodeID1])
    }
    return store
}

////////////////////////////////////////////////////////////////////////////////

func idleUntilConverged(store map[router.NodeID]*Node) {
    timeOfLastChange := 0
    step := 0
    // Idle until the network has converged
    for step-timeOfLastChange < 4*router.TIMEOUT {
        step++
        fmt.Println("Step:", step, "--", "last change:", timeOfLastChange)
        for _, node := range store {
            node.table.Tick()
            for idx, link := range node.links[1:] {
                msg := node.table.CreateMessage(router.Iface(idx))
                for idx, fromNode := range link.links {
                    if fromNode == node {
                        //fmt.Println("Sending from node", node.nodeID, "to", link.nodeID)
                        link.table.HandleMessage(msg, router.Iface(idx))
                        break
                    }
                }
            }
        }
        //for _, node := range store {
        //  if node.table.DEBUG_isDirty() { timeOfLastChange = step }
        //}
        //time.Sleep(10*time.Millisecond)
    }
}

func testPaths(store map[router.NodeID]*Node) {
    nNodes := len(store)
    nodeIDs := make([]router.NodeID, 0, nNodes)
    for nodeID := range store {
        nodeIDs = append(nodeIDs, nodeID)
    }
    lookups := 0
    count := 0
    start := time.Now()
    for _, source := range store {
        count++
        fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID)
        for _, dest := range store {
            //if source == dest { continue }
            destLoc := dest.table.GetLocator()
            temp := 0
            for here := source; here != dest; {
                temp++
                if temp > 16 {
                    panic("Loop?")
                }
                next := here.links[here.table.Lookup(destLoc)]
                if next == here {
                    //for idx, link := range here.links {
                    //  fmt.Println("DUMP:", idx, link.nodeID)
                    //}
                    panic(fmt.Sprintln("Routing Loop:",
                        source.nodeID,
                        here.nodeID,
                        dest.nodeID))
                }
                //fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID)
                here = next
                lookups++
            }
        }
    }
    timed := time.Since(start)
    fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds())
}

func dumpStore(store map[router.NodeID]*Node) {
    for _, node := range store {
        fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator())
        node.table.DEBUG_dumpTable()
    }
}

////////////////////////////////////////////////////////////////////////////////

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprint("could not create CPU profile: ", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprint("could not start CPU profile: ", err))
        }
        defer pprof.StopCPUProfile()
    }
    fmt.Println("Test")
    store := makeStoreSquareGrid(4)
    idleUntilConverged(store)
    dumpStore(store)
    testPaths(store)
    //panic("DYING")
    store = loadGraph("hype-2016-09-19.list")
    idleUntilConverged(store)
    dumpStore(store)
    testPaths(store)
}
@ -1,903 +0,0 @@
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
# 1: Pick any node, here I'm using highest nodeID
# 2: Build spanning tree, each node stores path back to root
#    Optionally with weights for each hop
#    Ties broken by preferring a parent with higher degree
# 3: Distance metric: self->peer + (via tree) peer->dest
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
# 5: Source-route traffic using the better of those two paths

# Note: This makes no attempt to simulate a dynamic network
# E.g. A node's peers cannot be disconnected

# TODO:
# Make better use of drop?
# In particular, we should be ignoring *all* recently dropped *paths* to the root
#   To minimize route flapping
#   Not really an issue in the sim, but probably needed for a real network
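# (Illustrative note, not part of the original file: a toy example of the
# step-3 metric, using the treeDist helper defined further down. If a peer P
# is known via a 2-hop path and sits at tree coords [1, 5], and the
# destination's coords are [1, 5, 9], then
#   dist = 2 + treeDist([1, 5], [1, 5, 9]) = 2 + 1 = 3
# i.e. the hops to reach P, plus one hop down the tree from P to the destination.)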
import array
import gc
import glob
import gzip
import heapq
import os
import random
import time

#############
# Constants #
#############

# Reminder of where link cost comes in
LINK_COST = 1

# Timeout before dropping something, in simulated seconds
TIMEOUT = 60

###########
# Classes #
###########

class PathInfo:
    def __init__(self, nodeID):
        self.nodeID = nodeID  # e.g. IP
        self.coords = []      # Position in tree
        self.tstamp = 0       # Timestamp from sender, to keep track of old vs new info
        self.degree = 0       # Number of peers the sender has, used to break ties
        # The above should be signed
        self.path = [nodeID]  # Path to node (in path-vector route)
        self.time = 0         # Time info was updated, to keep track of e.g. timeouts
        self.treeID = nodeID  # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
    def clone(self):
        # Return a deep-enough copy of the path
        clone = PathInfo(None)
        clone.nodeID = self.nodeID
        clone.coords = self.coords[:]
        clone.tstamp = self.tstamp
        clone.degree = self.degree
        clone.path = self.path[:]
        clone.time = self.time
        clone.treeID = self.treeID
        return clone
# End class PathInfo

class Node:
    def __init__(self, nodeID):
        self.info = PathInfo(nodeID)  # Self NodeInfo
        self.root = None    # PathInfo to node at root of tree
        self.drop = dict()  # PathInfo to nodes from clus that have timed out
        self.peers = dict() # PathInfo to peers
        self.links = dict() # Links to peers (to pass messages)
        self.msgs = []      # Said messages
        self.table = dict() # Pre-computed lookup table of peer info

    def tick(self):
        # Do periodic maintenance stuff, including push updates
        self.info.time += 1
        if self.info.time > self.info.tstamp + TIMEOUT/4:
            # Update timestamp at least once every 1/4 timeout period
            # This should probably be randomized in a real implementation
            self.info.tstamp = self.info.time
            self.info.degree = 0  # TODO decide if degree should be used, len(self.peers)
        changed = False  # Used to track when the network has converged
        changed |= self.cleanRoot()
        self.cleanDropped()
        # Should probably send messages infrequently if there's nothing new to report
        if self.info.tstamp == self.info.time:
            msg = self.createMessage()
            self.sendMessage(msg)
        return changed

    def cleanRoot(self):
        changed = False
        if self.root and self.info.time - self.root.time > TIMEOUT:
            print "DEBUG: clean root,", self.root.path
            self.drop[self.root.treeID] = self.root
            self.root = None
            changed = True
        if not self.root or self.root.treeID < self.info.treeID:
            # No need to drop someone who's worse than us
            self.info.coords = [self.info.nodeID]
            self.root = self.info.clone()
            changed = True
        elif self.root.treeID == self.info.treeID:
            self.root = self.info.clone()
        return changed

    def cleanDropped(self):
        # May actually be a treeID... better to iterate over keys explicitly
        nodeIDs = sorted(self.drop.keys())
        for nodeID in nodeIDs:
            node = self.drop[nodeID]
            if self.info.time - node.time > 4*TIMEOUT:
                del self.drop[nodeID]
        return None

    def createMessage(self):
        # Message is just a tuple
        # First element is the sender
        # Second element is the root
        # We will .clone() everything during the send operation
        msg = (self.info, self.root)
        return msg

    def sendMessage(self, msg):
        for link in self.links.values():
            newMsg = (msg[0].clone(), msg[1].clone())
            link.msgs.append(newMsg)
        return None

    def handleMessages(self):
        changed = False
        while self.msgs:
            changed |= self.handleMessage(self.msgs.pop())
        return changed

    def handleMessage(self, msg):
        changed = False
        for node in msg:
            # Update the path and timestamp for the sender and root info
            node.path.append(self.info.nodeID)
            node.time = self.info.time
        # Update the sender's info in our list of peers
        sender = msg[0]
        self.peers[sender.nodeID] = sender
        # Decide if we want to update the root
        root = msg[1]
        updateRoot = False
        isSameParent = False
        isBetterParent = False
        if len(self.root.path) > 1 and len(root.path) > 1:
            parent = self.peers[self.root.path[-2]]
            if parent.nodeID == sender.nodeID: isSameParent = True
            if sender.degree > parent.degree:
                # This would also be where you check path uptime/reliability/whatever
                # All else being equal, we prefer parents with high degree
                # We are trusting peers to report degree correctly in this case
                # So expect some performance reduction if your peers aren't trustworthy
                # (Lies can increase average stretch by a few %)
                isBetterParent = True
        if self.info.nodeID in root.path[:-1]: pass  # No loopy routes allowed
        elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
        elif not self.root: updateRoot = True
        elif self.root.treeID < root.treeID: updateRoot = True
        elif self.root.treeID != root.treeID: pass
        elif self.root.tstamp > root.tstamp: pass
        elif len(root.path) < len(self.root.path): updateRoot = True
        elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
        elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
        if updateRoot:
            if not self.root or self.root.path != root.path: changed = True
            self.root = root
            self.info.coords = self.root.path
        return changed

    def lookup(self, dest):
        # Note: Can loop in an unconverged network
        # The person looking up the route is responsible for checking for loops
        best = None
        bestDist = 0
        for node in self.peers.itervalues():
            # dist = distance to node + dist (on tree) from node to dest
            dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
            if not best or dist < bestDist:
                best = node
                bestDist = dist
        if best:
            next = best.path[-2]
            assert next in self.peers
            return next
        else:
            # We failed to look something up
            # TODO some way to signal this which doesn't crash
            assert False

    def initTable(self):
        # Pre-computes a lookup table for destination coords
        # Insert parent first so you prefer them as a next-hop
        self.table.clear()
        parent = self.info.nodeID
        if len(self.info.coords) >= 2: parent = self.info.coords[-2]
        for peer in self.peers.itervalues():
            current = self.table
            for coord in peer.coords:
                if coord not in current: current[coord] = (peer.nodeID, dict())
                old = current[coord]
                next = old[1]
                oldPeer = self.peers[old[0]]
                oldDist = len(oldPeer.coords)
                oldDeg = oldPeer.degree
                newDist = len(peer.coords)
                newDeg = peer.degree
                # Prefer parent
                # Else prefer short distance from root
                # If equal distance, prefer high degree
                if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
                elif newDist < oldDist: current[coord] = (peer.nodeID, next)
                elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
                current = next
        return None
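    # (Illustrative note, not part of the original file: the table built above
    # is a trie keyed on coords. With a parent at coords [1] and peers A at
    # [1, 2] and B at [1, 3], it looks like
    #   {1: (parentID, {2: (A_ID, {}), 3: (B_ID, {})})}
    # lookup_new below walks a destination's coords down this trie and returns
    # the nodeID stored at the deepest matching entry.)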
    def lookup_new(self, dest):
        # Use pre-computed lookup table to look up next hop for dest coords
        assert self.table
        if len(self.info.coords) >= 2: parent = self.info.coords[-2]
        else: parent = None
        current = (parent, self.table)
        c = None
        for coord in dest.coords:
            c = coord
            if coord not in current[1]: break
            current = current[1][coord]
        next = current[0]
        if c in self.peers: next = c
        if next not in self.peers:
            assert next == None
            # You're the root of a different connected component
            # You'd drop the packet in this case
            # To make the path cache not die, need to return a valid next hop...
            # Returning self for that reason
            next = self.info.nodeID
        return next
# End class Node

####################
# Helper Functions #
####################
def getIndexOfLCA(source, dest):
    # Return index of last common ancestor in source/dest coords
    # -1 if no common ancestor (e.g. different roots)
    lcaIdx = -1
    minLen = min(len(source), len(dest))
    for idx in xrange(minLen):
        if source[idx] == dest[idx]: lcaIdx = idx
        else: break
    return lcaIdx

def treePath(source, dest):
    # Return path with source at head and dest at tail
    lastMatch = getIndexOfLCA(source, dest)
    path = dest[-1:lastMatch:-1] + source[lastMatch:]
    assert path[0] == dest[-1]
    assert path[-1] == source[-1]
    return path

def treeDist(source, dest):
    dist = len(source) + len(dest)
    lcaIdx = getIndexOfLCA(source, dest)
    dist -= 2*(lcaIdx+1)
    return dist
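# (Illustrative note, not part of the original file: a worked example of the
# helpers above. For source = [1, 2, 3] and dest = [1, 2, 5]:
#   getIndexOfLCA -> 1 (the last shared coord is 2, at index 1)
#   treePath      -> [5, 2, 3]
#   treeDist      -> 3 + 3 - 2*(1+1) = 2, i.e. one hop up to the common
#                    ancestor and one hop back down.)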
def dijkstra(nodestore, startingNodeID):
    # Idea to use heapq and basic implementation taken from stackexchange post
    # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
    nodeIDs = sorted(nodestore.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    dists = array.array("H", [0]*nNodes)
    queue = [(0, startingNodeID)]
    while queue:
        dist, nodeID = heapq.heappop(queue)
        idx = idxs[nodeID]
        if not dists[idx]:  # Unvisited, otherwise we skip it
            dists[idx] = dist
            for peer in nodestore[nodeID].links:
                if not dists[idxs[peer]]:
                    # Peer is also unvisited, so add to queue
                    heapq.heappush(queue, (dist+LINK_COST, peer))
    return dists

def dijkstrall(nodestore):
    # Idea to use heapq and basic implementation taken from stackexchange post
    # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
    nodeIDs = sorted(nodestore.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    dists = array.array("H", [0]*nNodes*nNodes)  # use getCacheIndex(nNodes, start, end)
    for sourceIdx in xrange(nNodes):
        print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
        queue = [(0, sourceIdx)]
        while queue:
            dist, nodeIdx = heapq.heappop(queue)
            distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
            if not dists[distIdx]:  # Unvisited, otherwise we skip it
                dists[distIdx] = dist
                for peer in nodestore[nodeIDs[nodeIdx]].links:
                    pIdx = idxs[peer]
                    pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
                    if not dists[pdIdx]:
                        # Peer is also unvisited, so add to queue
                        heapq.heappush(queue, (dist+LINK_COST, pIdx))
    return dists
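# (Illustrative note, not part of the original file: dists is a flattened
# row-major nNodes*nNodes matrix, indexed via getCacheIndex (defined below).
# E.g. with nNodes = 4, the distance from node index 2 to node index 3 is
# stored at getCacheIndex(4, 2, 3) = 2*4 + 3 = 11. A value of 0 doubles as
# "unvisited"/unreachable, which is why callers treat 0 hops as "no path".)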
def linkNodes(node1, node2):
    node1.links[node2.info.nodeID] = node2
    node2.links[node1.info.nodeID] = node1

############################
# Store topology functions #
############################

def makeStoreSquareGrid(sideLength, randomize=True):
    # Simple grid in a sideLength*sideLength square
    # Just used to validate that the code runs
    store = dict()
    nodeIDs = list(range(sideLength*sideLength))
    if randomize: random.shuffle(nodeIDs)
    for nodeID in nodeIDs:
        store[nodeID] = Node(nodeID)
    for index in xrange(len(nodeIDs)):
        if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
        if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
    print "Grid store created, size {}".format(len(store))
    return store

def makeStoreASRelGraph(pathToGraph):
    # Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue  # Skip comment lines
        line = line.replace('|', " ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
        if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
        linkNodes(store[nodes[0]], store[nodes[1]])
    print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
    return store

def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    nodeDeg = dict()
    for line in inData:
        if line.strip()[0] == "#": continue  # Skip comment lines
        line = line.replace('|', " ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
        if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
        nodeDeg[nodes[0]] += 1
        nodeDeg[nodes[1]] += 1
    sortedNodes = sorted(nodeDeg.keys(),
                         key=lambda x: (nodeDeg[x], x),
                         reverse=True)
    maxDegNodeID = sortedNodes[degIdx]
    return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)

def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue  # Skip comment lines
        line = line.replace('|', " ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in store:
            store[nodes[0]] = Node(nodes[0])
            if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
        if nodes[1] not in store:
            store[nodes[1]] = Node(nodes[1])
            if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
        linkNodes(store[nodes[0]], store[nodes[1]])
    print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
    return store

def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
    # Read from a DIMES csv-formatted graph from a gzip file
    store = dict()
    with gzip.open(pathToGraph, "r") as f:
        inData = f.readlines()
    size = len(inData)
    index = 0
    for edge in inData:
        if not index % 1000:
            pct = 100.0*index/size
            print "Processing edge {}, {:.2f}%".format(index, pct)
        index += 1
        dat = edge.rstrip().split(',')
        node1 = "N" + str(dat[0].strip())
        node2 = "N" + str(dat[1].strip())
        if '?' in node1 or '?' in node2: continue  # Unknown node
        if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
        if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
        if node1 not in store: store[node1] = Node(node1)
        if node2 not in store: store[node2] = Node(node2)
        if node1 != node2: linkNodes(store[node1], store[node2])
    print "DIMES graph successfully imported, size {}".format(len(store))
    return store

def makeStoreGeneratedGraph(pathToGraph, root=None):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue  # Skip comment lines
        nodes = map(int, line.strip().split(' ')[0:2])
        node1 = nodes[0]
        node2 = nodes[1]
        if node1 == root: node1 += 1000000
        if node2 == root: node2 += 1000000
        if node1 not in store: store[node1] = Node(node1)
        if node2 not in store: store[node2] = Node(node2)
        linkNodes(store[node1], store[node2])
    print "Generated graph successfully imported, size {}".format(len(store))
    return store

############################################
# Functions used as parts of network tests #
############################################

def idleUntilConverged(store):
    nodeIDs = sorted(store.keys())
    timeOfLastChange = 0
    step = 0
    # Idle until the network has converged
    while step - timeOfLastChange < 4*TIMEOUT:
        step += 1
        print "Step: {}, last change: {}".format(step, timeOfLastChange)
        changed = False
        for nodeID in nodeIDs:
            # Update node status, send messages
            changed |= store[nodeID].tick()
        for nodeID in nodeIDs:
            # Process messages
            changed |= store[nodeID].handleMessages()
        if changed: timeOfLastChange = step
    initTables(store)
    return store

def getCacheIndex(nodes, sourceIndex, destIndex):
    return sourceIndex*nodes + destIndex

def initTables(store):
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    print "Initializing routing tables for {} nodes".format(nNodes)
    for idx in xrange(nNodes):
        nodeID = nodeIDs[idx]
        store[nodeID].initTable()
    print "Routing tables initialized"
    return None

def getCache(store):
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    nodeIdxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
    cache = array.array("H", [0]*nNodes*nNodes)
    for sourceIdx in xrange(nNodes):
        sourceID = nodeIDs[sourceIdx]
        print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
        for destIdx in xrange(nNodes):
            destID = nodeIDs[destIdx]
            if sourceID == destID: nextHop = destID  # lookup would fail
            else: nextHop = store[sourceID].lookup(store[destID].info)
            nextHopIdx = nodeIdxs[nextHop]
            cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
    return cache
def testPaths(store, dists):
    cache = getCache(store)
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    results = dict()
    for sourceIdx in xrange(nNodes):
        sourceID = nodeIDs[sourceIdx]
        print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
        #dists = dijkstra(store, sourceID)
        for destIdx in xrange(nNodes):
            destID = nodeIDs[destIdx]
            if destID == sourceID: continue  # Skip self
            distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
            eHops = dists[distIdx]
            if not eHops: continue  # The network is split, no path exists
            hops = 0
            for pair in ((sourceIdx, destIdx),):
                nHops = 0
                locIdx = pair[0]
                dIdx = pair[1]
                while locIdx != dIdx:
                    locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
                    nHops += 1
                if not hops or nHops < hops: hops = nHops
            if eHops not in results: results[eHops] = dict()
            if hops not in results[eHops]: results[eHops][hops] = 0
            results[eHops][hops] += 1
    return results

def getAvgStretch(pathMatrix):
    avgStretch = 0.
    checked = 0.
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            count = pathMatrix[eHops][nHops]
            stretch = float(nHops)/float(max(1, eHops))
            avgStretch += stretch*count
            checked += count
    avgStretch /= max(1, checked)
    return avgStretch

def getMaxStretch(pathMatrix):
    maxStretch = 0.
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            stretch = float(nHops)/float(max(1, eHops))
            maxStretch = max(maxStretch, stretch)
    return maxStretch
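# (Illustrative note, not part of the original file: with a path matrix of
# {4: {4: 30, 6: 10}}, 30 paths have stretch 4/4 = 1.0 and 10 have 6/4 = 1.5,
# so getAvgStretch returns (1.0*30 + 1.5*10)/40 = 1.125 and getMaxStretch
# returns 1.5.)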
def getCertSizes(store):
    # Returns nCerts frequency distribution
    # De-duplicates common certs (for shared prefixes in the path)
    sizes = dict()
    for node in store.values():
        certs = set()
        for peer in node.peers.values():
            pCerts = set()
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            hops = peer.coords + peer.path[1:]
            for hopIdx in xrange(len(hops)-1):
                send = hops[hopIdx]
                if send == node.info.nodeID: continue  # We created it, already have it
                path = hops[0:hopIdx+2]
                # Each cert is signed by the sender
                # Includes information about the path from the sender to the next hop
                # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
                cert = "{}:{}".format(send, path)
                certs.add(cert)
        size = len(certs)
        if size not in sizes: sizes[size] = 0
        sizes[size] += 1
    return sizes
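# (Illustrative note, not part of the original file: for a peer whose combined
# hops list is [root, a, peer, self], the loop above collects the certs
#   "root:[root, a]", "a:[root, a, peer]", "peer:[root, a, peer, self]"
# so each hop vouches for the path up to and including its own next hop, and
# the set de-duplicates certs shared across peers with common path prefixes.)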
def getMinLinkCertSizes(store):
    # Returns nCerts frequency distribution
    # De-duplicates common certs (for shared prefixes in the path)
    # Based on the minimum number of certs that must be traded through a particular link
    # Handled per link
    sizes = dict()
    for node in store.values():
        peerCerts = dict()
        for peer in node.peers.values():
            pCerts = set()
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            hops = peer.coords + peer.path[1:]
            for hopIdx in xrange(len(hops)-1):
                send = hops[hopIdx]
                if send == node.info.nodeID: continue  # We created it, already have it
                path = hops[0:hopIdx+2]
                # Each cert is signed by the sender
                # Includes information about the path from the sender to the next hop
                # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
                cert = "{}:{}".format(send, path)
                pCerts.add(cert)
            peerCerts[peer.nodeID] = pCerts
        for peer in peerCerts:
            size = 0
            pCerts = peerCerts[peer]
            for cert in pCerts:
                required = True
                for p2 in peerCerts:
                    if p2 == peer: continue
                    p2Certs = peerCerts[p2]
                    if cert in p2Certs: required = False
                if required: size += 1
            if size not in sizes: sizes[size] = 0
            sizes[size] += 1
    return sizes
def getPathSizes(store):
    # Returns frequency distribution of the total number of hops in the routing table
    # I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
    sizes = dict()
    for node in store.values():
        size = 0
        for peer in node.peers.values():
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            peerSize = len(peer.coords) + len(peer.path) - 1  # double-counts peer, -1
            size += peerSize
        if size not in sizes: sizes[size] = 0
        sizes[size] += 1
    return sizes

def getPeerSizes(store):
    # Returns frequency distribution of the number of peers each node has
    sizes = dict()
    for node in store.values():
        nPeers = len(node.peers)
        if nPeers not in sizes: sizes[nPeers] = 0
        sizes[nPeers] += 1
    return sizes

def getAvgSize(sizes):
    sumSizes = 0
    nNodes = 0
    for size in sizes:
        count = sizes[size]
        sumSizes += size*count
        nNodes += count
    avgSize = float(sumSizes)/max(1, nNodes)
    return avgSize

def getMaxSize(sizes):
    return max(sizes.keys())

def getMinSize(sizes):
    return min(sizes.keys())

def getResults(pathMatrix):
    results = []
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            count = pathMatrix[eHops][nHops]
            results.append("{} {} {}".format(eHops, nHops, count))
    return '\n'.join(results)

####################################
# Functions to run different tests #
####################################
def runTest(store):
    # Runs the usual set of tests on the store
    # Does not save results, so only meant for quick tests
    # To e.g. check the code works, maybe warm up the pypy jit
    for node in store.values():
        node.info.time = random.randint(0, TIMEOUT)
        node.info.tstamp = TIMEOUT
    print "Begin testing network"
    dists = None
    if not dists: dists = dijkstrall(store)
    idleUntilConverged(store)
    pathMatrix = testPaths(store, dists)
    avgStretch = getAvgStretch(pathMatrix)
    maxStretch = getMaxStretch(pathMatrix)
    peers = getPeerSizes(store)
    certs = getCertSizes(store)
    paths = getPathSizes(store)
    linkCerts = getMinLinkCertSizes(store)
    avgPeerSize = getAvgSize(peers)
    maxPeerSize = getMaxSize(peers)
    avgCertSize = getAvgSize(certs)
    maxCertSize = getMaxSize(certs)
    avgPathSize = getAvgSize(paths)
    maxPathSize = getMaxSize(paths)
    avgLinkCert = getAvgSize(linkCerts)
    maxLinkCert = getMaxSize(linkCerts)
    totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
    totalLinks = sum(map(lambda x: x*peers[x], peers.keys()))  # one-way links
    avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
    print "Finished testing network"
    print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
    print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
    print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
    print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
    print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
    return  # End of function

def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc=1):
    # Checks performance for every possible choice of root node
    # Saves output for each root node to a separate file on disk
    # path = input path to some caida.org formatted AS-relationship graph
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    store = makeStoreASRelGraph(path)
    nodes = sorted(store.keys())
    for nodeIdx in xrange(len(nodes)):
        if nodeIdx % proc != 0: continue  # Work belongs to someone else
        rootNodeID = nodes[nodeIdx]
        outpath = outDir+"/{}".format(rootNodeID)
        if os.path.exists(outpath):
            print "Skipping {}, already processed".format(rootNodeID)
            continue
        store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
        for node in store.values():
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(nodeIdx, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        #break  # Stop after 1, because they can take forever
    return  # End of function
def timelineASTest():
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of AS-relationship graphs
    # Runs a test on each graph, selecting highest-degree node as the root
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-timeline-AS"
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    paths = sorted(glob.glob("asrel/datasets/*"))
    for path in paths:
        date = os.path.basename(path).split(".")[0]
        outpath = outDir+"/{}".format(date)
        if os.path.exists(outpath):
            print "Skipping {}, already processed".format(date)
            continue
        store = makeStoreASRelGraphMaxDeg(path)
        dists = None
        for node in store.values():
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(date, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished {} with {} nodes".format(date, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        #break  # Stop after 1, because they can take forever
    return  # End of function

def timelineDimesTest():
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of AS-relationship graphs
    # Runs a test on each graph, selecting highest-degree node as the root
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-timeline-dimes"
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    # Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
    paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
    exists = set(glob.glob(outDir+"/*"))
    for path in paths:
        date = os.path.basename(path).split(".")[0]
        outpath = outDir+"/{}".format(date)
        if outpath in exists:
            print "Skipping {}, already processed".format(date)
            continue
        store = makeStoreDimesEdges(path)
        # Get the highest degree node and make it root
        # Sorted by nodeID just to make it stable in the event of a tie
        nodeIDs = sorted(store.keys())
        bestRoot = ""
        bestDeg = 0
        for nodeID in nodeIDs:
            node = store[nodeID]
            if len(node.links) > bestDeg:
                bestRoot = nodeID
                bestDeg = len(node.links)
        assert bestRoot
        store = makeStoreDimesEdges(path, bestRoot)
        rootID = "R" + bestRoot[1:]
        assert rootID in store
        dists = None
        # Don't forget to set random seed before setting times
        # To make results reproducible
        nodeIDs = sorted(store.keys())
        random.seed(12345)
        for nodeID in nodeIDs:
            node = store[nodeID]
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(date, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished {} with {} nodes".format(date, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        break  # Stop after 1, because they can take forever
    return  # End of function
def scalingTest(maxTests=None, inputDir="graphs"):
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of nodes in a previously generated graph
    # Runs a test on each graph, testing each node as the root
    # if maxTests is set, tests only that number of roots (highest degree first)
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-{}".format(inputDir)
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    paths = sorted(glob.glob("{}/*".format(inputDir)))
    exists = set(glob.glob(outDir+"/*"))
    for path in paths:
        gc.collect()  # pypy waits for gc to close files
        graph = os.path.basename(path).split(".")[0]
        store = makeStoreGeneratedGraph(path)
        # Get the highest degree node and make it root
        # Sorted by nodeID just to make it stable in the event of a tie
        nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
        dists = None
        if maxTests: nodeIDs = nodeIDs[:maxTests]
        for nodeID in nodeIDs:
            nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
            outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
            if outpath in exists:
                print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
                continue
            store = makeStoreGeneratedGraph(path, nodeID)
            # Don't forget to set random seed before setting times
            random.seed(12345)  # To make results reproducible
            nIDs = sorted(store.keys())
            for nID in nIDs:
                node = store[nID]
                node.info.time = random.randint(0, TIMEOUT)
                node.info.tstamp = TIMEOUT
            print "Beginning {}, size {}".format(graph, len(store))
            if not dists: dists = dijkstrall(store)
            idleUntilConverged(store)
            pathMatrix = testPaths(store, dists)
            avgStretch = getAvgStretch(pathMatrix)
            maxStretch = getMaxStretch(pathMatrix)
            results = getResults(pathMatrix)
            with open(outpath, "w") as f:
                f.write(results)
            print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
            print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    return  # End of function

##################
# Main Execution #
##################

if __name__ == "__main__":
    if True:  # Run a quick test
        random.seed(12345)  # DEBUG
        store = makeStoreSquareGrid(4)
        runTest(store)  # Quick test
    store = None
    # Do some real work
    #runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
    #timelineDimesTest()
    #rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
    #timelineASTest()
    #rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
    #scalingTest(None, "graphs-20")  # First argument 1 to only test 1 root per graph
    #store = makeStoreGeneratedGraph("bgp_tables")
    #store = makeStoreGeneratedGraph("skitter")
    #store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list")  # http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
    #store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
    if store: runTest(store)
    #rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
    #scalingTest(1, "graphs-20")  # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-21")  # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-22")  # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-23")  # First argument 1 to only test 1 root per graph
    if not store:
        import sys
        args = sys.argv
        if len(args) == 2:
            job_number = int(sys.argv[1])
            #rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
            #rootNodeASTest("skitter", "out-skitter", None, job_number)
            rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
        else:
            print "Usage: {} job_number".format(args[0])
            print "job_number = which job set to run on this node (1-indexed)"
@ -1,907 +0,0 @@
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
# 1: Pick any node, here I'm using highest nodeID
# 2: Build spanning tree, each node stores path back to root
#    Optionally with weights for each hop
#    Ties broken by preferring a parent with higher degree
# 3: Distance metric: self->peer + (via tree) peer->dest
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
# 5: Source-route traffic using the better of those two paths

# Note: This makes no attempt to simulate a dynamic network
# E.g. A node's peers cannot be disconnected

# TODO:
# Make better use of drop?
# In particular, we should be ignoring *all* recently dropped *paths* to the root
#   To minimize route flapping
#   Not really an issue in the sim, but probably needed for a real network

import array
import gc
import glob
import gzip
import heapq
import os
import random
import time

#############
# Constants #
#############

# Reminder of where link cost comes in
LINK_COST = 1

# Timeout before dropping something, in simulated seconds
TIMEOUT = 60

###########
# Classes #
###########

class PathInfo:
    def __init__(self, nodeID):
        self.nodeID = nodeID  # e.g. IP
        self.coords = []      # Position in tree
        self.tstamp = 0       # Timestamp from sender, to keep track of old vs new info
        self.degree = 0       # Number of peers the sender has, used to break ties
        # The above should be signed
        self.path = [nodeID]  # Path to node (in path-vector route)
        self.time = 0         # Time info was updated, to keep track of e.g. timeouts
        self.treeID = nodeID  # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
    def clone(self):
        # Return a deep-enough copy of the path
        clone = PathInfo(None)
        clone.nodeID = self.nodeID
        clone.coords = self.coords[:]
        clone.tstamp = self.tstamp
        clone.degree = self.degree
        clone.path = self.path[:]
        clone.time = self.time
        clone.treeID = self.treeID
        return clone
# End class PathInfo

class Node:
    def __init__(self, nodeID):
        self.info = PathInfo(nodeID)  # Self NodeInfo
        self.root = None    # PathInfo to node at root of tree
        self.drop = dict()  # PathInfo to nodes from clus that have timed out
        self.peers = dict() # PathInfo to peers
        self.links = dict() # Links to peers (to pass messages)
        self.msgs = []      # Said messages
        self.table = dict() # Pre-computed lookup table of peer info

    def tick(self):
        # Do periodic maintenance stuff, including push updates
        self.info.time += 1
        if self.info.time > self.info.tstamp + TIMEOUT/4:
            # Update timestamp at least once every 1/4 timeout period
            # This should probably be randomized in a real implementation
            self.info.tstamp = self.info.time
            self.info.degree = len(self.peers)
            #self.info.degree = 0  # TODO decide if degree should be used
        changed = False  # Used to track when the network has converged
|
|
||||||
changed |= self.cleanRoot()
|
|
||||||
self.cleanDropped()
|
|
||||||
# Should probably send messages infrequently if there's nothing new to report
|
|
||||||
if self.info.tstamp == self.info.time:
|
|
||||||
msg = self.createMessage()
|
|
||||||
self.sendMessage(msg)
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def cleanRoot(self):
|
|
||||||
changed = False
|
|
||||||
if self.root and self.info.time - self.root.time > TIMEOUT:
|
|
||||||
print "DEBUG: clean root,", self.root.path
|
|
||||||
self.drop[self.root.treeID] = self.root
|
|
||||||
self.root = None
|
|
||||||
changed = True
|
|
||||||
if not self.root or self.root.treeID < self.info.treeID:
|
|
||||||
# No need to drop someone who'se worse than us
|
|
||||||
self.info.coords = [self.info.nodeID]
|
|
||||||
self.root = self.info.clone()
|
|
||||||
changed = True
|
|
||||||
elif self.root.treeID == self.info.treeID:
|
|
||||||
self.root = self.info.clone()
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def cleanDropped(self):
|
|
||||||
# May actually be a treeID... better to iterate over keys explicitly
|
|
||||||
nodeIDs = sorted(self.drop.keys())
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
node = self.drop[nodeID]
|
|
||||||
if self.info.time - node.time > 4*TIMEOUT:
|
|
||||||
del self.drop[nodeID]
|
|
||||||
return None
|
|
||||||
|
|
||||||
def createMessage(self):
|
|
||||||
# Message is just a tuple
|
|
||||||
# First element is the sender
|
|
||||||
# Second element is the root
|
|
||||||
# We will .clone() everything during the send operation
|
|
||||||
msg = (self.info, self.root)
|
|
||||||
return msg
|
|
||||||
|
|
||||||
def sendMessage(self, msg):
|
|
||||||
for link in self.links.values():
|
|
||||||
newMsg = (msg[0].clone(), msg[1].clone())
|
|
||||||
link.msgs.append(newMsg)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def handleMessages(self):
|
|
||||||
changed = False
|
|
||||||
while self.msgs:
|
|
||||||
changed |= self.handleMessage(self.msgs.pop())
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def handleMessage(self, msg):
|
|
||||||
changed = False
|
|
||||||
for node in msg:
|
|
||||||
# Update the path and timestamp for the sender and root info
|
|
||||||
node.path.append(self.info.nodeID)
|
|
||||||
node.time = self.info.time
|
|
||||||
# Update the sender's info in our list of peers
|
|
||||||
sender = msg[0]
|
|
||||||
self.peers[sender.nodeID] = sender
|
|
||||||
# Decide if we want to update the root
|
|
||||||
root = msg[1]
|
|
||||||
updateRoot = False
|
|
||||||
isSameParent = False
|
|
||||||
isBetterParent = False
|
|
||||||
if len(self.root.path) > 1 and len(root.path) > 1:
|
|
||||||
parent = self.peers[self.root.path[-2]]
|
|
||||||
if parent.nodeID == sender.nodeID: isSameParent = True
|
|
||||||
if sender.degree > parent.degree:
|
|
||||||
# This would also be where you check path uptime/reliability/whatever
|
|
||||||
# All else being equal, we prefer parents with high degree
|
|
||||||
# We are trusting peers to report degree correctly in this case
|
|
||||||
# So expect some performance reduction if your peers aren't trustworthy
|
|
||||||
# (Lies can increase average stretch by a few %)
|
|
||||||
isBetterParent = True
|
|
||||||
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
|
|
||||||
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
|
|
||||||
elif not self.root: updateRoot = True
|
|
||||||
elif self.root.treeID < root.treeID: updateRoot = True
|
|
||||||
elif self.root.treeID != root.treeID: pass
|
|
||||||
elif self.root.tstamp > root.tstamp: pass
|
|
||||||
elif len(root.path) < len(self.root.path): updateRoot = True
|
|
||||||
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
|
|
||||||
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
|
|
||||||
if updateRoot:
|
|
||||||
if not self.root or self.root.path != root.path: changed = True
|
|
||||||
self.root = root
|
|
||||||
self.info.coords = self.root.path
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def lookup(self, dest):
|
|
||||||
# Note: Can loop in an unconverged network
|
|
||||||
# The person looking up the route is responsible for checking for loops
|
|
||||||
best = None
|
|
||||||
bestDist = 0
|
|
||||||
bestDeg = 0
|
|
||||||
for node in self.peers.itervalues():
|
|
||||||
# dist = distance to node + dist (on tree) from node to dest
|
|
||||||
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
|
|
||||||
deg = node.degree
|
|
||||||
if not best or dist < bestDist or (best == bestDist and deg > bestDeg):
|
|
||||||
best = node
|
|
||||||
bestDist = dist
|
|
||||||
bestDeg = deg
|
|
||||||
if best:
|
|
||||||
next = best.path[-2]
|
|
||||||
assert next in self.peers
|
|
||||||
return next
|
|
||||||
else:
|
|
||||||
# We failed to look something up
|
|
||||||
# TODO some way to signal this which doesn't crash
|
|
||||||
assert False
|
|
||||||
|
|
||||||
def initTable(self):
|
|
||||||
# Pre-computes a lookup table for destination coords
|
|
||||||
# Insert parent first so you prefer them as a next-hop
|
|
||||||
self.table.clear()
|
|
||||||
parent = self.info.nodeID
|
|
||||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
|
||||||
for peer in self.peers.itervalues():
|
|
||||||
current = self.table
|
|
||||||
for coord in peer.coords:
|
|
||||||
if coord not in current: current[coord] = (peer.nodeID, dict())
|
|
||||||
old = current[coord]
|
|
||||||
next = old[1]
|
|
||||||
oldPeer = self.peers[old[0]]
|
|
||||||
oldDist = len(oldPeer.coords)
|
|
||||||
oldDeg = oldPeer.degree
|
|
||||||
newDist = len(peer.coords)
|
|
||||||
newDeg = peer.degree
|
|
||||||
# Prefer parent
|
|
||||||
# Else prefer short distance from root
|
|
||||||
# If equal distance, prefer high degree
|
|
||||||
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
|
|
||||||
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
|
|
||||||
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
|
|
||||||
current = next
|
|
||||||
return None
|
|
||||||
|
|
||||||
def lookup_new(self, dest):
|
|
||||||
# Use pre-computed lookup table to look up next hop for dest coords
|
|
||||||
assert self.table
|
|
||||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
|
||||||
else: parent = None
|
|
||||||
current = (parent, self.table)
|
|
||||||
c = None
|
|
||||||
for coord in dest.coords:
|
|
||||||
c = coord
|
|
||||||
if coord not in current[1]: break
|
|
||||||
current = current[1][coord]
|
|
||||||
next = current[0]
|
|
||||||
if c in self.peers: next = c
|
|
||||||
if next not in self.peers:
|
|
||||||
assert next == None
|
|
||||||
# You're the root of a different connected component
|
|
||||||
# You'd drop the packet in this case
|
|
||||||
# To make the path cache not die, need to return a valid next hop...
|
|
||||||
# Returning self for that reason
|
|
||||||
next = self.info.nodeID
|
|
||||||
return next
|
|
||||||
# End class Node
|
|
||||||
|
|
||||||
####################
|
|
||||||
# Helper Functions #
|
|
||||||
####################
|
|
||||||
|
|
||||||
def getIndexOfLCA(source, dest):
|
|
||||||
# Return index of last common ancestor in source/dest coords
|
|
||||||
# -1 if no common ancestor (e.g. different roots)
|
|
||||||
lcaIdx = -1
|
|
||||||
minLen = min(len(source), len(dest))
|
|
||||||
for idx in xrange(minLen):
|
|
||||||
if source[idx] == dest[idx]: lcaIdx = idx
|
|
||||||
else: break
|
|
||||||
return lcaIdx
|
|
||||||
|
|
||||||
def treePath(source, dest):
|
|
||||||
# Return path with source at head and dest at tail
|
|
||||||
lastMatch = getIndexOfLCA(source, dest)
|
|
||||||
path = dest[-1:lastMatch:-1] + source[lastMatch:]
|
|
||||||
assert path[0] == dest[-1]
|
|
||||||
assert path[-1] == source[-1]
|
|
||||||
return path
|
|
||||||
|
|
||||||
def treeDist(source, dest):
|
|
||||||
dist = len(source) + len(dest)
|
|
||||||
lcaIdx = getIndexOfLCA(source, dest)
|
|
||||||
dist -= 2*(lcaIdx+1)
|
|
||||||
return dist
|
|
||||||
|
|
||||||
def dijkstra(nodestore, startingNodeID):
|
|
||||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
|
||||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
|
||||||
nodeIDs = sorted(nodestore.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
idxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[nodeIdx]
|
|
||||||
idxs[nodeID] = nodeIdx
|
|
||||||
dists = array.array("H", [0]*nNodes)
|
|
||||||
queue = [(0, startingNodeID)]
|
|
||||||
while queue:
|
|
||||||
dist, nodeID = heapq.heappop(queue)
|
|
||||||
idx = idxs[nodeID]
|
|
||||||
if not dists[idx]: # Unvisited, otherwise we skip it
|
|
||||||
dists[idx] = dist
|
|
||||||
for peer in nodestore[nodeID].links:
|
|
||||||
if not dists[idxs[peer]]:
|
|
||||||
# Peer is also unvisited, so add to queue
|
|
||||||
heapq.heappush(queue, (dist+LINK_COST, peer))
|
|
||||||
return dists
|
|
||||||
|
|
||||||
def dijkstrall(nodestore):
|
|
||||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
|
||||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
|
||||||
nodeIDs = sorted(nodestore.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
idxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[nodeIdx]
|
|
||||||
idxs[nodeID] = nodeIdx
|
|
||||||
dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end)
|
|
||||||
for sourceIdx in xrange(nNodes):
|
|
||||||
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
|
|
||||||
queue = [(0, sourceIdx)]
|
|
||||||
while queue:
|
|
||||||
dist, nodeIdx = heapq.heappop(queue)
|
|
||||||
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
|
|
||||||
if not dists[distIdx]: # Unvisited, otherwise we skip it
|
|
||||||
dists[distIdx] = dist
|
|
||||||
for peer in nodestore[nodeIDs[nodeIdx]].links:
|
|
||||||
pIdx = idxs[peer]
|
|
||||||
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
|
|
||||||
if not dists[pdIdx]:
|
|
||||||
# Peer is also unvisited, so add to queue
|
|
||||||
heapq.heappush(queue, (dist+LINK_COST, pIdx))
|
|
||||||
return dists
|
|
||||||
|
|
||||||
def linkNodes(node1, node2):
|
|
||||||
node1.links[node2.info.nodeID] = node2
|
|
||||||
node2.links[node1.info.nodeID] = node1
|
|
||||||
|
|
||||||
############################
|
|
||||||
# Store topology functions #
|
|
||||||
############################
|
|
||||||
|
|
||||||
def makeStoreSquareGrid(sideLength, randomize=True):
|
|
||||||
# Simple grid in a sideLength*sideLength square
|
|
||||||
# Just used to validate that the code runs
|
|
||||||
store = dict()
|
|
||||||
nodeIDs = list(range(sideLength*sideLength))
|
|
||||||
if randomize: random.shuffle(nodeIDs)
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
store[nodeID] = Node(nodeID)
|
|
||||||
for index in xrange(len(nodeIDs)):
|
|
||||||
if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
|
|
||||||
if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
|
|
||||||
print "Grid store created, size {}".format(len(store))
|
|
||||||
return store
|
|
||||||
|
|
||||||
def makeStoreASRelGraph(pathToGraph):
|
|
||||||
#Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
|
|
||||||
with open(pathToGraph, "r") as f:
|
|
||||||
inData = f.readlines()
|
|
||||||
store = dict()
|
|
||||||
for line in inData:
|
|
||||||
if line.strip()[0] == "#": continue # Skip comment lines
|
|
||||||
line = line.replace('|'," ")
|
|
||||||
nodes = map(int, line.split()[0:2])
|
|
||||||
if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
|
|
||||||
if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
|
|
||||||
linkNodes(store[nodes[0]], store[nodes[1]])
|
|
||||||
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
|
|
||||||
return store
|
|
||||||
|
|
||||||
def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
|
|
||||||
with open(pathToGraph, "r") as f:
|
|
||||||
inData = f.readlines()
|
|
||||||
store = dict()
|
|
||||||
nodeDeg = dict()
|
|
||||||
for line in inData:
|
|
||||||
if line.strip()[0] == "#": continue # Skip comment lines
|
|
||||||
line = line.replace('|'," ")
|
|
||||||
nodes = map(int, line.split()[0:2])
|
|
||||||
if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
|
|
||||||
if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
|
|
||||||
nodeDeg[nodes[0]] += 1
|
|
||||||
nodeDeg[nodes[1]] += 1
|
|
||||||
sortedNodes = sorted(nodeDeg.keys(), \
|
|
||||||
key=lambda x: (nodeDeg[x], x), \
|
|
||||||
reverse=True)
|
|
||||||
maxDegNodeID = sortedNodes[degIdx]
|
|
||||||
return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)
|
|
||||||
|
|
||||||
def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
|
|
||||||
with open(pathToGraph, "r") as f:
|
|
||||||
inData = f.readlines()
|
|
||||||
store = dict()
|
|
||||||
for line in inData:
|
|
||||||
if line.strip()[0] == "#": continue # Skip comment lines
|
|
||||||
line = line.replace('|'," ")
|
|
||||||
nodes = map(int, line.split()[0:2])
|
|
||||||
if nodes[0] not in store:
|
|
||||||
store[nodes[0]] = Node(nodes[0])
|
|
||||||
if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
|
|
||||||
if nodes[1] not in store:
|
|
||||||
store[nodes[1]] = Node(nodes[1])
|
|
||||||
if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
|
|
||||||
linkNodes(store[nodes[0]], store[nodes[1]])
|
|
||||||
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
|
|
||||||
return store
|
|
||||||
|
|
||||||
def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
|
|
||||||
# Read from a DIMES csv-formatted graph from a gzip file
|
|
||||||
store = dict()
|
|
||||||
with gzip.open(pathToGraph, "r") as f:
|
|
||||||
inData = f.readlines()
|
|
||||||
size = len(inData)
|
|
||||||
index = 0
|
|
||||||
for edge in inData:
|
|
||||||
if not index % 1000:
|
|
||||||
pct = 100.0*index/size
|
|
||||||
print "Processing edge {}, {:.2f}%".format(index, pct)
|
|
||||||
index += 1
|
|
||||||
dat = edge.rstrip().split(',')
|
|
||||||
node1 = "N" + str(dat[0].strip())
|
|
||||||
node2 = "N" + str(dat[1].strip())
|
|
||||||
if '?' in node1 or '?' in node2: continue #Unknown node
|
|
||||||
if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
|
|
||||||
if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
|
|
||||||
if node1 not in store: store[node1] = Node(node1)
|
|
||||||
if node2 not in store: store[node2] = Node(node2)
|
|
||||||
if node1 != node2: linkNodes(store[node1], store[node2])
|
|
||||||
print "DIMES graph successfully imported, size {}".format(len(store))
|
|
||||||
return store
|
|
||||||
|
|
||||||
def makeStoreGeneratedGraph(pathToGraph, root=None):
|
|
||||||
with open(pathToGraph, "r") as f:
|
|
||||||
inData = f.readlines()
|
|
||||||
store = dict()
|
|
||||||
for line in inData:
|
|
||||||
if line.strip()[0] == "#": continue # Skip comment lines
|
|
||||||
nodes = map(int, line.strip().split(' ')[0:2])
|
|
||||||
node1 = nodes[0]
|
|
||||||
node2 = nodes[1]
|
|
||||||
if node1 == root: node1 += 1000000
|
|
||||||
if node2 == root: node2 += 1000000
|
|
||||||
if node1 not in store: store[node1] = Node(node1)
|
|
||||||
if node2 not in store: store[node2] = Node(node2)
|
|
||||||
linkNodes(store[node1], store[node2])
|
|
||||||
print "Generated graph successfully imported, size {}".format(len(store))
|
|
||||||
return store
|
|
||||||
|
|
||||||
|
|
||||||
############################################
|
|
||||||
# Functions used as parts of network tests #
|
|
||||||
############################################
|
|
||||||
|
|
||||||
def idleUntilConverged(store):
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
timeOfLastChange = 0
|
|
||||||
step = 0
|
|
||||||
# Idle until the network has converged
|
|
||||||
while step - timeOfLastChange < 4*TIMEOUT:
|
|
||||||
step += 1
|
|
||||||
print "Step: {}, last change: {}".format(step, timeOfLastChange)
|
|
||||||
changed = False
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
# Update node status, send messages
|
|
||||||
changed |= store[nodeID].tick()
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
# Process messages
|
|
||||||
changed |= store[nodeID].handleMessages()
|
|
||||||
if changed: timeOfLastChange = step
|
|
||||||
initTables(store)
|
|
||||||
return store
|
|
||||||
|
|
||||||
def getCacheIndex(nodes, sourceIndex, destIndex):
|
|
||||||
return sourceIndex*nodes + destIndex
|
|
||||||
|
|
||||||
def initTables(store):
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
print "Initializing routing tables for {} nodes".format(nNodes)
|
|
||||||
for idx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[idx]
|
|
||||||
store[nodeID].initTable()
|
|
||||||
print "Routing tables initialized"
|
|
||||||
return None
|
|
||||||
|
|
||||||
def getCache(store):
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
nodeIdxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
|
|
||||||
cache = array.array("H", [0]*nNodes*nNodes)
|
|
||||||
for sourceIdx in xrange(nNodes):
|
|
||||||
sourceID = nodeIDs[sourceIdx]
|
|
||||||
print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
|
|
||||||
for destIdx in xrange(nNodes):
|
|
||||||
destID = nodeIDs[destIdx]
|
|
||||||
if sourceID == destID: nextHop = destID # lookup would fail
|
|
||||||
else: nextHop = store[sourceID].lookup(store[destID].info)
|
|
||||||
nextHopIdx = nodeIdxs[nextHop]
|
|
||||||
cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
|
|
||||||
return cache
|
|
||||||
|
|
||||||
def testPaths(store, dists):
|
|
||||||
cache = getCache(store)
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
idxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[nodeIdx]
|
|
||||||
idxs[nodeID] = nodeIdx
|
|
||||||
results = dict()
|
|
||||||
for sourceIdx in xrange(nNodes):
|
|
||||||
sourceID = nodeIDs[sourceIdx]
|
|
||||||
print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
|
|
||||||
#dists = dijkstra(store, sourceID)
|
|
||||||
for destIdx in xrange(nNodes):
|
|
||||||
destID = nodeIDs[destIdx]
|
|
||||||
if destID == sourceID: continue # Skip self
|
|
||||||
distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
|
|
||||||
eHops = dists[distIdx]
|
|
||||||
if not eHops: continue # The network is split, no path exists
|
|
||||||
hops = 0
|
|
||||||
for pair in ((sourceIdx, destIdx), (destIdx, sourceIdx)): # Either direction because source routing
|
|
||||||
nHops = 0
|
|
||||||
locIdx = pair[0]
|
|
||||||
dIdx = pair[1]
|
|
||||||
while locIdx != dIdx:
|
|
||||||
locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
|
|
||||||
nHops += 1
|
|
||||||
if not hops or nHops < hops: hops = nHops
|
|
||||||
if eHops not in results: results[eHops] = dict()
|
|
||||||
if hops not in results[eHops]: results[eHops][hops] = 0
|
|
||||||
results[eHops][hops] += 1
|
|
||||||
return results
|
|
||||||
|
|
||||||
def getAvgStretch(pathMatrix):
|
|
||||||
avgStretch = 0.
|
|
||||||
checked = 0.
|
|
||||||
for eHops in sorted(pathMatrix.keys()):
|
|
||||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
|
||||||
count = pathMatrix[eHops][nHops]
|
|
||||||
stretch = float(nHops)/float(max(1, eHops))
|
|
||||||
avgStretch += stretch*count
|
|
||||||
checked += count
|
|
||||||
avgStretch /= max(1, checked)
|
|
||||||
return avgStretch
|
|
||||||
|
|
||||||
def getMaxStretch(pathMatrix):
|
|
||||||
maxStretch = 0.
|
|
||||||
for eHops in sorted(pathMatrix.keys()):
|
|
||||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
|
||||||
stretch = float(nHops)/float(max(1, eHops))
|
|
||||||
maxStretch = max(maxStretch, stretch)
|
|
||||||
return maxStretch
|
|
||||||
|
|
||||||
def getCertSizes(store):
|
|
||||||
# Returns nCerts frequency distribution
|
|
||||||
# De-duplicates common certs (for shared prefixes in the path)
|
|
||||||
sizes = dict()
|
|
||||||
for node in store.values():
|
|
||||||
certs = set()
|
|
||||||
for peer in node.peers.values():
|
|
||||||
pCerts = set()
|
|
||||||
assert len(peer.path) == 2
|
|
||||||
assert peer.coords[-1] == peer.path[0]
|
|
||||||
hops = peer.coords + peer.path[1:]
|
|
||||||
for hopIdx in xrange(len(hops)-1):
|
|
||||||
send = hops[hopIdx]
|
|
||||||
if send == node.info.nodeID: continue # We created it, already have it
|
|
||||||
path = hops[0:hopIdx+2]
|
|
||||||
# Each cert is signed by the sender
|
|
||||||
# Includes information about the path from the sender to the next hop
|
|
||||||
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
|
|
||||||
cert = "{}:{}".format(send, path)
|
|
||||||
certs.add(cert)
|
|
||||||
size = len(certs)
|
|
||||||
if size not in sizes: sizes[size] = 0
|
|
||||||
sizes[size] += 1
|
|
||||||
return sizes
|
|
||||||
|
|
||||||
def getMinLinkCertSizes(store):
|
|
||||||
# Returns nCerts frequency distribution
|
|
||||||
# De-duplicates common certs (for shared prefixes in the path)
|
|
||||||
# Based on the minimum number of certs that must be traded through a particular link
|
|
||||||
# Handled per link
|
|
||||||
sizes = dict()
|
|
||||||
for node in store.values():
|
|
||||||
peerCerts = dict()
|
|
||||||
for peer in node.peers.values():
|
|
||||||
pCerts = set()
|
|
||||||
assert len(peer.path) == 2
|
|
||||||
assert peer.coords[-1] == peer.path[0]
|
|
||||||
hops = peer.coords + peer.path[1:]
|
|
||||||
for hopIdx in xrange(len(hops)-1):
|
|
||||||
send = hops[hopIdx]
|
|
||||||
if send == node.info.nodeID: continue # We created it, already have it
|
|
||||||
path = hops[0:hopIdx+2]
|
|
||||||
# Each cert is signed by the sender
|
|
||||||
# Includes information about the path from the sender to the next hop
|
|
||||||
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
|
|
||||||
cert = "{}:{}".format(send, path)
|
|
||||||
pCerts.add(cert)
|
|
||||||
peerCerts[peer.nodeID] = pCerts
|
|
||||||
for peer in peerCerts:
|
|
||||||
size = 0
|
|
||||||
pCerts = peerCerts[peer]
|
|
||||||
for cert in pCerts:
|
|
||||||
required = True
|
|
||||||
for p2 in peerCerts:
|
|
||||||
if p2 == peer: continue
|
|
||||||
p2Certs = peerCerts[p2]
|
|
||||||
if cert in p2Certs: required = False
|
|
||||||
if required: size += 1
|
|
||||||
if size not in sizes: sizes[size] = 0
|
|
||||||
sizes[size] += 1
|
|
||||||
return sizes
|
|
||||||
|
|
||||||
def getPathSizes(store):
|
|
||||||
# Returns frequency distribution of the total number of hops the routing table
|
|
||||||
# I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
|
|
||||||
sizes = dict()
|
|
||||||
for node in store.values():
|
|
||||||
size = 0
|
|
||||||
for peer in node.peers.values():
|
|
||||||
assert len(peer.path) == 2
|
|
||||||
assert peer.coords[-1] == peer.path[0]
|
|
||||||
peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
|
|
||||||
size += peerSize
|
|
||||||
if size not in sizes: sizes[size] = 0
|
|
||||||
sizes[size] += 1
|
|
||||||
return sizes
|
|
||||||
|
|
||||||
def getPeerSizes(store):
|
|
||||||
# Returns frequency distribution of the number of peers each node has
|
|
||||||
sizes = dict()
|
|
||||||
for node in store.values():
|
|
||||||
nPeers = len(node.peers)
|
|
||||||
if nPeers not in sizes: sizes[nPeers] = 0
|
|
||||||
sizes[nPeers] += 1
|
|
||||||
return sizes
|
|
||||||
|
|
||||||
def getAvgSize(sizes):
|
|
||||||
sumSizes = 0
|
|
||||||
nNodes = 0
|
|
||||||
for size in sizes:
|
|
||||||
count = sizes[size]
|
|
||||||
sumSizes += size*count
|
|
||||||
nNodes += count
|
|
||||||
avgSize = float(sumSizes)/max(1, nNodes)
|
|
||||||
return avgSize
|
|
||||||
|
|
||||||
def getMaxSize(sizes):
|
|
||||||
return max(sizes.keys())
|
|
||||||
|
|
||||||
def getMinSize(sizes):
|
|
||||||
return min(sizes.keys())
|
|
||||||
|
|
||||||
def getResults(pathMatrix):
|
|
||||||
results = []
|
|
||||||
for eHops in sorted(pathMatrix.keys()):
|
|
||||||
for nHops in sorted(pathMatrix[eHops].keys()):
|
|
||||||
count = pathMatrix[eHops][nHops]
|
|
||||||
results.append("{} {} {}".format(eHops, nHops, count))
|
|
||||||
return '\n'.join(results)
|
|
||||||
|
|
||||||
####################################
|
|
||||||
# Functions to run different tests #
|
|
||||||
####################################
|
|
||||||
|
|
||||||
def runTest(store):
|
|
||||||
# Runs the usual set of tests on the store
|
|
||||||
# Does not save results, so only meant for quick tests
|
|
||||||
# To e.g. check the code works, maybe warm up the pypy jit
|
|
||||||
for node in store.values():
|
|
||||||
node.info.time = random.randint(0, TIMEOUT)
|
|
||||||
node.info.tstamp = TIMEOUT
|
|
||||||
print "Begin testing network"
|
|
||||||
dists = None
|
|
||||||
if not dists: dists = dijkstrall(store)
|
|
||||||
idleUntilConverged(store)
|
|
||||||
pathMatrix = testPaths(store, dists)
|
|
||||||
avgStretch = getAvgStretch(pathMatrix)
|
|
||||||
maxStretch = getMaxStretch(pathMatrix)
|
|
||||||
peers = getPeerSizes(store)
|
|
||||||
certs = getCertSizes(store)
|
|
||||||
paths = getPathSizes(store)
|
|
||||||
linkCerts = getMinLinkCertSizes(store)
|
|
||||||
avgPeerSize = getAvgSize(peers)
|
|
||||||
maxPeerSize = getMaxSize(peers)
|
|
||||||
avgCertSize = getAvgSize(certs)
|
|
||||||
maxCertSize = getMaxSize(certs)
|
|
||||||
avgPathSize = getAvgSize(paths)
|
|
||||||
maxPathSize = getMaxSize(paths)
|
|
||||||
avgLinkCert = getAvgSize(linkCerts)
|
|
||||||
maxLinkCert = getMaxSize(linkCerts)
|
|
||||||
totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
|
|
||||||
totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
|
|
||||||
avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
|
|
||||||
print "Finished testing network"
|
|
||||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
|
||||||
print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
|
|
||||||
print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
|
|
||||||
print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
|
|
||||||
print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
|
|
||||||
print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
|
|
||||||
return # End of function
|
|
||||||
|
|
||||||
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc = 1):
|
|
||||||
# Checks performance for every possible choice of root node
|
|
||||||
# Saves output for each root node to a separate file on disk
|
|
||||||
# path = input path to some caida.org formatted AS-relationship graph
|
|
||||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
|
||||||
assert os.path.exists(outDir)
|
|
||||||
store = makeStoreASRelGraph(path)
|
|
||||||
nodes = sorted(store.keys())
|
|
||||||
for nodeIdx in xrange(len(nodes)):
|
|
||||||
if nodeIdx % proc != 0: continue # Work belongs to someone else
|
|
||||||
rootNodeID = nodes[nodeIdx]
|
|
||||||
outpath = outDir+"/{}".format(rootNodeID)
|
|
||||||
if os.path.exists(outpath):
|
|
||||||
print "Skipping {}, already processed".format(rootNodeID)
|
|
||||||
continue
|
|
||||||
store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
|
|
||||||
for node in store.values():
|
|
||||||
node.info.time = random.randint(0, TIMEOUT)
|
|
||||||
node.info.tstamp = TIMEOUT
|
|
||||||
print "Beginning {}, size {}".format(nodeIdx, len(store))
|
|
||||||
if not dists: dists = dijkstrall(store)
|
|
||||||
idleUntilConverged(store)
|
|
||||||
pathMatrix = testPaths(store, dists)
|
|
||||||
avgStretch = getAvgStretch(pathMatrix)
|
|
||||||
maxStretch = getMaxStretch(pathMatrix)
|
|
||||||
results = getResults(pathMatrix)
|
|
||||||
with open(outpath, "w") as f:
|
|
||||||
f.write(results)
|
|
||||||
print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
|
|
||||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
|
||||||
#break # Stop after 1, because they can take forever
|
|
||||||
return # End of function
|
|
||||||
|
|
||||||
def timelineASTest():
|
|
||||||
# Meant to study the performance of the network as a function of network size
|
|
||||||
# Loops over a set of AS-relationship graphs
|
|
||||||
# Runs a test on each graph, selecting highest-degree node as the root
|
|
||||||
# Saves results for each graph to a separate file on disk
|
|
||||||
outDir = "output-treesim-timeline-AS"
|
|
||||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
|
||||||
assert os.path.exists(outDir)
|
|
||||||
paths = sorted(glob.glob("asrel/datasets/*"))
|
|
||||||
for path in paths:
|
|
||||||
date = os.path.basename(path).split(".")[0]
|
|
||||||
outpath = outDir+"/{}".format(date)
|
|
||||||
if os.path.exists(outpath):
|
|
||||||
print "Skipping {}, already processed".format(date)
|
|
||||||
continue
|
|
||||||
store = makeStoreASRelGraphMaxDeg(path)
|
|
||||||
dists = None
|
|
||||||
for node in store.values():
|
|
||||||
node.info.time = random.randint(0, TIMEOUT)
|
|
||||||
node.info.tstamp = TIMEOUT
|
|
||||||
print "Beginning {}, size {}".format(date, len(store))
|
|
||||||
if not dists: dists = dijkstrall(store)
|
|
||||||
idleUntilConverged(store)
|
|
||||||
pathMatrix = testPaths(store, dists)
|
|
||||||
avgStretch = getAvgStretch(pathMatrix)
|
|
||||||
maxStretch = getMaxStretch(pathMatrix)
|
|
||||||
results = getResults(pathMatrix)
|
|
||||||
with open(outpath, "w") as f:
|
|
||||||
f.write(results)
|
|
||||||
print "Finished {} with {} nodes".format(date, len(store))
|
|
||||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
|
||||||
#break # Stop after 1, because they can take forever
|
|
||||||
return # End of function
|
|
||||||
|
|
||||||
def timelineDimesTest():
|
|
||||||
# Meant to study the performance of the network as a function of network size
|
|
||||||
# Loops over a set of AS-relationship graphs
|
|
||||||
# Runs a test on each graph, selecting highest-degree node as the root
|
|
||||||
# Saves results for each graph to a separate file on disk
|
|
||||||
outDir = "output-treesim-timeline-dimes"
|
|
||||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
|
||||||
assert os.path.exists(outDir)
|
|
||||||
# Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
|
|
||||||
paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
|
|
||||||
exists = set(glob.glob(outDir+"/*"))
|
|
||||||
for path in paths:
|
|
||||||
date = os.path.basename(path).split(".")[0]
|
|
||||||
outpath = outDir+"/{}".format(date)
|
|
||||||
if outpath in exists:
|
|
||||||
print "Skipping {}, already processed".format(date)
|
|
||||||
continue
|
|
||||||
store = makeStoreDimesEdges(path)
|
|
||||||
# Get the highest degree node and make it root
|
|
||||||
# Sorted by nodeID just to make it stable in the event of a tie
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
bestRoot = ""
|
|
||||||
bestDeg = 0
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
node = store[nodeID]
|
|
||||||
if len(node.links) > bestDeg:
|
|
||||||
bestRoot = nodeID
|
|
||||||
bestDeg = len(node.links)
|
|
||||||
assert bestRoot
|
|
||||||
store = makeStoreDimesEdges(path, bestRoot)
|
|
||||||
rootID = "R" + bestRoot[1:]
|
|
||||||
assert rootID in store
|
|
||||||
# Don't forget to set random seed before setitng times
|
|
||||||
# To make results reproducible
|
|
||||||
nodeIDs = sorted(store.keys())
|
|
||||||
random.seed(12345)
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
node = store[nodeID]
|
|
||||||
node.info.time = random.randint(0, TIMEOUT)
|
|
||||||
node.info.tstamp = TIMEOUT
|
|
||||||
print "Beginning {}, size {}".format(date, len(store))
|
|
||||||
if not dists: dists = dijkstrall(store)
|
|
||||||
idleUntilConverged(store)
|
|
||||||
pathMatrix = testPaths(store, dists)
|
|
||||||
avgStretch = getAvgStretch(pathMatrix)
|
|
||||||
maxStretch = getMaxStretch(pathMatrix)
|
|
||||||
results = getResults(pathMatrix)
|
|
||||||
with open(outpath, "w") as f:
|
|
||||||
f.write(results)
|
|
||||||
print "Finished {} with {} nodes".format(date, len(store))
|
|
||||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
|
||||||
break # Stop after 1, because they can take forever
|
|
||||||
return # End of function
|
|
||||||
|
|
||||||
def scalingTest(maxTests=None, inputDir="graphs"):
|
|
||||||
# Meant to study the performance of the network as a function of network size
|
|
||||||
# Loops over a set of nodes in a previously generated graph
|
|
||||||
# Runs a test on each graph, testing each node as the root
|
|
||||||
# if maxTests is set, tests only that number of roots (highest degree first)
|
|
||||||
# Saves results for each graph to a separate file on disk
|
|
||||||
outDir = "output-treesim-{}".format(inputDir)
|
|
||||||
if not os.path.exists(outDir): os.makedirs(outDir)
|
|
||||||
assert os.path.exists(outDir)
|
|
||||||
paths = sorted(glob.glob("{}/*".format(inputDir)))
|
|
||||||
exists = set(glob.glob(outDir+"/*"))
|
|
||||||
for path in paths:
|
|
||||||
gc.collect() # pypy waits for gc to close files
|
|
||||||
graph = os.path.basename(path).split(".")[0]
|
|
||||||
store = makeStoreGeneratedGraph(path)
|
|
||||||
# Get the highest degree node and make it root
|
|
||||||
# Sorted by nodeID just to make it stable in the event of a tie
|
|
||||||
nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
|
|
||||||
dists = None
|
|
||||||
if maxTests: nodeIDs = nodeIDs[:maxTests]
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
|
|
||||||
outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
|
|
||||||
if outpath in exists:
|
|
||||||
print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
|
|
||||||
continue
|
|
||||||
store = makeStoreGeneratedGraph(path, nodeID)
|
|
||||||
# Don't forget to set random seed before setting times
|
|
||||||
random.seed(12345) # To make results reproducible
|
|
||||||
nIDs = sorted(store.keys())
|
|
||||||
for nID in nIDs:
|
|
||||||
node = store[nID]
|
|
||||||
node.info.time = random.randint(0, TIMEOUT)
|
|
||||||
node.info.tstamp = TIMEOUT
|
|
||||||
print "Beginning {}, size {}".format(graph, len(store))
|
|
||||||
if not dists: dists = dijkstrall(store)
|
|
||||||
idleUntilConverged(store)
|
|
||||||
pathMatrix = testPaths(store, dists)
|
|
||||||
avgStretch = getAvgStretch(pathMatrix)
|
|
||||||
maxStretch = getMaxStretch(pathMatrix)
|
|
||||||
results = getResults(pathMatrix)
|
|
||||||
with open(outpath, "w") as f:
|
|
||||||
f.write(results)
|
|
||||||
print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
|
|
||||||
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
|
|
||||||
return # End of function
|
|
||||||
|
|
||||||
##################
|
|
||||||
# Main Execution #
|
|
||||||
##################
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
if True: # Run a quick test
|
|
||||||
random.seed(12345) # DEBUG
|
|
||||||
store = makeStoreSquareGrid(4)
|
|
||||||
runTest(store) # Quick test
|
|
||||||
store = None
|
|
||||||
# Do some real work
|
|
||||||
#runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
|
|
||||||
#timelineDimesTest()
|
|
||||||
#rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
|
|
||||||
#timelineASTest()
|
|
||||||
#rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
|
|
||||||
#scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
|
|
||||||
#store = makeStoreGeneratedGraph("bgp_tables")
|
|
||||||
#store = makeStoreGeneratedGraph("skitter")
|
|
||||||
#store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
|
|
||||||
#store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
|
|
||||||
if store: runTest(store)
|
|
||||||
#rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
|
|
||||||
#scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
|
|
||||||
#scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
|
|
||||||
#scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
|
|
||||||
#scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
|
|
||||||
if not store:
|
|
||||||
import sys
|
|
||||||
args = sys.argv
|
|
||||||
if len(args) == 2:
|
|
||||||
job_number = int(sys.argv[1])
|
|
||||||
#rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
|
|
||||||
#rootNodeASTest("skitter", "out-skitter", None, job_number)
|
|
||||||
rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
|
|
||||||
else:
|
|
||||||
print "Usage: {} job_number".format(args[0])
|
|
||||||
print "job_number = which job set to run on this node (1-indexed)"
|
|
||||||
|
|
@ -1,907 +0,0 @@
|
|||||||
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
|
|
||||||
# Steps:
|
|
||||||
# 1: Pick any node, here I'm using highest nodeID
|
|
||||||
# 2: Build spanning tree, each node stores path back to root
|
|
||||||
# Optionally with weights for each hop
|
|
||||||
# Ties broken by preferring a parent with higher degree
|
|
||||||
# 3: Distance metric: self->peer + (via tree) peer->dest
|
|
||||||
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
|
|
||||||
# 5: Source-route traffic using the better of those two paths
|
|
||||||
|
|
||||||
# Note: This makes no attempt to simulate a dynamic network
|
|
||||||
# E.g. A node's peers cannot be disconnected
|
|
||||||
|
|
||||||
# TODO:
|
|
||||||
# Make better use of drop?
|
|
||||||
# In particular, we should be ignoring *all* recently dropped *paths* to the root
|
|
||||||
# To minimize route flapping
|
|
||||||
# Not really an issue in the sim, but probably needed for a real network
|
|
||||||
|
|
||||||
import array
|
|
||||||
import gc
|
|
||||||
import glob
|
|
||||||
import gzip
|
|
||||||
import heapq
|
|
||||||
import os
|
|
||||||
import random
|
|
||||||
import time
|
|
||||||
|
|
||||||
#############
|
|
||||||
# Constants #
|
|
||||||
#############
|
|
||||||
|
|
||||||
# Reminder of where link cost comes in
|
|
||||||
LINK_COST = 1
|
|
||||||
|
|
||||||
# Timeout before dropping something, in simulated seconds
|
|
||||||
TIMEOUT = 60
|
|
||||||
|
|
||||||
###########
|
|
||||||
# Classes #
|
|
||||||
###########
|
|
||||||
|
|
||||||
class PathInfo:
|
|
||||||
def __init__(self, nodeID):
|
|
||||||
self.nodeID = nodeID # e.g. IP
|
|
||||||
self.coords = [] # Position in tree
|
|
||||||
self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info
|
|
||||||
self.degree = 0 # Number of peers the sender has, used to break ties
|
|
||||||
# The above should be signed
|
|
||||||
self.path = [nodeID] # Path to node (in path-vector route)
|
|
||||||
self.time = 0 # Time info was updated, to keep track of e.g. timeouts
|
|
||||||
self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
|
|
||||||
def clone(self):
|
|
||||||
# Return a deep-enough copy of the path
|
|
||||||
clone = PathInfo(None)
|
|
||||||
clone.nodeID = self.nodeID
|
|
||||||
clone.coords = self.coords[:]
|
|
||||||
clone.tstamp = self.tstamp
|
|
||||||
clone.degree = self.degree
|
|
||||||
clone.path = self.path[:]
|
|
||||||
clone.time = self.time
|
|
||||||
clone.treeID = self.treeID
|
|
||||||
return clone
|
|
||||||
# End class PathInfo
|
|
||||||
|
|
||||||
class Node:
|
|
||||||
def __init__(self, nodeID):
|
|
||||||
self.info = PathInfo(nodeID) # Self NodeInfo
|
|
||||||
self.root = None # PathInfo to node at root of tree
|
|
||||||
self.drop = dict() # PathInfo to nodes from clus that have timed out
|
|
||||||
self.peers = dict() # PathInfo to peers
|
|
||||||
self.links = dict() # Links to peers (to pass messages)
|
|
||||||
self.msgs = [] # Said messages
|
|
||||||
self.table = dict() # Pre-computed lookup table of peer info
|
|
||||||
|
|
||||||
def tick(self):
|
|
||||||
# Do periodic maintenance stuff, including push updates
|
|
||||||
self.info.time += 1
|
|
||||||
if self.info.time > self.info.tstamp + TIMEOUT/4:
|
|
||||||
# Update timestamp at least once every 1/4 timeout period
|
|
||||||
# This should probably be randomized in a real implementation
|
|
||||||
self.info.tstamp = self.info.time
|
|
||||||
self.info.degree = len(self.peers)
|
|
||||||
self.info.degree = 0# TODO decide if degree should be used
|
|
||||||
changed = False # Used to track when the network has converged
|
|
||||||
changed |= self.cleanRoot()
|
|
||||||
self.cleanDropped()
|
|
||||||
# Should probably send messages infrequently if there's nothing new to report
|
|
||||||
if self.info.tstamp == self.info.time:
|
|
||||||
msg = self.createMessage()
|
|
||||||
self.sendMessage(msg)
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def cleanRoot(self):
|
|
||||||
changed = False
|
|
||||||
if self.root and self.info.time - self.root.time > TIMEOUT:
|
|
||||||
print "DEBUG: clean root,", self.root.path
|
|
||||||
self.drop[self.root.treeID] = self.root
|
|
||||||
self.root = None
|
|
||||||
changed = True
|
|
||||||
if not self.root or self.root.treeID < self.info.treeID:
|
|
||||||
# No need to drop someone who'se worse than us
|
|
||||||
self.info.coords = [self.info.nodeID]
|
|
||||||
self.root = self.info.clone()
|
|
||||||
changed = True
|
|
||||||
elif self.root.treeID == self.info.treeID:
|
|
||||||
self.root = self.info.clone()
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def cleanDropped(self):
|
|
||||||
# May actually be a treeID... better to iterate over keys explicitly
|
|
||||||
nodeIDs = sorted(self.drop.keys())
|
|
||||||
for nodeID in nodeIDs:
|
|
||||||
node = self.drop[nodeID]
|
|
||||||
if self.info.time - node.time > 4*TIMEOUT:
|
|
||||||
del self.drop[nodeID]
|
|
||||||
return None
|
|
||||||
|
|
||||||
def createMessage(self):
|
|
||||||
# Message is just a tuple
|
|
||||||
# First element is the sender
|
|
||||||
# Second element is the root
|
|
||||||
# We will .clone() everything during the send operation
|
|
||||||
msg = (self.info, self.root)
|
|
||||||
return msg
|
|
||||||
|
|
||||||
def sendMessage(self, msg):
|
|
||||||
for link in self.links.values():
|
|
||||||
newMsg = (msg[0].clone(), msg[1].clone())
|
|
||||||
link.msgs.append(newMsg)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def handleMessages(self):
|
|
||||||
changed = False
|
|
||||||
while self.msgs:
|
|
||||||
changed |= self.handleMessage(self.msgs.pop())
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def handleMessage(self, msg):
|
|
||||||
changed = False
|
|
||||||
for node in msg:
|
|
||||||
# Update the path and timestamp for the sender and root info
|
|
||||||
node.path.append(self.info.nodeID)
|
|
||||||
node.time = self.info.time
|
|
||||||
# Update the sender's info in our list of peers
|
|
||||||
sender = msg[0]
|
|
||||||
self.peers[sender.nodeID] = sender
|
|
||||||
# Decide if we want to update the root
|
|
||||||
root = msg[1]
|
|
||||||
updateRoot = False
|
|
||||||
isSameParent = False
|
|
||||||
isBetterParent = False
|
|
||||||
if len(self.root.path) > 1 and len(root.path) > 1:
|
|
||||||
parent = self.peers[self.root.path[-2]]
|
|
||||||
if parent.nodeID == sender.nodeID: isSameParent = True
|
|
||||||
if sender.degree > parent.degree:
|
|
||||||
# This would also be where you check path uptime/reliability/whatever
|
|
||||||
# All else being equal, we prefer parents with high degree
|
|
||||||
# We are trusting peers to report degree correctly in this case
|
|
||||||
# So expect some performance reduction if your peers aren't trustworthy
|
|
||||||
# (Lies can increase average stretch by a few %)
|
|
||||||
isBetterParent = True
|
|
||||||
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
|
|
||||||
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
|
|
||||||
elif not self.root: updateRoot = True
|
|
||||||
elif self.root.treeID < root.treeID: updateRoot = True
|
|
||||||
elif self.root.treeID != root.treeID: pass
|
|
||||||
elif self.root.tstamp > root.tstamp: pass
|
|
||||||
elif len(root.path) < len(self.root.path): updateRoot = True
|
|
||||||
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
|
|
||||||
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
|
|
||||||
if updateRoot:
|
|
||||||
if not self.root or self.root.path != root.path: changed = True
|
|
||||||
self.root = root
|
|
||||||
self.info.coords = self.root.path
|
|
||||||
return changed
|
|
||||||
|
|
||||||
def lookup(self, dest):
|
|
||||||
# Note: Can loop in an unconverged network
|
|
||||||
# The person looking up the route is responsible for checking for loops
|
|
||||||
best = None
|
|
||||||
bestDist = 0
|
|
||||||
bestDeg = 0
|
|
||||||
for node in self.peers.itervalues():
|
|
||||||
# dist = distance to node + dist (on tree) from node to dest
|
|
||||||
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
|
|
||||||
deg = node.degree
|
|
||||||
if not best or dist < bestDist or (best == bestDist and deg > bestDeg):
|
|
||||||
best = node
|
|
||||||
bestDist = dist
|
|
||||||
bestDeg = deg
|
|
||||||
if best:
|
|
||||||
next = best.path[-2]
|
|
||||||
assert next in self.peers
|
|
||||||
return next
|
|
||||||
else:
|
|
||||||
# We failed to look something up
|
|
||||||
# TODO some way to signal this which doesn't crash
|
|
||||||
assert False
|
|
||||||
|
|
||||||
def initTable(self):
|
|
||||||
# Pre-computes a lookup table for destination coords
|
|
||||||
# Insert parent first so you prefer them as a next-hop
|
|
||||||
self.table.clear()
|
|
||||||
parent = self.info.nodeID
|
|
||||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
|
||||||
for peer in self.peers.itervalues():
|
|
||||||
current = self.table
|
|
||||||
for coord in peer.coords:
|
|
||||||
if coord not in current: current[coord] = (peer.nodeID, dict())
|
|
||||||
old = current[coord]
|
|
||||||
next = old[1]
|
|
||||||
oldPeer = self.peers[old[0]]
|
|
||||||
oldDist = len(oldPeer.coords)
|
|
||||||
oldDeg = oldPeer.degree
|
|
||||||
newDist = len(peer.coords)
|
|
||||||
newDeg = peer.degree
|
|
||||||
# Prefer parent
|
|
||||||
# Else prefer short distance from root
|
|
||||||
# If equal distance, prefer high degree
|
|
||||||
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
|
|
||||||
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
|
|
||||||
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
|
|
||||||
current = next
|
|
||||||
return None
|
|
||||||
|
|
||||||
def lookup_new(self, dest):
|
|
||||||
# Use pre-computed lookup table to look up next hop for dest coords
|
|
||||||
assert self.table
|
|
||||||
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
|
|
||||||
else: parent = None
|
|
||||||
current = (parent, self.table)
|
|
||||||
c = None
|
|
||||||
for coord in dest.coords:
|
|
||||||
c = coord
|
|
||||||
if coord not in current[1]: break
|
|
||||||
current = current[1][coord]
|
|
||||||
next = current[0]
|
|
||||||
if c in self.peers: next = c
|
|
||||||
if next not in self.peers:
|
|
||||||
assert next == None
|
|
||||||
# You're the root of a different connected component
|
|
||||||
# You'd drop the packet in this case
|
|
||||||
# To make the path cache not die, need to return a valid next hop...
|
|
||||||
# Returning self for that reason
|
|
||||||
next = self.info.nodeID
|
|
||||||
return next
|
|
||||||
# End class Node
|
|
||||||
|
|
||||||
####################
|
|
||||||
# Helper Functions #
|
|
||||||
####################
|
|
||||||
|
|
||||||
def getIndexOfLCA(source, dest):
|
|
||||||
# Return index of last common ancestor in source/dest coords
|
|
||||||
# -1 if no common ancestor (e.g. different roots)
|
|
||||||
lcaIdx = -1
|
|
||||||
minLen = min(len(source), len(dest))
|
|
||||||
for idx in xrange(minLen):
|
|
||||||
if source[idx] == dest[idx]: lcaIdx = idx
|
|
||||||
else: break
|
|
||||||
return lcaIdx
|
|
||||||
|
|
||||||
def treePath(source, dest):
|
|
||||||
# Return path with source at head and dest at tail
|
|
||||||
lastMatch = getIndexOfLCA(source, dest)
|
|
||||||
path = dest[-1:lastMatch:-1] + source[lastMatch:]
|
|
||||||
assert path[0] == dest[-1]
|
|
||||||
assert path[-1] == source[-1]
|
|
||||||
return path
|
|
||||||
|
|
||||||
def treeDist(source, dest):
|
|
||||||
dist = len(source) + len(dest)
|
|
||||||
lcaIdx = getIndexOfLCA(source, dest)
|
|
||||||
dist -= 2*(lcaIdx+1)
|
|
||||||
return dist
|
|
||||||
|
|
||||||
def dijkstra(nodestore, startingNodeID):
|
|
||||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
|
||||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
|
||||||
nodeIDs = sorted(nodestore.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
idxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[nodeIdx]
|
|
||||||
idxs[nodeID] = nodeIdx
|
|
||||||
dists = array.array("H", [0]*nNodes)
|
|
||||||
queue = [(0, startingNodeID)]
|
|
||||||
while queue:
|
|
||||||
dist, nodeID = heapq.heappop(queue)
|
|
||||||
idx = idxs[nodeID]
|
|
||||||
if not dists[idx]: # Unvisited, otherwise we skip it
|
|
||||||
dists[idx] = dist
|
|
||||||
for peer in nodestore[nodeID].links:
|
|
||||||
if not dists[idxs[peer]]:
|
|
||||||
# Peer is also unvisited, so add to queue
|
|
||||||
heapq.heappush(queue, (dist+LINK_COST, peer))
|
|
||||||
return dists
|
|
||||||
|
|
||||||
def dijkstrall(nodestore):
|
|
||||||
# Idea to use heapq and basic implementation taken from stackexchange post
|
|
||||||
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
|
|
||||||
nodeIDs = sorted(nodestore.keys())
|
|
||||||
nNodes = len(nodeIDs)
|
|
||||||
idxs = dict()
|
|
||||||
for nodeIdx in xrange(nNodes):
|
|
||||||
nodeID = nodeIDs[nodeIdx]
|
|
||||||
idxs[nodeID] = nodeIdx
|
|
||||||
dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end)
|
|
||||||
for sourceIdx in xrange(nNodes):
|
|
||||||
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
|
|
||||||
queue = [(0, sourceIdx)]
|
|
||||||
while queue:
|
|
||||||
dist, nodeIdx = heapq.heappop(queue)
|
|
||||||
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
|
|
||||||
if not dists[distIdx]: # Unvisited, otherwise we skip it
|
|
||||||
dists[distIdx] = dist
|
|
||||||
for peer in nodestore[nodeIDs[nodeIdx]].links:
|
|
||||||
pIdx = idxs[peer]
|
|
||||||
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
|
|
||||||
if not dists[pdIdx]:
|
|
||||||
# Peer is also unvisited, so add to queue
|
|
||||||
heapq.heappush(queue, (dist+LINK_COST, pIdx))
|
|
||||||
return dists
|
|
||||||
|
|
||||||
def linkNodes(node1, node2):
|
|
||||||
node1.links[node2.info.nodeID] = node2
|
|
||||||
node2.links[node1.info.nodeID] = node1
|
|
||||||
|
|
||||||
############################
# Store topology functions #
############################

def makeStoreSquareGrid(sideLength, randomize=True):
  # Simple grid in a sideLength*sideLength square
  # Just used to validate that the code runs
  store = dict()
  nodeIDs = list(range(sideLength*sideLength))
  if randomize: random.shuffle(nodeIDs)
  for nodeID in nodeIDs:
    store[nodeID] = Node(nodeID)
  for index in xrange(len(nodeIDs)):
    if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
    if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
  print "Grid store created, size {}".format(len(store))
  return store

def makeStoreASRelGraph(pathToGraph):
  # Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
  with open(pathToGraph, "r") as f:
    inData = f.readlines()
  store = dict()
  for line in inData:
    if line.strip()[0] == "#": continue # Skip comment lines
    line = line.replace('|'," ")
    nodes = map(int, line.split()[0:2])
    if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
    if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
    linkNodes(store[nodes[0]], store[nodes[1]])
  print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
  return store

def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
  with open(pathToGraph, "r") as f:
    inData = f.readlines()
  store = dict()
  nodeDeg = dict()
  for line in inData:
    if line.strip()[0] == "#": continue # Skip comment lines
    line = line.replace('|'," ")
    nodes = map(int, line.split()[0:2])
    if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
    if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
    nodeDeg[nodes[0]] += 1
    nodeDeg[nodes[1]] += 1
  sortedNodes = sorted(nodeDeg.keys(), \
                       key=lambda x: (nodeDeg[x], x), \
                       reverse=True)
  maxDegNodeID = sortedNodes[degIdx]
  return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)

def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
  with open(pathToGraph, "r") as f:
    inData = f.readlines()
  store = dict()
  for line in inData:
    if line.strip()[0] == "#": continue # Skip comment lines
    line = line.replace('|'," ")
    nodes = map(int, line.split()[0:2])
    if nodes[0] not in store:
      store[nodes[0]] = Node(nodes[0])
      if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
    if nodes[1] not in store:
      store[nodes[1]] = Node(nodes[1])
      if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
    linkNodes(store[nodes[0]], store[nodes[1]])
  print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
  return store

def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
  # Read from a DIMES csv-formatted graph from a gzip file
  store = dict()
  with gzip.open(pathToGraph, "r") as f:
    inData = f.readlines()
  size = len(inData)
  index = 0
  for edge in inData:
    if not index % 1000:
      pct = 100.0*index/size
      print "Processing edge {}, {:.2f}%".format(index, pct)
    index += 1
    dat = edge.rstrip().split(',')
    node1 = "N" + str(dat[0].strip())
    node2 = "N" + str(dat[1].strip())
    if '?' in node1 or '?' in node2: continue # Unknown node
    if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
    if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
    if node1 not in store: store[node1] = Node(node1)
    if node2 not in store: store[node2] = Node(node2)
    if node1 != node2: linkNodes(store[node1], store[node2])
  print "DIMES graph successfully imported, size {}".format(len(store))
  return store

def makeStoreGeneratedGraph(pathToGraph, root=None):
  with open(pathToGraph, "r") as f:
    inData = f.readlines()
  store = dict()
  for line in inData:
    if line.strip()[0] == "#": continue # Skip comment lines
    nodes = map(int, line.strip().split(' ')[0:2])
    node1 = nodes[0]
    node2 = nodes[1]
    if node1 == root: node1 += 1000000
    if node2 == root: node2 += 1000000
    if node1 not in store: store[node1] = Node(node1)
    if node2 not in store: store[node2] = Node(node2)
    linkNodes(store[node1], store[node2])
  print "Generated graph successfully imported, size {}".format(len(store))
  return store

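# A small sketch, not part of the original file, of the whitespace-separated
# edge-list format makeStoreGeneratedGraph above parses: one "node1 node2"
# pair of integer IDs per line, with "#" starting a comment line. The file
# name here is hypothetical.
def _edgeListSketch():
  with open("toy.graph", "w") as f:
    f.write("# toy graph: a 3-node path\n0 1\n1 2\n")
  return makeStoreGeneratedGraph("toy.graph") # imports a 3-node store
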
############################################
# Functions used as parts of network tests #
############################################

def idleUntilConverged(store):
  nodeIDs = sorted(store.keys())
  timeOfLastChange = 0
  step = 0
  # Idle until the network has converged
  while step - timeOfLastChange < 4*TIMEOUT:
    step += 1
    print "Step: {}, last change: {}".format(step, timeOfLastChange)
    changed = False
    for nodeID in nodeIDs:
      # Update node status, send messages
      changed |= store[nodeID].tick()
    for nodeID in nodeIDs:
      # Process messages
      changed |= store[nodeID].handleMessages()
    if changed: timeOfLastChange = step
  initTables(store)
  return store

def getCacheIndex(nodes, sourceIndex, destIndex):
  return sourceIndex*nodes + destIndex

def initTables(store):
  nodeIDs = sorted(store.keys())
  nNodes = len(nodeIDs)
  print "Initializing routing tables for {} nodes".format(nNodes)
  for idx in xrange(nNodes):
    nodeID = nodeIDs[idx]
    store[nodeID].initTable()
  print "Routing tables initialized"
  return None

def getCache(store):
  nodeIDs = sorted(store.keys())
  nNodes = len(nodeIDs)
  nodeIdxs = dict()
  for nodeIdx in xrange(nNodes):
    nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
  cache = array.array("H", [0]*nNodes*nNodes)
  for sourceIdx in xrange(nNodes):
    sourceID = nodeIDs[sourceIdx]
    print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
    for destIdx in xrange(nNodes):
      destID = nodeIDs[destIdx]
      if sourceID == destID: nextHop = destID # lookup would fail
      else: nextHop = store[sourceID].lookup(store[destID].info)
      nextHopIdx = nodeIdxs[nextHop]
      cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
  return cache

def testPaths(store, dists):
  cache = getCache(store)
  nodeIDs = sorted(store.keys())
  nNodes = len(nodeIDs)
  idxs = dict()
  for nodeIdx in xrange(nNodes):
    nodeID = nodeIDs[nodeIdx]
    idxs[nodeID] = nodeIdx
  results = dict()
  for sourceIdx in xrange(nNodes):
    sourceID = nodeIDs[sourceIdx]
    print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
    #dists = dijkstra(store, sourceID)
    for destIdx in xrange(nNodes):
      destID = nodeIDs[destIdx]
      if destID == sourceID: continue # Skip self
      distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
      eHops = dists[distIdx]
      if not eHops: continue # The network is split, no path exists
      hops = 0
      for pair in ((sourceIdx, destIdx), (destIdx, sourceIdx)): # Either direction because source routing
        nHops = 0
        locIdx = pair[0]
        dIdx = pair[1]
        while locIdx != dIdx:
          locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
          nHops += 1
        if not hops or nHops < hops: hops = nHops
      if eHops not in results: results[eHops] = dict()
      if hops not in results[eHops]: results[eHops][hops] = 0
      results[eHops][hops] += 1
  return results

def getAvgStretch(pathMatrix):
  avgStretch = 0.
  checked = 0.
  for eHops in sorted(pathMatrix.keys()):
    for nHops in sorted(pathMatrix[eHops].keys()):
      count = pathMatrix[eHops][nHops]
      stretch = float(nHops)/float(max(1, eHops))
      avgStretch += stretch*count
      checked += count
  avgStretch /= max(1, checked)
  return avgStretch

def getMaxStretch(pathMatrix):
  maxStretch = 0.
  for eHops in sorted(pathMatrix.keys()):
    for nHops in sorted(pathMatrix[eHops].keys()):
      stretch = float(nHops)/float(max(1, eHops))
      maxStretch = max(maxStretch, stretch)
  return maxStretch

def getCertSizes(store):
  # Returns nCerts frequency distribution
  # De-duplicates common certs (for shared prefixes in the path)
  sizes = dict()
  for node in store.values():
    certs = set()
    for peer in node.peers.values():
      pCerts = set()
      assert len(peer.path) == 2
      assert peer.coords[-1] == peer.path[0]
      hops = peer.coords + peer.path[1:]
      for hopIdx in xrange(len(hops)-1):
        send = hops[hopIdx]
        if send == node.info.nodeID: continue # We created it, already have it
        path = hops[0:hopIdx+2]
        # Each cert is signed by the sender
        # Includes information about the path from the sender to the next hop
        # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
        cert = "{}:{}".format(send, path)
        certs.add(cert)
    size = len(certs)
    if size not in sizes: sizes[size] = 0
    sizes[size] += 1
  return sizes

def getMinLinkCertSizes(store):
  # Returns nCerts frequency distribution
  # De-duplicates common certs (for shared prefixes in the path)
  # Based on the minimum number of certs that must be traded through a particular link
  # Handled per link
  sizes = dict()
  for node in store.values():
    peerCerts = dict()
    for peer in node.peers.values():
      pCerts = set()
      assert len(peer.path) == 2
      assert peer.coords[-1] == peer.path[0]
      hops = peer.coords + peer.path[1:]
      for hopIdx in xrange(len(hops)-1):
        send = hops[hopIdx]
        if send == node.info.nodeID: continue # We created it, already have it
        path = hops[0:hopIdx+2]
        # Each cert is signed by the sender
        # Includes information about the path from the sender to the next hop
        # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
        cert = "{}:{}".format(send, path)
        pCerts.add(cert)
      peerCerts[peer.nodeID] = pCerts
    for peer in peerCerts:
      size = 0
      pCerts = peerCerts[peer]
      for cert in pCerts:
        required = True
        for p2 in peerCerts:
          if p2 == peer: continue
          p2Certs = peerCerts[p2]
          if cert in p2Certs: required = False
        if required: size += 1
      if size not in sizes: sizes[size] = 0
      sizes[size] += 1
  return sizes

def getPathSizes(store):
  # Returns frequency distribution of the total number of hops in the routing table
  # I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
  sizes = dict()
  for node in store.values():
    size = 0
    for peer in node.peers.values():
      assert len(peer.path) == 2
      assert peer.coords[-1] == peer.path[0]
      peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
      size += peerSize
    if size not in sizes: sizes[size] = 0
    sizes[size] += 1
  return sizes

def getPeerSizes(store):
  # Returns frequency distribution of the number of peers each node has
  sizes = dict()
  for node in store.values():
    nPeers = len(node.peers)
    if nPeers not in sizes: sizes[nPeers] = 0
    sizes[nPeers] += 1
  return sizes

def getAvgSize(sizes):
  sumSizes = 0
  nNodes = 0
  for size in sizes:
    count = sizes[size]
    sumSizes += size*count
    nNodes += count
  avgSize = float(sumSizes)/max(1, nNodes)
  return avgSize

def getMaxSize(sizes):
  return max(sizes.keys())

def getMinSize(sizes):
  return min(sizes.keys())

def getResults(pathMatrix):
  results = []
  for eHops in sorted(pathMatrix.keys()):
    for nHops in sorted(pathMatrix[eHops].keys()):
      count = pathMatrix[eHops][nHops]
      results.append("{} {} {}".format(eHops, nHops, count))
  return '\n'.join(results)

####################################
# Functions to run different tests #
####################################

def runTest(store):
  # Runs the usual set of tests on the store
  # Does not save results, so only meant for quick tests
  # To e.g. check the code works, maybe warm up the pypy jit
  for node in store.values():
    node.info.time = random.randint(0, TIMEOUT)
    node.info.tstamp = TIMEOUT
  print "Begin testing network"
  dists = None
  if not dists: dists = dijkstrall(store)
  idleUntilConverged(store)
  pathMatrix = testPaths(store, dists)
  avgStretch = getAvgStretch(pathMatrix)
  maxStretch = getMaxStretch(pathMatrix)
  peers = getPeerSizes(store)
  certs = getCertSizes(store)
  paths = getPathSizes(store)
  linkCerts = getMinLinkCertSizes(store)
  avgPeerSize = getAvgSize(peers)
  maxPeerSize = getMaxSize(peers)
  avgCertSize = getAvgSize(certs)
  maxCertSize = getMaxSize(certs)
  avgPathSize = getAvgSize(paths)
  maxPathSize = getMaxSize(paths)
  avgLinkCert = getAvgSize(linkCerts)
  maxLinkCert = getMaxSize(linkCerts)
  totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
  totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
  avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
  print "Finished testing network"
  print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
  print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
  print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
  print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
  print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
  print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
  return # End of function

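# A toy sketch, not part of the original file, of the pathMatrix layout that
# testPaths returns and getAvgStretch/getMaxStretch consume:
# pathMatrix[expectedHops][actualHops] = count of node pairs, and
# stretch = actualHops/expectedHops.
def _stretchSketch():
  pathMatrix = {2: {2: 10, 3: 2}} # 10 pairs optimal, 2 pairs one hop longer
  print getAvgStretch(pathMatrix) # (10*1.0 + 2*1.5)/12 = 1.0833...
  print getMaxStretch(pathMatrix) # 1.5
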
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc=1):
  # Checks performance for every possible choice of root node
  # Saves output for each root node to a separate file on disk
  # path = input path to some caida.org formatted AS-relationship graph
  if not os.path.exists(outDir): os.makedirs(outDir)
  assert os.path.exists(outDir)
  store = makeStoreASRelGraph(path)
  nodes = sorted(store.keys())
  for nodeIdx in xrange(len(nodes)):
    if nodeIdx % proc != 0: continue # Work belongs to someone else
    rootNodeID = nodes[nodeIdx]
    outpath = outDir+"/{}".format(rootNodeID)
    if os.path.exists(outpath):
      print "Skipping {}, already processed".format(rootNodeID)
      continue
    store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
    for node in store.values():
      node.info.time = random.randint(0, TIMEOUT)
      node.info.tstamp = TIMEOUT
    print "Beginning {}, size {}".format(nodeIdx, len(store))
    if not dists: dists = dijkstrall(store)
    idleUntilConverged(store)
    pathMatrix = testPaths(store, dists)
    avgStretch = getAvgStretch(pathMatrix)
    maxStretch = getMaxStretch(pathMatrix)
    results = getResults(pathMatrix)
    with open(outpath, "w") as f:
      f.write(results)
    print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
    print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    #break # Stop after 1, because they can take forever
  return # End of function

def timelineASTest():
  # Meant to study the performance of the network as a function of network size
  # Loops over a set of AS-relationship graphs
  # Runs a test on each graph, selecting highest-degree node as the root
  # Saves results for each graph to a separate file on disk
  outDir = "output-treesim-timeline-AS"
  if not os.path.exists(outDir): os.makedirs(outDir)
  assert os.path.exists(outDir)
  paths = sorted(glob.glob("asrel/datasets/*"))
  for path in paths:
    date = os.path.basename(path).split(".")[0]
    outpath = outDir+"/{}".format(date)
    if os.path.exists(outpath):
      print "Skipping {}, already processed".format(date)
      continue
    store = makeStoreASRelGraphMaxDeg(path)
    dists = None
    for node in store.values():
      node.info.time = random.randint(0, TIMEOUT)
      node.info.tstamp = TIMEOUT
    print "Beginning {}, size {}".format(date, len(store))
    if not dists: dists = dijkstrall(store)
    idleUntilConverged(store)
    pathMatrix = testPaths(store, dists)
    avgStretch = getAvgStretch(pathMatrix)
    maxStretch = getMaxStretch(pathMatrix)
    results = getResults(pathMatrix)
    with open(outpath, "w") as f:
      f.write(results)
    print "Finished {} with {} nodes".format(date, len(store))
    print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    #break # Stop after 1, because they can take forever
  return # End of function

def timelineDimesTest():
  # Meant to study the performance of the network as a function of network size
  # Loops over a set of AS-relationship graphs
  # Runs a test on each graph, selecting highest-degree node as the root
  # Saves results for each graph to a separate file on disk
  outDir = "output-treesim-timeline-dimes"
  if not os.path.exists(outDir): os.makedirs(outDir)
  assert os.path.exists(outDir)
  # Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
  paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
  exists = set(glob.glob(outDir+"/*"))
  for path in paths:
    date = os.path.basename(path).split(".")[0]
    outpath = outDir+"/{}".format(date)
    if outpath in exists:
      print "Skipping {}, already processed".format(date)
      continue
    store = makeStoreDimesEdges(path)
    # Get the highest degree node and make it root
    # Sorted by nodeID just to make it stable in the event of a tie
    nodeIDs = sorted(store.keys())
    bestRoot = ""
    bestDeg = 0
    for nodeID in nodeIDs:
      node = store[nodeID]
      if len(node.links) > bestDeg:
        bestRoot = nodeID
        bestDeg = len(node.links)
    assert bestRoot
    store = makeStoreDimesEdges(path, bestRoot)
    rootID = "R" + bestRoot[1:]
    assert rootID in store
    dists = None
    # Don't forget to set random seed before setting times
    # To make results reproducible
    nodeIDs = sorted(store.keys())
    random.seed(12345)
    for nodeID in nodeIDs:
      node = store[nodeID]
      node.info.time = random.randint(0, TIMEOUT)
      node.info.tstamp = TIMEOUT
    print "Beginning {}, size {}".format(date, len(store))
    if not dists: dists = dijkstrall(store)
    idleUntilConverged(store)
    pathMatrix = testPaths(store, dists)
    avgStretch = getAvgStretch(pathMatrix)
    maxStretch = getMaxStretch(pathMatrix)
    results = getResults(pathMatrix)
    with open(outpath, "w") as f:
      f.write(results)
    print "Finished {} with {} nodes".format(date, len(store))
    print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    break # Stop after 1, because they can take forever
  return # End of function

def scalingTest(maxTests=None, inputDir="graphs"):
  # Meant to study the performance of the network as a function of network size
  # Loops over a set of nodes in a previously generated graph
  # Runs a test on each graph, testing each node as the root
  # if maxTests is set, tests only that number of roots (highest degree first)
  # Saves results for each graph to a separate file on disk
  outDir = "output-treesim-{}".format(inputDir)
  if not os.path.exists(outDir): os.makedirs(outDir)
  assert os.path.exists(outDir)
  paths = sorted(glob.glob("{}/*".format(inputDir)))
  exists = set(glob.glob(outDir+"/*"))
  for path in paths:
    gc.collect() # pypy waits for gc to close files
    graph = os.path.basename(path).split(".")[0]
    store = makeStoreGeneratedGraph(path)
    # Get the highest degree node and make it root
    # Sorted by nodeID just to make it stable in the event of a tie
    nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
    dists = None
    if maxTests: nodeIDs = nodeIDs[:maxTests]
    for nodeID in nodeIDs:
      nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
      outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
      if outpath in exists:
        print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
        continue
      store = makeStoreGeneratedGraph(path, nodeID)
      # Don't forget to set random seed before setting times
      random.seed(12345) # To make results reproducible
      nIDs = sorted(store.keys())
      for nID in nIDs:
        node = store[nID]
        node.info.time = random.randint(0, TIMEOUT)
        node.info.tstamp = TIMEOUT
      print "Beginning {}, size {}".format(graph, len(store))
      if not dists: dists = dijkstrall(store)
      idleUntilConverged(store)
      pathMatrix = testPaths(store, dists)
      avgStretch = getAvgStretch(pathMatrix)
      maxStretch = getMaxStretch(pathMatrix)
      results = getResults(pathMatrix)
      with open(outpath, "w") as f:
        f.write(results)
      print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
      print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
  return # End of function

##################
# Main Execution #
##################

if __name__ == "__main__":
  if True: # Run a quick test
    random.seed(12345) # DEBUG
    store = makeStoreSquareGrid(4)
    runTest(store) # Quick test
  store = None
  # Do some real work
  #runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
  #timelineDimesTest()
  #rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
  #timelineASTest()
  #rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
  #scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
  #store = makeStoreGeneratedGraph("bgp_tables")
  #store = makeStoreGeneratedGraph("skitter")
  #store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
  #store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
  if store: runTest(store)
  #rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
  #scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
  #scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
  #scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
  #scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
  if not store:
    import sys
    args = sys.argv
    if len(args) == 2:
      job_number = int(sys.argv[1])
      #rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
      #rootNodeASTest("skitter", "out-skitter", None, job_number)
      rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
    else:
      print "Usage: {} job_number".format(args[0])
      print "job_number = which job set to run on this node (1-indexed)"

@ -412,10 +412,10 @@ func main() {
 	}
 	fmt.Println("Test")
 	Util_testAddrIDMask()
-	idxstore := makeStoreSquareGrid(4)
+	//idxstore := makeStoreSquareGrid(4)
 	//idxstore := makeStoreStar(256)
 	//idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
-	//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
+	idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
 	//idxstore := loadGraph("skitter")
 	kstore := getKeyedStore(idxstore)
 	/*
@ -1,35 +0,0 @@
#!/usr/bin/env python2

def main():
  import sys
  args = sys.argv
  if len(args) != 2:
    print "Usage:", args[0], "path/to/walk.txt"
    return
  import glob
  files = glob.glob(args[1])
  if len(files) == 0:
    print "File not found:", args[1]
    return
  for inFile in files:
    with open(inFile, 'r') as f: lines = f.readlines()
    out = []
    nodes = dict()
    for line in lines:
      words = line.strip().strip('[').strip(']').split(',')
      if len(words) < 5: continue
      if words[0].strip('"') != "link": continue
      first, second = words[3], words[4]
      if first not in nodes: nodes[first] = len(nodes)
      if second not in nodes: nodes[second] = len(nodes)
    for line in lines:
      words = line.strip().strip('[').strip(']').split(',')
      if len(words) < 5: continue
      if words[0].strip('"') != "link": continue
      first, second = nodes[words[3]], nodes[words[4]]
      out.append("{0} {1}".format(first, second))
    with open(inFile+".map", "w") as f: f.write("\n".join(out))
  # End loop over files
# End main

if __name__ == "__main__": main()

@ -1,22 +0,0 @@
package main

import "fmt"
import "time"
import "sync/atomic"
import "runtime"

func main() {

	var ops uint64 = 0
	for i := 0; i < 4; i++ {
		go func() {
			for {
				atomic.AddUint64(&ops, 1)
				runtime.Gosched()
			}
		}()
	}
	time.Sleep(1 * time.Second)
	opsFinal := atomic.LoadUint64(&ops)
	fmt.Println("ops:", opsFinal)
}

@ -1,53 +0,0 @@
package main

import "fmt"
import "net"
import "time"

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	packetSize := 65535
	numPackets := 65535

	go func() {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, packetSize)
		for idx := 0; idx < numPackets; idx++ {
			send.Write(msg)
		}
	}()

	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	read := 0
	buf := make([]byte, packetSize)
	for {
		n, err := sock.Read(buf)
		read += n
		if err != nil {
			break
		}
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
	fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds())
}

@ -1,36 +0,0 @@
package main

import "time"
import "fmt"
import "sync"

func main() {
	fmt.Println("Testing speed of recv+send loop")
	const count = 10000000
	c := make(chan []byte, 1)
	c <- []byte{}
	var wg sync.WaitGroup
	worker := func() {
		for idx := 0; idx < count; idx++ {
			p := <-c
			select {
			case c <- p:
			default:
			}
		}
		wg.Done()
	}
	nIter := 0
	start := time.Now()
	for idx := 0; idx < 1; idx++ {
		wg.Add(1) // Add before starting the worker, so Wait can't race Done
		go worker()
		nIter += count
	}
	wg.Wait()
	stop := time.Now()
	timed := stop.Sub(start)
	fmt.Printf("%d iterations in %s\n", nIter, timed)
	fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds())
	fmt.Printf("%s per iteration\n", timed/time.Duration(nIter))
}

@ -1,56 +0,0 @@
package main

import "bytes"
import "encoding/gob"
import "time"
import "fmt"

type testStruct struct {
	First  uint64
	Second float64
	Third  []byte
}

func testFunc(tickerDuration time.Duration) {
	chn := make(chan []byte)
	ticker := time.NewTicker(tickerDuration)
	defer ticker.Stop()
	send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}}
	buf := bytes.NewBuffer(nil)
	enc := gob.NewEncoder(buf)
	dec := gob.NewDecoder(buf)
	sendCall := func() {
		err := enc.Encode(&send)
		if err != nil {
			panic(err)
		}
		bs := make([]byte, buf.Len())
		buf.Read(bs)
		fmt.Println("send:", bs)
		go func() { chn <- bs }()
	}
	recvCall := func(bs []byte) {
		buf.Write(bs)
		recv := testStruct{}
		err := dec.Decode(&recv)
		fmt.Println("recv:", bs)
		if err != nil {
			panic(err)
		}
	}
	for {
		select {
		case bs := <-chn:
			recvCall(bs)
		case <-ticker.C:
			sendCall()
		}
	}
}

func main() {
	go testFunc(100 * time.Millisecond) // Does not crash
	time.Sleep(time.Second)
	go testFunc(time.Nanosecond) // Does crash
	time.Sleep(time.Second)
}

@ -1,22 +0,0 @@
package main

import "sync"
import "time"
import "fmt"

func main() {
	const reqs = 1000000
	var wg sync.WaitGroup
	start := time.Now()
	for idx := 0; idx < reqs; idx++ {
		wg.Add(1)
		go func() { wg.Done() }()
	}
	wg.Wait()
	stop := time.Now()
	timed := stop.Sub(start)
	fmt.Printf("%d goroutines in %s (%f per second)\n",
		reqs,
		timed,
		reqs/timed.Seconds())
}

@ -1,57 +0,0 @@
package main

import "fmt"
import "net"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!
	//addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001")
	addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenMulticastUDP("udp", nil, addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		saddr, err := net.ResolveUDPAddr("udp", "[::]:0")
		if err != nil {
			panic(err)
		}
		send, err := net.ListenUDP("udp", saddr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			//fmt.Println("Sending...")
			send.WriteTo(msg, addr)
		}
	}()

	numPackets := 1000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		//fmt.Println("Reading:", i)
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

func main() {

	basic_test()

}

@ -1,92 +0,0 @@
package main

import "fmt"
import "net"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	var ip *net.IP
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	var zone string
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			panic(err)
		}
		for _, addr := range addrs {
			addrIP, _, _ := net.ParseCIDR(addr.String())
			if addrIP.To4() != nil {
				continue
			} // IPv6 only
			if !addrIP.IsLinkLocalUnicast() {
				continue
			}
			zone = iface.Name
			ip = &addrIP
		}
		addrs, err = iface.MulticastAddrs()
		if err != nil {
			panic(err)
		}
		for _, addr := range addrs {
			fmt.Println(addr.String())
		}
	}
	if ip == nil {
		panic("No link-local IPv6 found")
	}
	fmt.Println("Using address:", *ip)

	addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

	saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone}
	send, err := net.ListenUDP("udp", &saddr)
	if err != nil {
		panic(err)
	}
	defer send.Close()
	sock, err := net.ListenUDP("udp", &addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	const buffSize = 1048576 * 100

	send.SetWriteBuffer(buffSize)
	sock.SetReadBuffer(buffSize)
	sock.SetWriteBuffer(buffSize)

	go func() {
		msg := make([]byte, 1280)
		for {
			send.WriteTo(msg, &addr)
		}
	}()

	numPackets := 100000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		_, addr, _ := sock.ReadFrom(msg)
		sock.WriteTo(msg, addr)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

func main() {

	basic_test()

}

@ -1,89 +0,0 @@
package main

import "fmt"

//import "net"
import "time"
import "runtime"
import "sync/atomic"

func poolbench() {
	nWorkers := runtime.GOMAXPROCS(0)
	work := make(chan func(), 1)
	workers := make(chan chan<- func(), nWorkers)
	makeWorker := func() chan<- func() {
		ch := make(chan func())
		go func() {
			for {
				f := <-ch
				f()
				select {
				case workers <- (ch):
				default:
					return
				}
			}
		}()
		return ch
	}
	getWorker := func() chan<- func() {
		select {
		case ch := <-workers:
			return ch
		default:
			return makeWorker()
		}
	}
	dispatcher := func() {
		for {
			w := <-work
			ch := getWorker()
			ch <- w
		}
	}
	go dispatcher()
	var count uint64
	const nCounts = 1000000
	for idx := 0; idx < nCounts; idx++ {
		f := func() { atomic.AddUint64(&count, 1) }
		work <- f
	}
	for atomic.LoadUint64(&count) < nCounts {
	}
}

func normalbench() {
	var count uint64
	const nCounts = 1000000
	ch := make(chan struct{}, 1)
	ch <- struct{}{}
	for idx := 0; idx < nCounts; idx++ {
		f := func() { atomic.AddUint64(&count, 1) }
		f()
		<-ch
		ch <- struct{}{}
	}
}

func gobench() {
	var count uint64
	const nCounts = 1000000
	for idx := 0; idx < nCounts; idx++ {
		f := func() { atomic.AddUint64(&count, 1) }
		go f()
	}
	for atomic.LoadUint64(&count) < nCounts {
	}
}

func main() {
	start := time.Now()
	poolbench()
	fmt.Println(time.Since(start))
	start = time.Now()
	normalbench()
	fmt.Println(time.Since(start))
	start = time.Now()
	gobench()
	fmt.Println(time.Since(start))
}

@ -1,95 +0,0 @@
package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"math/big"
	"sync"
	"time"

	quic "github.com/lucas-clemente/quic-go"
)

const addr = "[::1]:9001"

func main() {
	go run_server()
	run_client()
}

func run_server() {
	listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)
	if err != nil {
		panic(err)
	}
	ses, err := listener.Accept()
	if err != nil {
		panic(err)
	}
	for {
		stream, err := ses.AcceptStream()
		if err != nil {
			panic(err)
		}
		go func() {
			defer stream.Close()
			bs := bytes.Buffer{}
			_, err := bs.ReadFrom(stream)
			if err != nil {
				panic(err)
			} //<-- TooManyOpenStreams
		}()
	}
}

func run_client() {
	msgSize := 1048576
	msgCount := 128
	ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil)
	if err != nil {
		panic(err)
	}
	bs := make([]byte, msgSize)
	wg := sync.WaitGroup{}
	start := time.Now()
	for idx := 0; idx < msgCount; idx++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			stream, err := ses.OpenStreamSync()
			if err != nil {
				panic(err)
			}
			defer stream.Close()
			stream.Write(bs)
		}() // "go" this later
	}
	wg.Wait()
	timed := time.Since(start)
	fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds()))
}

// Set up a bare-bones TLS config for the server
func generateTLSConfig() *tls.Config {
	key, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	template := x509.Certificate{SerialNumber: big.NewInt(1)}
	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})

	tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		panic(err)
	}
	return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
}

@ -1,74 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFrom(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@ -1,84 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		for {
			for len(bss) < 1024 {
				bss = append(bss, msg)
			}
			bss.WriteTo(send)
			//bss = bss[:0]
			//send.Write(msg)
		}
	}()

	numPackets := 1000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@ -1,116 +0,0 @@
package main

import "flag"
import "fmt"
import "net"
import "os"
import "runtime/pprof"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	var ip *net.IP
	ifaces, err := net.Interfaces()
	if err != nil {
		panic(err)
	}
	var zone string
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			panic(err)
		}
		for _, addr := range addrs {
			addrIP, _, _ := net.ParseCIDR(addr.String())
			if addrIP.To4() != nil {
				continue
			} // IPv6 only
			if !addrIP.IsLinkLocalUnicast() {
				continue
			}
			fmt.Println(iface.Name, addrIP)
			zone = iface.Name
			ip = &addrIP
		}
		if ip != nil {
			break
		}
		/*
			addrs, err = iface.MulticastAddrs()
			if err != nil { panic(err) }
			for _, addr := range addrs {
				fmt.Println(addr.String())
			}
		*/
	}
	if ip == nil {
		panic("No link-local IPv6 found")
	}
	fmt.Println("Using address:", *ip)
	addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

	laddr, err := net.ResolveUDPAddr("udp", "[::]:9001")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", laddr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		send, err := net.DialUDP("udp", nil, &addr)
		//send, err := net.ListenUDP("udp", nil)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
			//send.WriteToUDP(msg, &addr)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@ -1,103 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible?

const buffSize = 32

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	go func() {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		for {
			for len(bss) < 1 { //buffSize {
				bss = append(bss, msg)
			}
			bss := net.Buffers{[]byte{0, 1, 2, 3}, []byte{0, 1}, msg}
			bss.WriteTo(send)
			//send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	for i := 0; i < numPackets; i++ {
		msg := make([]byte, 1280*buffSize)
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		msg = msg[:n]
		for len(msg) > 1286 {
			// handle message
			i++
			msg = msg[1286:]
		}
		// handle remaining fragment of message
		//fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

	_ = func(in chan<- int) {
		close(in)
	}

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@ -1,77 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveUDPAddr("udp", "[::1]:0")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	go func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		send, err := net.DialUDP("udp", nil, raddr)
		//send, err := net.ListenUDP("udp", addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
			//send.WriteToUDP(msg, raddr)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@ -1,79 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002")
	if err != nil {
		panic(err)
	}

	send, err := net.DialUDP("udp", saddr, raddr)
	if err != nil {
		panic(err)
	}
	defer send.Close()

	recv, err := net.DialUDP("udp", raddr, saddr)
	if err != nil {
		panic(err)
	}
	defer recv.Close()

	go func() {
		msg := make([]byte, 1280)
		for {
			send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0; i < numPackets; i++ {
		recv.Read(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()

}

@@ -1,92 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
	// TODO need a way to look up who our link-local neighbors are for each iface!

	sock, err := net.ListenUDP("udp", nil)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	ch := make(chan []byte, 1)

	writer := func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		//send, err := net.ListenUDP("udp", nil)
		//if err != nil { panic(err) }
		//defer send.Close()
		for {
			select {
			case <-ch: // drain one recycled buffer, if any is available
			default:
			}
			msg := make([]byte, 1280)
			sock.WriteToUDP(msg, raddr)
			//send.WriteToUDP(msg, raddr)
		}
	}
	go writer()
	//go writer()
	//go writer()
	//go writer()

	numPackets := 65536
	size := 0
	start := time.Now()
	success := 0
	for i := 0; i < numPackets; i++ {
		msg := make([]byte, 2048)
		n, _, err := sock.ReadFromUDP(msg)
		if err != nil {
			panic(err)
		}
		size += n
		select {
		case ch <- msg: // hand the buffer back to the writer without blocking
			success++
		default:
		}
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
	fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds())
	fmt.Println("Success:", success, "/", numPackets)
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}
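The reader above hands used buffers back to the writer over a bounded channel with non-blocking sends. A sync.Pool is the more idiomatic way to express the same recycling idea in Go; this is a sketch of that alternative, not what the deleted file did:

package main

import "sync"

// A pool of 2048-byte packet buffers. Get falls back to allocating a fresh
// buffer when the pool is empty, which is exactly the behavior the
// select/default pattern above approximated.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 2048) },
}

func getBuf() []byte  { return bufPool.Get().([]byte) }
func putBuf(b []byte) { bufPool.Put(b[:cap(b)]) }

func main() {
	buf := getBuf()
	// ... fill buf from the socket, process it ...
	putBuf(buf)
}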
@@ -1,124 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

import "golang.org/x/net/ipv6"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
	// TODO need a way to look up who our link-local neighbors are for each iface!

	udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	sock, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	writer := func() {
		raddr := sock.LocalAddr().(*net.UDPAddr)
		send, err := net.ListenUDP("udp", nil)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		conn := ipv6.NewPacketConn(send)
		defer conn.Close()
		var msgs []ipv6.Message
		for idx := 0; idx < 1024; idx++ {
			msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
			msgs = append(msgs, msg)
		}
		for {
			/*
				var msgs []ipv6.Message
				for idx := 0; idx < 1024; idx++ {
					msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
					msgs = append(msgs, msg)
				}
			*/
			conn.WriteBatch(msgs, 0)
		}
	}
	go writer()
	//go writer()
	//go writer()
	//go writer()

	numPackets := 65536
	size := 0
	count := 0
	start := time.Now()
	/*
		conn := ipv6.NewPacketConn(sock)
		defer conn.Close()
		for ; count < numPackets; count++ {
			msgs := make([]ipv6.Message, 1024)
			for _, msg := range msgs {
				msg.Buffers = append(msg.Buffers, make([]byte, 2048))
			}
			n, err := conn.ReadBatch(msgs, 0)
			if err != nil { panic(err) }
			fmt.Println("DEBUG: n", n)
			for _, msg := range msgs[:n] {
				fmt.Println("DEBUG: msg", msg)
				size += msg.N
				//for _, bs := range msg.Buffers {
				//	size += len(bs)
				//}
				count++
			}
		}
	//*/
	//*
	for ; count < numPackets; count++ {
		msg := make([]byte, 2048)
		n, _, err := sock.ReadFromUDP(msg)
		if err != nil {
			panic(err)
		}
		size += n
	}
	//*/
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds())
	fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}
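WriteBatch above amortizes syscall overhead by sending many messages per call (on Linux this maps to sendmmsg/recvmmsg). The commented-out ReadBatch block in the file had a subtle bug: it ranged over the messages by value, so the Buffers it appended were lost on copies. A corrected sketch of that reader, under the same golang.org/x/net/ipv6 API:

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	sock, err := net.ListenUDP("udp", nil)
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	conn := ipv6.NewPacketConn(sock)

	// Index into the slice so the Buffers assignment sticks; ranging by value
	// mutates a copy of each Message.
	msgs := make([]ipv6.Message, 1024)
	for i := range msgs {
		msgs[i].Buffers = [][]byte{make([]byte, 2048)}
	}
	// Blocks until at least one datagram arrives.
	n, err := conn.ReadBatch(msgs, 0)
	if err != nil {
		panic(err)
	}
	size := 0
	for _, msg := range msgs[:n] {
		size += msg.N
	}
	fmt.Println("received", n, "messages,", size, "bytes")
}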
@@ -1,105 +0,0 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible?

const buffSize = 32

func basic_test() {
	// TODO need a way to look up who our link-local neighbors are for each iface!

	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil {
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	go func() {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil {
			panic(err)
		}
		defer send.Close()
		msg := make([]byte, 1280)
		bss := make(net.Buffers, 0, 1024)
		count := 0
		for {
			time.Sleep(100 * time.Millisecond)
			// WriteTo drains bss, so it is refilled (one buffer larger) each round.
			for len(bss) < count {
				bss = append(bss, msg)
			}
			bss.WriteTo(send)
			count++
			//send.Write(msg)
		}
	}()

	numPackets := 1000000
	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil {
		panic(err)
	}
	defer sock.Close()
	for {
		msg := make([]byte, 1280*buffSize)
		n, err := sock.Read(msg)
		if err != nil {
			panic(err)
		}
		msg = msg[:n]
		fmt.Println("Read:", n)
		for len(msg) > 1280 {
			// handle message
			msg = msg[1280:]
		}
		// handle remaining fragment of message
		//fmt.Println(n)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

	_ = func(in chan<- int) {
		close(in)
	}
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create memory profile: %v", err))
		}
		defer func() { pprof.WriteHeapProfile(f); f.Close() }()
	}
	basic_test()
}
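Two notes on the TCP benchmark above. net.Buffers.WriteTo is the batching trick being measured: on supported platforms the runtime coalesces the slice of buffers into a single vectored write (writev-style), and the call consumes the slice as it goes. Separately, the read loop's 1280-byte slicing assumes record boundaries line up with Read calls, which TCP does not guarantee; a minimal sketch of boundary-safe framing with io.ReadFull, as an alternative rather than a fix the file ever had:

package main

import (
	"io"
	"net"
)

// Read fixed 1280-byte records from a TCP stream. io.ReadFull carries partial
// records across Read calls, so record boundaries survive arbitrary TCP
// segmentation, unlike slicing a single Read result.
func readRecords(conn net.Conn, handle func([]byte)) error {
	buf := make([]byte, 1280)
	for {
		if _, err := io.ReadFull(conn, buf); err != nil {
			return err
		}
		handle(buf)
	}
}

func main() {} // sketch only; wire readRecords to an accepted conn to use it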
@@ -1,83 +0,0 @@
package main

import (
	"fmt"
	"log"
	"net"
	"os/exec"
	"time"

	"github.com/yggdrasil-network/water"
)

const mtu = 65535

func setup_dev() *water.Interface {
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect(ifce *water.Interface) {
	conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second)
	if err != nil {
		panic(err)
	}
	sock := conn.(*net.TCPConn)
	_ = sock // TODO: start a worker to move packets to/from the tun (see the sketch after this file)
}

func bench() {
}

func main() {
	ifce := setup_dev1()
	connect(ifce)
	bench()
	fmt.Println("Done?")
	return
	// Unreachable scratch code from an earlier revision of this test.
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())

	packet := make([]byte, 2000)
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			panic(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}
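connect() above stops at its TODO. A sketch of the missing worker pair, assuming one tunneled packet per length-prefixed frame on the stream; the 2-byte framing is an assumption for illustration, not a format these files defined:

package main

import (
	"encoding/binary"
	"io"
)

// Copy packets from a TUN-like reader to a stream, prefixing each packet with
// a 2-byte big-endian length so the receiver can recover packet boundaries.
func tunToStream(tun io.Reader, stream io.Writer) error {
	buf := make([]byte, 65535+2)
	for {
		n, err := tun.Read(buf[2:])
		if err != nil {
			return err
		}
		binary.BigEndian.PutUint16(buf[:2], uint16(n))
		if _, err := stream.Write(buf[:2+n]); err != nil {
			return err
		}
	}
}

// The reverse direction: read a length prefix, then the packet, then write it
// to the TUN device.
func streamToTun(stream io.Reader, tun io.Writer) error {
	var hdr [2]byte
	buf := make([]byte, 65535)
	for {
		if _, err := io.ReadFull(stream, hdr[:]); err != nil {
			return err
		}
		n := int(binary.BigEndian.Uint16(hdr[:]))
		if _, err := io.ReadFull(stream, buf[:n]); err != nil {
			return err
		}
		if _, err := tun.Write(buf[:n]); err != nil {
			return err
		}
	}
}

func main() {} // sketch: run each direction in its own goroutine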
@@ -1,126 +0,0 @@
package main

import (
	"fmt"
	"log"
	"os/exec"

	"github.com/yggdrasil-network/water"
)

const mtu = 65535
const netnsName = "tunbenchns"

func setup_dev() *water.Interface {
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::1/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func addNS(name string) {
	cmd := exec.Command("ip", "netns", "add", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to set up netns")
	}
}

func delNS(name string) {
	cmd := exec.Command("ip", "netns", "delete", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to delete netns")
	}
}

func doInNetNS(comm ...string) *exec.Cmd {
	// exec.Command's variadic tail cannot be mixed with extra fixed arguments,
	// so splice everything into a single slice first.
	args := append([]string{"netns", "exec", netnsName}, comm...)
	return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
	ifce := setup_dev()
	addNS(netnsName)
	cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to move tun to netns")
	}
	cmd = doInNetNS("ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = doInNetNS("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect() {

}

func bench() {
}

func main() {
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())

	packet := make([]byte, 2000)
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			panic(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}
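The listing above fixes several compile errors in the deleted file (exec.COmmand typos, the undefined tun.name, printing an error via string(err), and the illegal mixed variadic call in doInNetNS). For reference, a self-contained usage sketch of the corrected helper; the command run inside the namespace is just an example:

package main

import (
	"fmt"
	"os/exec"
)

const netnsName = "tunbenchns"

// Build an "ip netns exec <ns> ..." command by joining the fixed prefix and
// the caller's arguments into one slice before the variadic call.
func doInNetNS(comm ...string) *exec.Cmd {
	args := append([]string{"netns", "exec", netnsName}, comm...)
	return exec.Command("ip", args...)
}

func main() {
	out, err := doInNetNS("ip", "addr", "list").CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic(err)
	}
	fmt.Println(string(out))
}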
@@ -1,128 +0,0 @@
package main

import (
	"fmt"
	"log"
	"os/exec"

	"github.com/yggdrasil-network/water"
)

const mtu = 65535
const netnsName = "tunbenchns"

func setup_dev() *water.Interface {
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}
	return ifce
}

func setup_dev1() *water.Interface {
	ifce := setup_dev()
	cmd := exec.Command("ip", "-f", "inet6",
		"addr", "add", "fc00::1/8",
		"dev", ifce.Name())
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to assign address")
	}
	cmd = exec.Command("ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to bring up interface")
	}
	return ifce
}

func addNS(name string) {
	cmd := exec.Command("ip", "netns", "add", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to set up netns")
	}
}

func delNS(name string) {
	cmd := exec.Command("ip", "netns", "delete", name)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to delete netns")
	}
}

func doInNetNS(comm ...string) *exec.Cmd {
	// Splice the fixed prefix and caller arguments into one slice; a mixed
	// variadic call does not compile.
	args := append([]string{"netns", "exec", netnsName}, comm...)
	return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
	ifce := setup_dev()
	addNS(netnsName)
	cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to move tun to netns")
	}
	cmd = exec.Command(
		"ip", "-f", "inet6",
		"addr", "add", "fc00::2/8",
		"dev", ifce.Name())
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		panic("Failed to assign address")
	}
	cmd = exec.Command(
		"ip", "link", "set",
		"dev", ifce.Name(),
		"mtu", fmt.Sprintf("%d", mtu),
		"up")
	out, err = cmd.CombinedOutput()
	if err != nil {
		fmt.Println(string(out))
		fmt.Println(err)
		panic("Failed to bring up interface")
	}
	return ifce
}

func connect() {

}

func bench() {
}

func main() {
	ifce, err := water.New(water.Config{
		DeviceType: water.TUN,
	})
	if err != nil {
		panic(err)
	}

	log.Printf("Interface Name: %s\n", ifce.Name())

	packet := make([]byte, 2000)
	for {
		n, err := ifce.Read(packet)
		if err != nil {
			panic(err)
		}
		log.Printf("Packet Received: % x\n", packet[:n])
	}
}
@@ -1,45 +0,0 @@
package main

import (
	"log"
	"net"
	"sync"

	"github.com/FlexibleBroadband/tun-go"
)

// First, start the tun server.
func main() {
	wg := sync.WaitGroup{}
	// Channel carrying packets read from the local tun interface.
	rCh := make(chan []byte, 1024)
	// Read from the local tun channel; a real client would write each packet
	// into the remote UDP connection here.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			data := <-rCh
			// if data[0]&0xf0 == 0x40
			// write into udp conn.
			log.Println("tun->conn:", len(data))
			log.Println("read!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
			log.Println("src:", net.IP(data[8:24]), "dst:", net.IP(data[24:40]))
		}
	}()

	address := net.ParseIP("fc00::1")
	tuntap, err := tun.OpenTun(address)
	if err != nil {
		panic(err)
	}
	defer tuntap.Close()
	// Read packets from the tun device into the rCh channel.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := tuntap.Read(rCh); err != nil {
			panic(err)
		}
	}()
	wg.Wait()
}
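The slice offsets data[8:24] and data[24:40] above come straight from the fixed IPv6 header layout: 40 bytes total, with the 16-byte source address at offset 8 and the 16-byte destination address at offset 24. A small sketch of the same parse with named offsets and a version check:

package main

import (
	"fmt"
	"net"
)

// Offsets into the fixed 40-byte IPv6 header.
const (
	ipv6SrcOffset = 8  // 16-byte source address
	ipv6DstOffset = 24 // 16-byte destination address
	ipv6HdrLen    = 40
)

func srcDst(packet []byte) (src, dst net.IP, ok bool) {
	if len(packet) < ipv6HdrLen || packet[0]>>4 != 6 {
		return nil, nil, false
	}
	return net.IP(packet[ipv6SrcOffset : ipv6SrcOffset+16]),
		net.IP(packet[ipv6DstOffset : ipv6DstOffset+16]), true
}

func main() {
	pkt := make([]byte, ipv6HdrLen)
	pkt[0] = 6 << 4 // version field = 6
	if src, dst, ok := srcDst(pkt); ok {
		fmt.Println("src:", src, "dst:", dst)
	}
}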
@@ -1,40 +0,0 @@
package main

import "fmt"
import "time"

import "wire" // local package providing Encode_uint64/Decode_uint64

func main() {
	// Round-trip every power of two through the encoder as a sanity check.
	for idx := 0; idx < 64; idx++ {
		num := uint64(1) << uint(idx)
		encoded := make([]byte, 10)
		length := wire.Encode_uint64(num, encoded)
		decoded, _ := wire.Decode_uint64(encoded[:length])
		if decoded != num {
			panic(fmt.Sprintf("%d != %d", decoded, num))
		}
	}

	const count = 1000000
	start := time.Now()
	encoded := make([]byte, 10)
	//num := ^uint64(0) // Longest possible value for full uint64 range
	num := ^uint64(0) >> 1 // Largest positive int64 (real use case)
	//num := uint64(0) // Shortest possible value, most will be of this length
	length := wire.Encode_uint64(num, encoded)
	for idx := 0; idx < count; idx++ {
		wire.Encode_uint64(num, encoded)
	}
	timed := time.Since(start)
	fmt.Println("Ops:", count/timed.Seconds())
	fmt.Println("Time:", timed.Nanoseconds()/count)

	encoded = encoded[:length]
	start = time.Now()
	for idx := 0; idx < count; idx++ {
		wire.Decode_uint64(encoded)
	}
	timed = time.Since(start)
	fmt.Println("Ops:", count/timed.Seconds())
	fmt.Println("Time:", timed.Nanoseconds()/count)
}
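The 10-byte scratch buffer above is sized for the worst case of a 7-bits-per-byte variable-length encoding: ceil(64/7) = 10 bytes for a full uint64. Assuming wire.Encode_uint64 is such a varint (the deleted benchmark does not show its definition, so this is an illustrative stand-in), an equivalent sketch:

package main

import "fmt"

// Encode u as a big-endian 7-bit-per-byte varint: the high bit of every byte
// except the last is a continuation flag. Worst case is 10 bytes for 64 bits.
func encodeUint64(u uint64, out []byte) int {
	length := 1
	for v := u >> 7; v > 0; v >>= 7 {
		length++
	}
	for i := length - 1; i >= 0; i-- {
		out[i] = byte(u&0x7f) | 0x80
		u >>= 7
	}
	out[length-1] &^= 0x80 // clear the continuation bit on the final byte
	return length
}

// Decode returns the value and the number of bytes consumed (0 on truncation).
func decodeUint64(bs []byte) (uint64, int) {
	var u uint64
	for i, b := range bs {
		u = (u << 7) | uint64(b&0x7f)
		if b&0x80 == 0 {
			return u, i + 1
		}
	}
	return 0, 0
}

func main() {
	buf := make([]byte, 10)
	n := encodeUint64(^uint64(0)>>1, buf)
	v, _ := decodeUint64(buf[:n])
	fmt.Println(n, "bytes,", v)
}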