From 6bc2044ced8674cc2d07c8dfc4759a0ec5b19c26 Mon Sep 17 00:00:00 2001 From: Arceliar Date: Sun, 23 May 2021 17:52:10 -0500 Subject: [PATCH] update ironwood dependency, fix ansible code, go mod tidy --- cmd/yggdrasilsim/dial.go | 61 -- cmd/yggdrasilsim/main.go | 6 - cmd/yggdrasilsim/node.go | 28 - cmd/yggdrasilsim/store.go | 41 - contrib/ansible/genkeys.go | 60 +- go.mod | 3 +- go.sum | 4 +- misc/sim/fc00-2017-08-12.txt | 1593 ---------------------------------- misc/sim/merge.py | 62 -- misc/sim/run-sim | 2 - misc/sim/treesim-forward.py | 901 ------------------- misc/sim/treesim.go | 459 ---------- src/core/link.go | 3 +- 13 files changed, 25 insertions(+), 3198 deletions(-) delete mode 100644 cmd/yggdrasilsim/dial.go delete mode 100644 cmd/yggdrasilsim/main.go delete mode 100644 cmd/yggdrasilsim/node.go delete mode 100644 cmd/yggdrasilsim/store.go delete mode 100644 misc/sim/fc00-2017-08-12.txt delete mode 100644 misc/sim/merge.py delete mode 100755 misc/sim/run-sim delete mode 100644 misc/sim/treesim-forward.py delete mode 100644 misc/sim/treesim.go diff --git a/cmd/yggdrasilsim/dial.go b/cmd/yggdrasilsim/dial.go deleted file mode 100644 index c7892d4..0000000 --- a/cmd/yggdrasilsim/dial.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "sort" - "time" - - "github.com/yggdrasil-network/yggdrasil-go/src/crypto" -) - -func doListen(recvNode *simNode) { - // TODO be able to stop the listeners somehow so they don't leak across different tests - for { - c, err := recvNode.listener.Accept() - if err != nil { - panic(err) - } - c.Close() - } -} - -func dialTest(sendNode, recvNode *simNode) { - if sendNode.id == recvNode.id { - fmt.Println("Skipping dial to self") - return - } - var mask crypto.NodeID - for idx := range mask { - mask[idx] = 0xff - } - for { - c, err := sendNode.dialer.DialByNodeIDandMask(nil, &recvNode.nodeID, &mask) - if c != nil { - c.Close() - return - } - if err != nil { - fmt.Println("Dial failed:", err) - } - 
time.Sleep(time.Second) - } -} - -func dialStore(store nodeStore) { - var nodeIdxs []int - for idx, n := range store { - nodeIdxs = append(nodeIdxs, idx) - go doListen(n) - } - sort.Slice(nodeIdxs, func(i, j int) bool { - return nodeIdxs[i] < nodeIdxs[j] - }) - for _, idx := range nodeIdxs { - sendNode := store[idx] - for _, jdx := range nodeIdxs { - recvNode := store[jdx] - fmt.Printf("Dialing from node %d to node %d / %d...\n", idx, jdx, len(store)) - dialTest(sendNode, recvNode) - } - } -} diff --git a/cmd/yggdrasilsim/main.go b/cmd/yggdrasilsim/main.go deleted file mode 100644 index 25504c9..0000000 --- a/cmd/yggdrasilsim/main.go +++ /dev/null @@ -1,6 +0,0 @@ -package main - -func main() { - store := makeStoreSquareGrid(4) - dialStore(store) -} diff --git a/cmd/yggdrasilsim/node.go b/cmd/yggdrasilsim/node.go deleted file mode 100644 index 65e6a80..0000000 --- a/cmd/yggdrasilsim/node.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "io/ioutil" - - "github.com/gologme/log" - - "github.com/yggdrasil-network/yggdrasil-go/src/config" - "github.com/yggdrasil-network/yggdrasil-go/src/crypto" - "github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil" -) - -type simNode struct { - core yggdrasil.Core - id int - nodeID crypto.NodeID - dialer *yggdrasil.Dialer - listener *yggdrasil.Listener -} - -func newNode(id int) *simNode { - n := simNode{id: id} - n.core.Start(config.GenerateConfig(), log.New(ioutil.Discard, "", 0)) - n.nodeID = *n.core.NodeID() - n.dialer, _ = n.core.ConnDialer() - n.listener, _ = n.core.ConnListen() - return &n -} diff --git a/cmd/yggdrasilsim/store.go b/cmd/yggdrasilsim/store.go deleted file mode 100644 index 6fce81a..0000000 --- a/cmd/yggdrasilsim/store.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -type nodeStore map[int]*simNode - -func makeStoreSingle() nodeStore { - s := make(nodeStore) - s[0] = newNode(0) - return s -} - -func linkNodes(a *simNode, b *simNode) { - la := a.core.NewSimlink() - lb := b.core.NewSimlink() - 
la.SetDestination(lb) - lb.SetDestination(la) - la.Start() - lb.Start() -} - -func makeStoreSquareGrid(sideLength int) nodeStore { - store := make(nodeStore) - nNodes := sideLength * sideLength - idxs := make([]int, 0, nNodes) - // TODO shuffle nodeIDs - for idx := 1; idx <= nNodes; idx++ { - idxs = append(idxs, idx) - } - for _, idx := range idxs { - n := newNode(idx) - store[idx] = n - } - for idx := 0; idx < nNodes; idx++ { - if (idx % sideLength) != 0 { - linkNodes(store[idxs[idx]], store[idxs[idx-1]]) - } - if idx >= sideLength { - linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]]) - } - } - return store -} diff --git a/contrib/ansible/genkeys.go b/contrib/ansible/genkeys.go index 681431b..4a02b9b 100644 --- a/contrib/ansible/genkeys.go +++ b/contrib/ansible/genkeys.go @@ -6,6 +6,7 @@ This file generates crypto keys for [ansible-yggdrasil](https://github.com/jcgru package main import ( + "crypto/ed25519" "encoding/hex" "flag" "fmt" @@ -14,7 +15,6 @@ import ( "github.com/cheggaaa/pb/v3" "github.com/yggdrasil-network/yggdrasil-go/src/address" - "github.com/yggdrasil-network/yggdrasil-go/src/crypto" ) var numHosts = flag.Int("hosts", 1, "number of host vars to generate") @@ -23,7 +23,6 @@ var keyTries = flag.Int("tries", 1000, "number of tries before taking the best k type keySet struct { priv []byte pub []byte - id []byte ip string } @@ -37,27 +36,15 @@ func main() { return } - var encryptionKeys []keySet + var keys []keySet for i := 0; i < *numHosts+1; i++ { - encryptionKeys = append(encryptionKeys, newBoxKey()) + keys = append(keys, newKey()) bar.Increment() } - encryptionKeys = sortKeySetArray(encryptionKeys) + keys = sortKeySetArray(keys) for i := 0; i < *keyTries-*numHosts-1; i++ { - encryptionKeys[0] = newBoxKey() - encryptionKeys = bubbleUpTo(encryptionKeys, 0) - bar.Increment() - } - - var signatureKeys []keySet - for i := 0; i < *numHosts+1; i++ { - signatureKeys = append(signatureKeys, newSigKey()) - bar.Increment() - } - signatureKeys = 
sortKeySetArray(signatureKeys) - for i := 0; i < *keyTries-*numHosts-1; i++ { - signatureKeys[0] = newSigKey() - signatureKeys = bubbleUpTo(signatureKeys, 0) + keys[0] = newKey() + keys = bubbleUpTo(keys, 0) bar.Increment() } @@ -70,43 +57,36 @@ func main() { return } defer file.Close() - file.WriteString(fmt.Sprintf("yggdrasil_encryption_public_key: %v\n", hex.EncodeToString(encryptionKeys[i].pub))) - file.WriteString("yggdrasil_encryption_private_key: \"{{ vault_yggdrasil_encryption_private_key }}\"\n") - file.WriteString(fmt.Sprintf("yggdrasil_signing_public_key: %v\n", hex.EncodeToString(signatureKeys[i].pub))) - file.WriteString("yggdrasil_signing_private_key: \"{{ vault_yggdrasil_signing_private_key }}\"\n") - file.WriteString(fmt.Sprintf("ansible_host: %v\n", encryptionKeys[i].ip)) + file.WriteString(fmt.Sprintf("yggdrasil_public_key: %v\n", hex.EncodeToString(keys[i].pub))) + file.WriteString("yggdrasil_private_key: \"{{ vault_yggdrasil_private_key }}\"\n") + file.WriteString(fmt.Sprintf("ansible_host: %v\n", keys[i].ip)) file, err = os.Create(fmt.Sprintf("host_vars/%x/vault", i)) if err != nil { return } defer file.Close() - file.WriteString(fmt.Sprintf("vault_yggdrasil_encryption_private_key: %v\n", hex.EncodeToString(encryptionKeys[i].priv))) - file.WriteString(fmt.Sprintf("vault_yggdrasil_signing_private_key: %v\n", hex.EncodeToString(signatureKeys[i].priv))) + file.WriteString(fmt.Sprintf("vault_yggdrasil_private_key: %v\n", hex.EncodeToString(keys[i].priv))) bar.Increment() } bar.Finish() } -func newBoxKey() keySet { - pub, priv := crypto.NewBoxKeys() - id := crypto.GetNodeID(pub) - ip := net.IP(address.AddrForNodeID(id)[:]).String() - return keySet{priv[:], pub[:], id[:], ip} -} - -func newSigKey() keySet { - pub, priv := crypto.NewSigKeys() - id := crypto.GetTreeID(pub) - return keySet{priv[:], pub[:], id[:], ""} +func newKey() keySet { + pub, priv, err := ed25519.GenerateKey(nil) + if err != nil { + panic(err) + } + ip := 
net.IP(address.AddrForKey(pub)[:]).String() + return keySet{priv[:], pub[:], ip} } func isBetter(oldID, newID []byte) bool { for idx := range oldID { - if newID[idx] > oldID[idx] { + if newID[idx] < oldID[idx] { return true } - if newID[idx] < oldID[idx] { + if newID[idx] > oldID[idx] { return false } } @@ -122,7 +102,7 @@ func sortKeySetArray(sets []keySet) []keySet { func bubbleUpTo(sets []keySet, num int) []keySet { for i := 0; i < len(sets)-num-1; i++ { - if isBetter(sets[i+1].id, sets[i].id) { + if isBetter(sets[i+1].pub, sets[i].pub) { var tmp = sets[i] sets[i] = sets[i+1] sets[i+1] = tmp diff --git a/go.mod b/go.mod index 600026b..2334664 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/yggdrasil-network/yggdrasil-go go 1.16 require ( - github.com/Arceliar/ironwood v0.0.0-20210519013150-a5401869b037 + github.com/Arceliar/ironwood v0.0.0-20210523223424-d320cf0ed78e github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 github.com/cheggaaa/pb/v3 v3.0.6 github.com/fatih/color v1.10.0 // indirect @@ -16,7 +16,6 @@ require ( github.com/rivo/uniseg v0.2.0 // indirect github.com/vishvananda/netlink v1.1.0 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c diff --git a/go.sum b/go.sum index 5e39b48..9c66c19 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/Arceliar/ironwood v0.0.0-20210519013150-a5401869b037 h1:SQ7opLc8dCNAgyYIeVZUGwvZ5YrfqRLHMwOGWfH/S/k= -github.com/Arceliar/ironwood v0.0.0-20210519013150-a5401869b037/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk= +github.com/Arceliar/ironwood v0.0.0-20210523223424-d320cf0ed78e h1:EoZ4Dfm3xBDFjXRUzZUH+44NVvQ8tLf/VyESuC0BijI= +github.com/Arceliar/ironwood v0.0.0-20210523223424-d320cf0ed78e/go.mod 
h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk= github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ= github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= diff --git a/misc/sim/fc00-2017-08-12.txt b/misc/sim/fc00-2017-08-12.txt deleted file mode 100644 index 13fa724..0000000 --- a/misc/sim/fc00-2017-08-12.txt +++ /dev/null @@ -1,1593 +0,0 @@ -0 1 -2 3 -2 4 -2 5 -3 6 -3 7 -3 8 -3 9 -3 10 -3 11 -4 11 -4 12 -4 13 -4 14 -4 15 -4 16 -4 17 -4 18 -4 19 -4 20 -4 21 -4 22 -4 23 -4 24 -4 25 -4 26 -4 27 -4 28 -4 29 -4 30 -4 31 -4 32 -4 33 -4 34 -4 35 -4 36 -4 37 -4 38 -4 39 -4 40 -4 41 -4 42 -4 43 -4 44 -4 45 -4 46 -4 47 -4 48 -5 10 -5 11 -5 14 -5 20 -5 22 -5 24 -5 25 -5 29 -5 31 -5 36 -5 39 -5 47 -5 49 -5 50 -5 51 -5 52 -5 53 -5 54 -5 55 -5 56 -5 57 -5 58 -5 59 -5 60 -5 61 -5 62 -5 63 -5 64 -5 65 -5 66 -5 67 -5 68 -5 69 -5 70 -6 71 -6 72 -7 0 -7 6 -7 19 -7 50 -7 73 -7 74 -7 75 -7 76 -7 77 -7 78 -7 79 -7 80 -7 81 -7 82 -7 83 -7 84 -7 85 -7 86 -7 87 -7 88 -7 89 -7 90 -7 91 -7 92 -7 93 -8 50 -9 8 -9 50 -9 94 -9 95 -10 96 -10 97 -10 98 -10 99 -12 5 -12 7 -12 100 -12 101 -12 102 -14 15 -14 26 -14 31 -14 32 -14 103 -14 104 -14 105 -16 5 -16 6 -16 71 -16 101 -16 102 -16 106 -16 107 -16 108 -16 109 -20 7 -20 8 -20 9 -20 110 -20 111 -20 101 -20 102 -21 7 -21 20 -24 8 -24 110 -24 101 -24 112 -24 113 -24 114 -24 115 -24 116 -28 37 -28 38 -28 117 -29 118 -30 119 -31 26 -31 32 -31 104 -32 26 -32 46 -32 105 -33 5 -33 101 -33 102 -34 5 -34 101 -35 7 -38 37 -40 101 -41 120 -41 121 -42 122 -42 123 -45 7 -45 124 -47 8 -52 54 -52 55 -52 56 -52 57 -52 58 -52 59 -52 125 -53 52 -53 54 -53 57 -53 60 -53 61 -53 126 -54 55 -56 55 -56 57 -56 58 -56 59 -56 125 -57 54 -57 55 -57 58 -57 59 -58 54 -58 55 -58 59 -59 54 -59 55 -61 60 -62 52 -62 54 -62 55 -62 56 -62 57 -62 58 -62 59 -62 125 -63 64 -63 127 -65 
99 -66 100 -67 49 -70 128 -129 3 -129 6 -129 130 -129 131 -130 7 -130 8 -130 9 -130 10 -130 22 -73 74 -74 0 -74 1 -74 19 -74 75 -74 132 -74 133 -74 76 -135 7 -135 136 -135 137 -136 3 -136 20 -136 50 -136 110 -136 111 -136 138 -136 139 -136 140 -136 141 -136 142 -136 77 -136 78 -136 79 -136 143 -136 144 -136 80 -136 145 -136 146 -136 147 -136 148 -136 149 -136 150 -136 151 -136 152 -136 153 -136 154 -136 155 -136 156 -136 157 -136 158 -137 7 -137 19 -137 20 -137 22 -137 50 -137 73 -137 75 -137 110 -137 111 -137 78 -137 79 -137 143 -137 80 -137 147 -137 149 -137 150 -137 81 -137 86 -137 87 -137 88 -137 89 -137 91 -137 93 -137 159 -137 160 -137 161 -137 162 -137 163 -137 164 -110 7 -110 8 -110 9 -110 65 -110 130 -110 139 -110 79 -110 83 -110 120 -110 108 -110 165 -110 166 -110 114 -110 167 -110 168 -110 169 -110 170 -138 8 -138 9 -138 67 -138 137 -139 7 -140 7 -140 9 -140 110 -140 171 -140 172 -140 173 -140 174 -141 7 -141 137 -141 110 -141 175 -142 7 -142 176 -144 7 -144 137 -80 78 -145 7 -145 110 -146 137 -147 7 -148 7 -148 110 -151 7 -151 137 -153 111 -154 3 -154 6 -156 7 -156 137 -157 77 -158 137 -158 119 -159 7 -160 101 -160 96 -160 97 -161 7 -161 110 -162 7 -162 74 -163 7 -163 74 -164 7 -100 0 -100 3 -100 8 -100 9 -100 20 -100 22 -100 50 -100 69 -100 72 -100 74 -100 84 -100 161 -100 177 -100 178 -100 179 -100 180 -100 181 -100 182 -100 183 -100 94 -100 184 -100 185 -100 186 -100 187 -100 188 -100 189 -100 190 -101 0 -101 18 -101 19 -101 25 -101 49 -101 74 -101 106 -101 107 -102 0 -102 6 -102 19 -102 74 -102 191 -102 192 -102 193 -102 194 -178 7 -180 7 -181 6 -181 72 -182 23 -183 10 -184 7 -184 9 -185 7 -186 195 -187 177 -187 196 -187 197 -187 198 -189 7 -190 199 -106 200 -202 203 -203 8 -203 9 -203 28 -203 204 -204 3 -204 8 -204 9 -204 20 -204 50 -204 130 -204 110 -204 100 -204 94 -204 205 -204 176 -206 4 -206 13 -206 14 -206 15 -206 103 -206 207 -206 208 -206 209 -206 210 -103 13 -103 211 -103 120 -207 15 -207 210 -207 212 -208 15 -209 15 -210 13 -210 14 -210 
15 -210 103 -210 213 -211 15 -211 120 -120 13 -212 120 -214 215 -214 216 -214 217 -214 218 -214 219 -217 4 -217 7 -217 136 -217 137 -217 110 -217 220 -220 3 -220 5 -220 12 -220 20 -220 21 -220 22 -220 23 -220 24 -220 49 -220 51 -220 52 -220 53 -220 54 -220 55 -220 56 -220 57 -220 58 -220 59 -220 60 -220 61 -220 62 -220 129 -220 130 -220 137 -220 138 -220 139 -220 82 -220 160 -220 178 -220 180 -220 221 -220 222 -220 223 -220 224 -220 225 -220 226 -220 227 -220 96 -220 228 -220 229 -220 230 -220 231 -220 97 -220 232 -220 233 -220 234 -235 236 -236 237 -236 238 -237 239 -237 240 -237 241 -243 103 -243 210 -243 211 -243 220 -243 126 -243 244 -126 52 -126 56 -126 57 -126 58 -126 59 -126 60 -126 61 -126 62 -244 5 -244 10 -244 179 -244 245 -244 246 -244 112 -244 247 -221 7 -221 201 -222 7 -222 71 -222 100 -222 178 -222 228 -223 23 -223 182 -224 5 -224 52 -224 54 -224 55 -224 56 -224 57 -224 58 -224 59 -224 62 -224 126 -224 232 -224 121 -224 125 -224 248 -226 16 -226 249 -226 250 -228 7 -228 71 -228 100 -228 178 -229 7 -229 233 -230 4 -230 18 -230 24 -230 67 -230 159 -230 121 -230 251 -231 7 -231 252 -231 253 -97 96 -232 5 -232 54 -232 55 -232 56 -232 57 -232 58 -232 59 -232 62 -232 121 -232 125 -232 248 -246 96 -246 97 -246 112 -246 254 -112 255 -112 170 -196 177 -257 4 -257 5 -257 16 -257 101 -257 102 -257 178 -257 258 -258 6 -258 16 -258 71 -258 222 -258 226 -258 113 -113 50 -113 65 -113 71 -113 259 -108 260 -109 101 -262 7 -263 137 -265 17 -128 7 -128 101 -266 100 -266 177 -266 267 -267 177 -268 18 -114 65 -114 165 -168 7 -272 7 -272 136 -272 273 -272 274 -273 274 -273 275 -276 277 -277 92 -278 7 -121 52 -121 53 -121 54 -121 55 -121 56 -121 57 -121 58 -121 59 -121 60 -121 61 -121 62 -121 126 -121 114 -125 55 -125 57 -125 58 -125 59 -248 52 -248 53 -248 55 -248 56 -248 57 -248 59 -248 62 -248 98 -251 116 -116 110 -116 160 -197 177 -279 136 -279 280 -280 136 -281 140 -281 171 -173 9 -283 7 -283 136 -283 137 -283 110 -283 141 -283 175 -284 182 -285 7 -285 101 -285 220 
-205 5 -205 7 -205 8 -205 9 -205 136 -205 137 -205 110 -205 142 -205 100 -205 102 -205 113 -205 176 -176 7 -176 8 -176 9 -176 286 -288 5 -288 220 -289 290 -290 10 -290 137 -290 291 -290 292 -290 293 -290 294 -290 295 -291 237 -291 239 -291 296 -291 297 -291 298 -291 299 -293 292 -293 294 -295 110 -300 7 -300 301 -300 302 -300 303 -302 301 -303 301 -304 305 -305 306 -306 118 -307 102 -309 10 -309 310 -310 10 -311 312 -312 237 -313 111 -314 7 -314 137 -316 239 -317 318 -317 319 -318 237 -318 291 -318 239 -318 320 -318 321 -318 322 -320 237 -320 242 -320 291 -320 239 -320 323 -320 296 -320 324 -326 7 -326 136 -326 77 -329 29 -118 330 -331 180 -119 18 -332 333 -334 141 -335 4 -335 5 -335 101 -335 102 -335 336 -335 337 -336 2 -336 3 -336 6 -336 11 -336 12 -336 16 -336 18 -336 20 -336 29 -336 30 -336 33 -336 34 -336 36 -336 37 -336 38 -336 39 -336 63 -336 107 -336 223 -336 257 -336 268 -336 119 -336 338 -336 339 -336 340 -336 341 -336 342 -337 4 -337 5 -337 7 -337 9 -337 12 -337 13 -337 16 -337 20 -337 23 -337 28 -337 33 -337 34 -337 47 -337 50 -337 63 -337 65 -337 70 -337 129 -337 130 -337 136 -337 137 -337 110 -337 84 -337 90 -337 100 -337 101 -337 102 -337 177 -337 179 -337 106 -337 206 -337 220 -337 257 -337 113 -337 261 -337 259 -337 336 -337 343 -337 344 -337 345 -337 346 -337 347 -338 3 -338 4 -338 5 -338 11 -338 339 -339 3 -339 10 -339 11 -341 7 -341 18 -341 22 -341 130 -341 268 -341 348 -343 113 -345 101 -345 102 -347 261 -349 23 -349 100 -349 182 -349 220 -349 223 -349 284 -349 350 -351 5 -352 100 -354 7 -354 137 -355 102 -355 356 -355 357 -356 41 -357 3 -357 4 -357 6 -357 12 -357 14 -357 16 -357 20 -357 26 -357 29 -357 31 -357 32 -357 33 -357 46 -357 66 -357 79 -357 257 -357 171 -357 173 -357 335 -357 337 -357 358 -357 359 -359 4 -359 14 -359 32 -362 7 -362 100 -260 237 -260 363 -364 0 -364 1 -364 7 -364 19 -364 73 -364 74 -364 75 -364 132 -364 133 -364 162 -364 163 -364 100 -364 101 -364 102 -364 365 -364 366 -364 367 -364 368 -364 369 -364 370 -364 371 -364 
372 -364 373 -364 374 -364 375 -364 376 -364 377 -365 74 -365 373 -366 74 -367 74 -368 74 -369 7 -369 74 -369 137 -370 74 -371 74 -372 7 -372 74 -372 137 -373 74 -373 132 -374 74 -375 74 -376 74 -377 74 -377 376 -378 323 -379 240 -380 140 -380 171 -381 382 -381 383 -381 384 -381 385 -382 383 -382 384 -383 102 -383 384 -384 386 -384 387 -385 102 -385 384 -386 193 -387 193 -389 390 -391 3 -391 71 -391 101 -391 258 -391 113 -391 339 -393 3 -393 110 -394 291 -395 282 -395 396 -396 390 -397 237 -397 291 -397 318 -397 320 -397 296 -398 160 -348 10 -399 82 -400 4 -400 5 -400 101 -400 102 -400 336 -400 337 -400 357 -400 401 -401 0 -401 6 -401 12 -401 16 -401 20 -401 33 -401 74 -401 102 -401 192 -401 193 -401 257 -401 205 -401 307 -401 335 -401 337 -401 345 -401 355 -401 364 -401 383 -401 385 -401 402 -401 403 -401 404 -401 405 -401 406 -402 102 -403 102 -403 121 -404 4 -404 5 -404 7 -404 16 -404 129 -404 137 -404 100 -404 101 -404 102 -404 220 -404 258 -404 336 -404 337 -405 4 -405 5 -405 102 -406 0 -406 1 -406 7 -406 19 -406 73 -406 74 -406 75 -406 132 -406 133 -406 76 -406 162 -406 163 -406 100 -406 101 -406 102 -406 364 -406 365 -406 366 -406 367 -406 368 -406 369 -406 371 -406 372 -406 373 -406 374 -406 375 -406 376 -406 377 -407 10 -409 237 -409 410 -409 411 -412 102 -413 12 -414 7 -415 16 -415 24 -415 113 -415 337 -416 241 -420 102 -420 402 -422 242 -422 325 -422 423 -424 7 -424 100 -426 136 -427 7 -427 137 -428 7 -428 110 -429 422 -430 131 -430 126 -430 431 -431 129 -432 177 -435 296 -437 7 -437 438 -441 226 -442 5 -443 7 -443 110 -443 168 -444 260 -445 394 -448 5 -449 5 -451 130 -451 328 -452 7 -452 137 -453 101 -453 308 -453 339 -453 341 -454 82 -455 7 -455 137 -456 301 -459 4 -459 5 -459 16 -459 101 -459 102 -459 178 -459 228 -459 257 -459 336 -459 337 -459 357 -459 401 -460 461 -461 237 -461 462 -463 318 -463 464 -465 454 -466 5 -466 10 -466 110 -466 121 -467 267 -468 7 -469 226 -469 250 -471 100 -472 119 -473 312 -474 137 -474 101 -478 110 -479 180 -480 100 
-480 186 -480 195 -482 5 -482 53 -482 59 -482 60 -482 61 -482 220 -482 126 -482 483 -483 5 -483 52 -483 53 -483 55 -483 56 -483 59 -483 60 -483 61 -483 62 -483 220 -483 126 -483 224 -483 232 -483 121 -483 125 -484 7 -484 110 -484 408 -486 237 -487 68 -488 300 -488 301 -489 113 -492 209 -493 494 -494 32 -494 389 -494 395 -494 396 -495 15 -496 244 -496 327 -496 497 -497 485 -498 7 -498 137 -500 7 -500 136 -500 137 -501 110 -502 7 -502 137 -503 20 -503 110 -503 92 -504 23 -504 100 -504 182 -504 223 -504 349 -505 92 -505 277 -505 506 -507 7 -507 136 -507 137 -508 280 -510 3 -511 129 -511 357 -512 121 -514 100 -516 242 -517 30 -517 65 -517 114 -518 242 -519 489 -521 237 -522 320 -522 513 -522 523 -524 110 -525 7 -525 137 -526 100 -527 7 -527 77 -527 157 -527 528 -528 77 -528 157 -529 5 -529 110 -530 7 -531 242 -531 450 -532 429 -533 7 -533 136 -533 142 -533 205 -533 286 -534 126 -535 237 -535 536 -537 74 -537 364 -537 406 -538 7 -538 136 -540 148 -541 237 -542 0 -542 3 -542 6 -542 7 -542 8 -542 9 -542 12 -542 20 -542 21 -542 22 -542 42 -542 49 -542 130 -542 74 -542 137 -542 139 -542 140 -542 141 -542 77 -542 79 -542 143 -542 150 -542 84 -542 100 -542 178 -542 180 -542 183 -542 190 -542 204 -542 217 -542 222 -542 226 -542 227 -542 228 -542 229 -542 233 -542 271 -542 283 -542 285 -542 290 -542 291 -542 292 -542 293 -542 294 -542 300 -542 309 -542 326 -542 118 -542 337 -542 349 -542 352 -542 364 -542 404 -542 406 -542 407 -542 417 -542 474 -542 477 -542 504 -542 514 -542 519 -542 527 -542 530 -542 543 -542 544 -542 545 -542 546 -542 547 -542 548 -542 549 -542 550 -542 551 -542 552 -542 553 -542 554 -544 36 -545 190 -546 130 -546 110 -546 184 -546 544 -547 7 -547 8 -547 9 -547 136 -547 100 -547 204 -547 220 -547 244 -547 550 -547 551 -548 7 -548 153 -548 100 -548 220 -548 550 -549 7 -549 136 -549 137 -549 110 -549 141 -549 175 -550 3 -550 20 -550 50 -550 110 -550 111 -550 138 -550 141 -550 79 -550 143 -550 145 -550 146 -550 149 -550 150 -550 158 -550 184 -550 217 -550 229 
-550 233 -550 283 -550 205 -550 295 -550 99 -550 337 -550 443 -550 458 -550 484 -550 503 -550 546 -550 549 -550 555 -550 556 -551 3 -551 8 -551 9 -551 20 -551 50 -551 130 -551 110 -551 138 -551 146 -551 100 -551 184 -551 203 -551 204 -551 205 -551 176 -551 337 -552 10 -553 7 -553 100 -553 440 -554 4 -554 7 -554 136 -554 137 -554 101 -554 214 -554 217 -554 218 -554 220 -554 332 -554 433 -554 491 -554 509 -554 550 -557 177 -557 196 -557 432 -558 318 -558 559 -560 4 -560 270 -560 360 -561 110 -562 14 -562 15 -562 563 -564 276 -564 505 -564 565 -565 92 -566 5 -566 220 -569 221 -570 141 -570 175 -573 160 -574 14 -574 31 -574 32 -575 47 -577 7 -577 136 -578 4 -578 5 -578 7 -579 461 -580 110 -581 291 -583 25 -584 237 -585 342 -586 7 -586 137 -587 4 -587 16 -587 101 -587 336 -587 337 -589 14 -589 137 -589 244 -589 96 -589 97 -589 246 -589 112 -589 248 -589 99 -589 254 -589 590 -589 591 -593 312 -595 291 -595 434 -596 169 -596 254 -597 5 -597 52 -597 55 -597 220 -597 126 -597 121 -598 312 -599 161 -600 472 -601 94 -602 7 -602 603 -603 7 -604 381 -604 382 -604 383 -604 384 -604 385 -605 96 -605 97 -606 7 -606 475 -608 99 -609 15 -610 237 -611 239 -612 5 -612 68 -612 487 -613 291 -614 7 -614 142 -614 205 -614 286 -615 7 -615 137 -616 291 -616 457 -617 170 -619 8 -619 9 -619 204 -619 220 -619 243 -619 476 -619 515 -619 551 -619 620 -621 137 -621 110 -621 220 -622 67 -622 121 -623 4 -623 7 -623 74 -623 137 -623 101 -623 102 -623 364 -623 406 -624 74 -624 364 -624 406 -625 63 -625 127 -626 5 -627 110 -627 100 -628 7 -629 461 -630 387 -632 336 -632 340 -632 342 -632 585 -633 244 -633 112 -633 247 -633 255 -633 606 \ No newline at end of file diff --git a/misc/sim/merge.py b/misc/sim/merge.py deleted file mode 100644 index 52e4f87..0000000 --- a/misc/sim/merge.py +++ /dev/null @@ -1,62 +0,0 @@ -import glob -import sys -inputDirPath = sys.argv[1] - -inputFilePaths = glob.glob(inputDirPath+"/*") -inputFilePaths.sort() - -merged = dict() - -stretches = [] - -total = 0 -for 
inputFilePath in inputFilePaths: - print "Processing file {}".format(inputFilePath) - with open(inputFilePath, 'r') as f: - inData = f.readlines() - pathsChecked = 0. - avgStretch = 0. - for line in inData: - dat = line.rstrip('\n').split(' ') - eHops = int(dat[0]) - nHops = int(dat[1]) - count = int(dat[2]) - if eHops not in merged: merged[eHops] = dict() - if nHops not in merged[eHops]: merged[eHops][nHops] = 0 - merged[eHops][nHops] += count - total += count - pathsChecked += count - stretch = float(nHops)/eHops - avgStretch += stretch*count - finStretch = avgStretch / max(1, pathsChecked) - stretches.append(str(finStretch)) - -hopsUsed = 0. -hopsNeeded = 0. -avgStretch = 0. -results = [] -for eHops in sorted(merged.keys()): - for nHops in sorted(merged[eHops].keys()): - count = merged[eHops][nHops] - result = "{} {} {}".format(eHops, nHops, count) - results.append(result) - hopsUsed += nHops*count - hopsNeeded += eHops*count - stretch = float(nHops)/eHops - avgStretch += stretch*count - print result -bandwidthUsage = hopsUsed/max(1, hopsNeeded) -avgStretch /= max(1, total) - -with open("results.txt", "w") as f: - f.write('\n'.join(results)) - -with open("stretches.txt", "w") as f: - f.write('\n'.join(stretches)) - -print "Total files processed: {}".format(len(inputFilePaths)) -print "Total paths found: {}".format(total) -print "Bandwidth usage: {}".format(bandwidthUsage) -print "Average stretch: {}".format(avgStretch) - - diff --git a/misc/sim/run-sim b/misc/sim/run-sim deleted file mode 100755 index 14057e8..0000000 --- a/misc/sim/run-sim +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -go run -tags debug misc/sim/treesim.go "$@" diff --git a/misc/sim/treesim-forward.py b/misc/sim/treesim-forward.py deleted file mode 100644 index f7ca509..0000000 --- a/misc/sim/treesim-forward.py +++ /dev/null @@ -1,901 +0,0 @@ -# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology) -# Steps: -# 1: Pick any node, here I'm using highest nodeID -# 2: Build 
spanning tree, each node stores path back to root -# Optionally with weights for each hop -# Ties broken by preferring a parent with higher degree -# 3: Distance metric: self->peer + (via tree) peer->dest -# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A) -# 5: Source-route traffic using the better of those two paths - -# Note: This makes no attempt to simulate a dynamic network -# E.g. A node's peers cannot be disconnected - -# TODO: -# Make better use of drop? -# In particular, we should be ignoring *all* recently dropped *paths* to the root -# To minimize route flapping -# Not really an issue in the sim, but probably needed for a real network - -import array -import gc -import glob -import gzip -import heapq -import os -import random -import time - -############# -# Constants # -############# - -# Reminder of where link cost comes in -LINK_COST = 1 - -# Timeout before dropping something, in simulated seconds -TIMEOUT = 60 - -########### -# Classes # -########### - -class PathInfo: - def __init__(self, nodeID): - self.nodeID = nodeID # e.g. IP - self.coords = [] # Position in tree - self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info - self.degree = 0 # Number of peers the sender has, used to break ties - # The above should be signed - self.path = [nodeID] # Path to node (in path-vector route) - self.time = 0 # Time info was updated, to keep track of e.g. 
timeouts - self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots - def clone(self): - # Return a deep-enough copy of the path - clone = PathInfo(None) - clone.nodeID = self.nodeID - clone.coords = self.coords[:] - clone.tstamp = self.tstamp - clone.degree = self.degree - clone.path = self.path[:] - clone.time = self.time - clone.treeID = self.treeID - return clone -# End class PathInfo - -class Node: - def __init__(self, nodeID): - self.info = PathInfo(nodeID) # Self NodeInfo - self.root = None # PathInfo to node at root of tree - self.drop = dict() # PathInfo to nodes from clus that have timed out - self.peers = dict() # PathInfo to peers - self.links = dict() # Links to peers (to pass messages) - self.msgs = [] # Said messages - self.table = dict() # Pre-computed lookup table of peer info - - def tick(self): - # Do periodic maintenance stuff, including push updates - self.info.time += 1 - if self.info.time > self.info.tstamp + TIMEOUT/4: - # Update timestamp at least once every 1/4 timeout period - # This should probably be randomized in a real implementation - self.info.tstamp = self.info.time - self.info.degree = 0# TODO decide if degree should be used, len(self.peers) - changed = False # Used to track when the network has converged - changed |= self.cleanRoot() - self.cleanDropped() - # Should probably send messages infrequently if there's nothing new to report - if self.info.tstamp == self.info.time: - msg = self.createMessage() - self.sendMessage(msg) - return changed - - def cleanRoot(self): - changed = False - if self.root and self.info.time - self.root.time > TIMEOUT: - print "DEBUG: clean root,", self.root.path - self.drop[self.root.treeID] = self.root - self.root = None - changed = True - if not self.root or self.root.treeID < self.info.treeID: - # No need to drop someone who'se worse than us - self.info.coords = [self.info.nodeID] - self.root = self.info.clone() - changed = True - elif 
self.root.treeID == self.info.treeID: - self.root = self.info.clone() - return changed - - def cleanDropped(self): - # May actually be a treeID... better to iterate over keys explicitly - nodeIDs = sorted(self.drop.keys()) - for nodeID in nodeIDs: - node = self.drop[nodeID] - if self.info.time - node.time > 4*TIMEOUT: - del self.drop[nodeID] - return None - - def createMessage(self): - # Message is just a tuple - # First element is the sender - # Second element is the root - # We will .clone() everything during the send operation - msg = (self.info, self.root) - return msg - - def sendMessage(self, msg): - for link in self.links.values(): - newMsg = (msg[0].clone(), msg[1].clone()) - link.msgs.append(newMsg) - return None - - def handleMessages(self): - changed = False - while self.msgs: - changed |= self.handleMessage(self.msgs.pop()) - return changed - - def handleMessage(self, msg): - changed = False - for node in msg: - # Update the path and timestamp for the sender and root info - node.path.append(self.info.nodeID) - node.time = self.info.time - # Update the sender's info in our list of peers - sender = msg[0] - self.peers[sender.nodeID] = sender - # Decide if we want to update the root - root = msg[1] - updateRoot = False - isSameParent = False - isBetterParent = False - if len(self.root.path) > 1 and len(root.path) > 1: - parent = self.peers[self.root.path[-2]] - if parent.nodeID == sender.nodeID: isSameParent = True - if sender.degree > parent.degree: - # This would also be where you check path uptime/reliability/whatever - # All else being equal, we prefer parents with high degree - # We are trusting peers to report degree correctly in this case - # So expect some performance reduction if your peers aren't trustworthy - # (Lies can increase average stretch by a few %) - isBetterParent = True - if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed - elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass - elif 
not self.root: updateRoot = True - elif self.root.treeID < root.treeID: updateRoot = True - elif self.root.treeID != root.treeID: pass - elif self.root.tstamp > root.tstamp: pass - elif len(root.path) < len(self.root.path): updateRoot = True - elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True - elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True - if updateRoot: - if not self.root or self.root.path != root.path: changed = True - self.root = root - self.info.coords = self.root.path - return changed - - def lookup(self, dest): - # Note: Can loop in an unconverged network - # The person looking up the route is responsible for checking for loops - best = None - bestDist = 0 - for node in self.peers.itervalues(): - # dist = distance to node + dist (on tree) from node to dest - dist = len(node.path)-1 + treeDist(node.coords, dest.coords) - if not best or dist < bestDist: - best = node - bestDist = dist - if best: - next = best.path[-2] - assert next in self.peers - return next - else: - # We failed to look something up - # TODO some way to signal this which doesn't crash - assert False - - def initTable(self): - # Pre-computes a lookup table for destination coords - # Insert parent first so you prefer them as a next-hop - self.table.clear() - parent = self.info.nodeID - if len(self.info.coords) >= 2: parent = self.info.coords[-2] - for peer in self.peers.itervalues(): - current = self.table - for coord in peer.coords: - if coord not in current: current[coord] = (peer.nodeID, dict()) - old = current[coord] - next = old[1] - oldPeer = self.peers[old[0]] - oldDist = len(oldPeer.coords) - oldDeg = oldPeer.degree - newDist = len(peer.coords) - newDeg = peer.degree - # Prefer parent - # Else prefer short distance from root - # If equal distance, prefer high degree - if peer.nodeID == parent: current[coord] = (peer.nodeID, next) - elif newDist < oldDist: current[coord] = (peer.nodeID, next) - elif newDist == oldDist and newDeg 
> oldDeg: current[coord] = (peer.nodeID, next) - current = next - return None - - def lookup_new(self, dest): - # Use pre-computed lookup table to look up next hop for dest coords - assert self.table - if len(self.info.coords) >= 2: parent = self.info.coords[-2] - else: parent = None - current = (parent, self.table) - c = None - for coord in dest.coords: - c = coord - if coord not in current[1]: break - current = current[1][coord] - next = current[0] - if c in self.peers: next = c - if next not in self.peers: - assert next == None - # You're the root of a different connected component - # You'd drop the packet in this case - # To make the path cache not die, need to return a valid next hop... - # Returning self for that reason - next = self.info.nodeID - return next -# End class Node - -#################### -# Helper Functions # -#################### - -def getIndexOfLCA(source, dest): - # Return index of last common ancestor in source/dest coords - # -1 if no common ancestor (e.g. different roots) - lcaIdx = -1 - minLen = min(len(source), len(dest)) - for idx in xrange(minLen): - if source[idx] == dest[idx]: lcaIdx = idx - else: break - return lcaIdx - -def treePath(source, dest): - # Return path with source at head and dest at tail - lastMatch = getIndexOfLCA(source, dest) - path = dest[-1:lastMatch:-1] + source[lastMatch:] - assert path[0] == dest[-1] - assert path[-1] == source[-1] - return path - -def treeDist(source, dest): - dist = len(source) + len(dest) - lcaIdx = getIndexOfLCA(source, dest) - dist -= 2*(lcaIdx+1) - return dist - -def dijkstra(nodestore, startingNodeID): - # Idea to use heapq and basic implementation taken from stackexchange post - # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python - nodeIDs = sorted(nodestore.keys()) - nNodes = len(nodeIDs) - idxs = dict() - for nodeIdx in xrange(nNodes): - nodeID = nodeIDs[nodeIdx] - idxs[nodeID] = nodeIdx - dists = array.array("H", [0]*nNodes) - queue = [(0, 
startingNodeID)] - while queue: - dist, nodeID = heapq.heappop(queue) - idx = idxs[nodeID] - if not dists[idx]: # Unvisited, otherwise we skip it - dists[idx] = dist - for peer in nodestore[nodeID].links: - if not dists[idxs[peer]]: - # Peer is also unvisited, so add to queue - heapq.heappush(queue, (dist+LINK_COST, peer)) - return dists - -def dijkstrall(nodestore): - # Idea to use heapq and basic implementation taken from stackexchange post - # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python - nodeIDs = sorted(nodestore.keys()) - nNodes = len(nodeIDs) - idxs = dict() - for nodeIdx in xrange(nNodes): - nodeID = nodeIDs[nodeIdx] - idxs[nodeID] = nodeIdx - dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end) - for sourceIdx in xrange(nNodes): - print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx]) - queue = [(0, sourceIdx)] - while queue: - dist, nodeIdx = heapq.heappop(queue) - distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx) - if not dists[distIdx]: # Unvisited, otherwise we skip it - dists[distIdx] = dist - for peer in nodestore[nodeIDs[nodeIdx]].links: - pIdx = idxs[peer] - pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx) - if not dists[pdIdx]: - # Peer is also unvisited, so add to queue - heapq.heappush(queue, (dist+LINK_COST, pIdx)) - return dists - -def linkNodes(node1, node2): - node1.links[node2.info.nodeID] = node2 - node2.links[node1.info.nodeID] = node1 - -############################ -# Store topology functions # -############################ - -def makeStoreSquareGrid(sideLength, randomize=True): - # Simple grid in a sideLength*sideLength square - # Just used to validate that the code runs - store = dict() - nodeIDs = list(range(sideLength*sideLength)) - if randomize: random.shuffle(nodeIDs) - for nodeID in nodeIDs: - store[nodeID] = Node(nodeID) - for index in xrange(len(nodeIDs)): - if (index % sideLength != 0): 
linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]]) - if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]]) - print "Grid store created, size {}".format(len(store)) - return store - -def makeStoreASRelGraph(pathToGraph): - #Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type) - with open(pathToGraph, "r") as f: - inData = f.readlines() - store = dict() - for line in inData: - if line.strip()[0] == "#": continue # Skip comment lines - line = line.replace('|'," ") - nodes = map(int, line.split()[0:2]) - if nodes[0] not in store: store[nodes[0]] = Node(nodes[0]) - if nodes[1] not in store: store[nodes[1]] = Node(nodes[1]) - linkNodes(store[nodes[0]], store[nodes[1]]) - print "CAIDA AS-relation graph successfully imported, size {}".format(len(store)) - return store - -def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0): - with open(pathToGraph, "r") as f: - inData = f.readlines() - store = dict() - nodeDeg = dict() - for line in inData: - if line.strip()[0] == "#": continue # Skip comment lines - line = line.replace('|'," ") - nodes = map(int, line.split()[0:2]) - if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0 - if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0 - nodeDeg[nodes[0]] += 1 - nodeDeg[nodes[1]] += 1 - sortedNodes = sorted(nodeDeg.keys(), \ - key=lambda x: (nodeDeg[x], x), \ - reverse=True) - maxDegNodeID = sortedNodes[degIdx] - return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID) - -def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID): - with open(pathToGraph, "r") as f: - inData = f.readlines() - store = dict() - for line in inData: - if line.strip()[0] == "#": continue # Skip comment lines - line = line.replace('|'," ") - nodes = map(int, line.split()[0:2]) - if nodes[0] not in store: - store[nodes[0]] = Node(nodes[0]) - if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000 - if nodes[1] not in store: - store[nodes[1]] = 
Node(nodes[1]) - if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000 - linkNodes(store[nodes[0]], store[nodes[1]]) - print "CAIDA AS-relation graph successfully imported, size {}".format(len(store)) - return store - -def makeStoreDimesEdges(pathToGraph, rootNodeID=None): - # Read from a DIMES csv-formatted graph from a gzip file - store = dict() - with gzip.open(pathToGraph, "r") as f: - inData = f.readlines() - size = len(inData) - index = 0 - for edge in inData: - if not index % 1000: - pct = 100.0*index/size - print "Processing edge {}, {:.2f}%".format(index, pct) - index += 1 - dat = edge.rstrip().split(',') - node1 = "N" + str(dat[0].strip()) - node2 = "N" + str(dat[1].strip()) - if '?' in node1 or '?' in node2: continue #Unknown node - if node1 == rootNodeID: node1 = "R" + str(dat[0].strip()) - if node2 == rootNodeID: node2 = "R" + str(dat[1].strip()) - if node1 not in store: store[node1] = Node(node1) - if node2 not in store: store[node2] = Node(node2) - if node1 != node2: linkNodes(store[node1], store[node2]) - print "DIMES graph successfully imported, size {}".format(len(store)) - return store - -def makeStoreGeneratedGraph(pathToGraph, root=None): - with open(pathToGraph, "r") as f: - inData = f.readlines() - store = dict() - for line in inData: - if line.strip()[0] == "#": continue # Skip comment lines - nodes = map(int, line.strip().split(' ')[0:2]) - node1 = nodes[0] - node2 = nodes[1] - if node1 == root: node1 += 1000000 - if node2 == root: node2 += 1000000 - if node1 not in store: store[node1] = Node(node1) - if node2 not in store: store[node2] = Node(node2) - linkNodes(store[node1], store[node2]) - print "Generated graph successfully imported, size {}".format(len(store)) - return store - - -############################################ -# Functions used as parts of network tests # -############################################ - -def idleUntilConverged(store): - nodeIDs = sorted(store.keys()) - timeOfLastChange = 0 - step = 0 - # Idle 
until the network has converged - while step - timeOfLastChange < 4*TIMEOUT: - step += 1 - print "Step: {}, last change: {}".format(step, timeOfLastChange) - changed = False - for nodeID in nodeIDs: - # Update node status, send messages - changed |= store[nodeID].tick() - for nodeID in nodeIDs: - # Process messages - changed |= store[nodeID].handleMessages() - if changed: timeOfLastChange = step - initTables(store) - return store - -def getCacheIndex(nodes, sourceIndex, destIndex): - return sourceIndex*nodes + destIndex - -def initTables(store): - nodeIDs = sorted(store.keys()) - nNodes = len(nodeIDs) - print "Initializing routing tables for {} nodes".format(nNodes) - for idx in xrange(nNodes): - nodeID = nodeIDs[idx] - store[nodeID].initTable() - print "Routing tables initialized" - return None - -def getCache(store): - nodeIDs = sorted(store.keys()) - nNodes = len(nodeIDs) - nodeIdxs = dict() - for nodeIdx in xrange(nNodes): - nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx - cache = array.array("H", [0]*nNodes*nNodes) - for sourceIdx in xrange(nNodes): - sourceID = nodeIDs[sourceIdx] - print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID) - for destIdx in xrange(nNodes): - destID = nodeIDs[destIdx] - if sourceID == destID: nextHop = destID # lookup would fail - else: nextHop = store[sourceID].lookup(store[destID].info) - nextHopIdx = nodeIdxs[nextHop] - cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx - return cache - -def testPaths(store, dists): - cache = getCache(store) - nodeIDs = sorted(store.keys()) - nNodes = len(nodeIDs) - idxs = dict() - for nodeIdx in xrange(nNodes): - nodeID = nodeIDs[nodeIdx] - idxs[nodeID] = nodeIdx - results = dict() - for sourceIdx in xrange(nNodes): - sourceID = nodeIDs[sourceIdx] - print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID) - #dists = dijkstra(store, sourceID) - for destIdx in xrange(nNodes): - destID = nodeIDs[destIdx] - if destID == 
sourceID: continue # Skip self - distIdx = getCacheIndex(nNodes, sourceIdx, destIdx) - eHops = dists[distIdx] - if not eHops: continue # The network is split, no path exists - hops = 0 - for pair in ((sourceIdx, destIdx),): - nHops = 0 - locIdx = pair[0] - dIdx = pair[1] - while locIdx != dIdx: - locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)] - nHops += 1 - if not hops or nHops < hops: hops = nHops - if eHops not in results: results[eHops] = dict() - if hops not in results[eHops]: results[eHops][hops] = 0 - results[eHops][hops] += 1 - return results - -def getAvgStretch(pathMatrix): - avgStretch = 0. - checked = 0. - for eHops in sorted(pathMatrix.keys()): - for nHops in sorted(pathMatrix[eHops].keys()): - count = pathMatrix[eHops][nHops] - stretch = float(nHops)/float(max(1, eHops)) - avgStretch += stretch*count - checked += count - avgStretch /= max(1, checked) - return avgStretch - -def getMaxStretch(pathMatrix): - maxStretch = 0. - for eHops in sorted(pathMatrix.keys()): - for nHops in sorted(pathMatrix[eHops].keys()): - stretch = float(nHops)/float(max(1, eHops)) - maxStretch = max(maxStretch, stretch) - return maxStretch - -def getCertSizes(store): - # Returns nCerts frequency distribution - # De-duplicates common certs (for shared prefixes in the path) - sizes = dict() - for node in store.values(): - certs = set() - for peer in node.peers.values(): - pCerts = set() - assert len(peer.path) == 2 - assert peer.coords[-1] == peer.path[0] - hops = peer.coords + peer.path[1:] - for hopIdx in xrange(len(hops)-1): - send = hops[hopIdx] - if send == node.info.nodeID: continue # We created it, already have it - path = hops[0:hopIdx+2] - # Each cert is signed by the sender - # Includes information about the path from the sender to the next hop - # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2] - cert = "{}:{}".format(send, path) - certs.add(cert) - size = len(certs) - if size not in sizes: sizes[size] = 0 - sizes[size] += 1 - return sizes - 
-def getMinLinkCertSizes(store): - # Returns nCerts frequency distribution - # De-duplicates common certs (for shared prefixes in the path) - # Based on the minimum number of certs that must be traded through a particular link - # Handled per link - sizes = dict() - for node in store.values(): - peerCerts = dict() - for peer in node.peers.values(): - pCerts = set() - assert len(peer.path) == 2 - assert peer.coords[-1] == peer.path[0] - hops = peer.coords + peer.path[1:] - for hopIdx in xrange(len(hops)-1): - send = hops[hopIdx] - if send == node.info.nodeID: continue # We created it, already have it - path = hops[0:hopIdx+2] - # Each cert is signed by the sender - # Includes information about the path from the sender to the next hop - # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2] - cert = "{}:{}".format(send, path) - pCerts.add(cert) - peerCerts[peer.nodeID] = pCerts - for peer in peerCerts: - size = 0 - pCerts = peerCerts[peer] - for cert in pCerts: - required = True - for p2 in peerCerts: - if p2 == peer: continue - p2Certs = peerCerts[p2] - if cert in p2Certs: required = False - if required: size += 1 - if size not in sizes: sizes[size] = 0 - sizes[size] += 1 - return sizes - -def getPathSizes(store): - # Returns frequency distribution of the total number of hops the routing table - # I.e. 
a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15 - sizes = dict() - for node in store.values(): - size = 0 - for peer in node.peers.values(): - assert len(peer.path) == 2 - assert peer.coords[-1] == peer.path[0] - peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1 - size += peerSize - if size not in sizes: sizes[size] = 0 - sizes[size] += 1 - return sizes - -def getPeerSizes(store): - # Returns frequency distribution of the number of peers each node has - sizes = dict() - for node in store.values(): - nPeers = len(node.peers) - if nPeers not in sizes: sizes[nPeers] = 0 - sizes[nPeers] += 1 - return sizes - -def getAvgSize(sizes): - sumSizes = 0 - nNodes = 0 - for size in sizes: - count = sizes[size] - sumSizes += size*count - nNodes += count - avgSize = float(sumSizes)/max(1, nNodes) - return avgSize - -def getMaxSize(sizes): - return max(sizes.keys()) - -def getMinSize(sizes): - return min(sizes.keys()) - -def getResults(pathMatrix): - results = [] - for eHops in sorted(pathMatrix.keys()): - for nHops in sorted(pathMatrix[eHops].keys()): - count = pathMatrix[eHops][nHops] - results.append("{} {} {}".format(eHops, nHops, count)) - return '\n'.join(results) - -#################################### -# Functions to run different tests # -#################################### - -def runTest(store): - # Runs the usual set of tests on the store - # Does not save results, so only meant for quick tests - # To e.g. 
check the code works, maybe warm up the pypy jit - for node in store.values(): - node.info.time = random.randint(0, TIMEOUT) - node.info.tstamp = TIMEOUT - print "Begin testing network" - dists = None - if not dists: dists = dijkstrall(store) - idleUntilConverged(store) - pathMatrix = testPaths(store, dists) - avgStretch = getAvgStretch(pathMatrix) - maxStretch = getMaxStretch(pathMatrix) - peers = getPeerSizes(store) - certs = getCertSizes(store) - paths = getPathSizes(store) - linkCerts = getMinLinkCertSizes(store) - avgPeerSize = getAvgSize(peers) - maxPeerSize = getMaxSize(peers) - avgCertSize = getAvgSize(certs) - maxCertSize = getMaxSize(certs) - avgPathSize = getAvgSize(paths) - maxPathSize = getMaxSize(paths) - avgLinkCert = getAvgSize(linkCerts) - maxLinkCert = getMaxSize(linkCerts) - totalCerts = sum(map(lambda x: x*certs[x], certs.keys())) - totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links - avgCertsPerLink = float(totalCerts)/max(1, totalLinks) - print "Finished testing network" - print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch) - print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize) - print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize) - print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize) - print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert) - print "Avg certs per link (one-way): {}".format(avgCertsPerLink) - return # End of function - -def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc = 1): - # Checks performance for every possible choice of root node - # Saves output for each root node to a separate file on disk - # path = input path to some caida.org formatted AS-relationship graph - if not os.path.exists(outDir): os.makedirs(outDir) - assert os.path.exists(outDir) - store = makeStoreASRelGraph(path) - nodes = sorted(store.keys()) - for nodeIdx in 
xrange(len(nodes)): - if nodeIdx % proc != 0: continue # Work belongs to someone else - rootNodeID = nodes[nodeIdx] - outpath = outDir+"/{}".format(rootNodeID) - if os.path.exists(outpath): - print "Skipping {}, already processed".format(rootNodeID) - continue - store = makeStoreASRelGraphFixedRoot(path, rootNodeID) - for node in store.values(): - node.info.time = random.randint(0, TIMEOUT) - node.info.tstamp = TIMEOUT - print "Beginning {}, size {}".format(nodeIdx, len(store)) - if not dists: dists = dijkstrall(store) - idleUntilConverged(store) - pathMatrix = testPaths(store, dists) - avgStretch = getAvgStretch(pathMatrix) - maxStretch = getMaxStretch(pathMatrix) - results = getResults(pathMatrix) - with open(outpath, "w") as f: - f.write(results) - print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store)) - print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch) - #break # Stop after 1, because they can take forever - return # End of function - -def timelineASTest(): - # Meant to study the performance of the network as a function of network size - # Loops over a set of AS-relationship graphs - # Runs a test on each graph, selecting highest-degree node as the root - # Saves results for each graph to a separate file on disk - outDir = "output-treesim-timeline-AS" - if not os.path.exists(outDir): os.makedirs(outDir) - assert os.path.exists(outDir) - paths = sorted(glob.glob("asrel/datasets/*")) - for path in paths: - date = os.path.basename(path).split(".")[0] - outpath = outDir+"/{}".format(date) - if os.path.exists(outpath): - print "Skipping {}, already processed".format(date) - continue - store = makeStoreASRelGraphMaxDeg(path) - dists = None - for node in store.values(): - node.info.time = random.randint(0, TIMEOUT) - node.info.tstamp = TIMEOUT - print "Beginning {}, size {}".format(date, len(store)) - if not dists: dists = dijkstrall(store) - idleUntilConverged(store) - pathMatrix = testPaths(store, dists) - avgStretch 
= getAvgStretch(pathMatrix) - maxStretch = getMaxStretch(pathMatrix) - results = getResults(pathMatrix) - with open(outpath, "w") as f: - f.write(results) - print "Finished {} with {} nodes".format(date, len(store)) - print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch) - #break # Stop after 1, because they can take forever - return # End of function - -def timelineDimesTest(): - # Meant to study the performance of the network as a function of network size - # Loops over a set of AS-relationship graphs - # Runs a test on each graph, selecting highest-degree node as the root - # Saves results for each graph to a separate file on disk - outDir = "output-treesim-timeline-dimes" - if not os.path.exists(outDir): os.makedirs(outDir) - assert os.path.exists(outDir) - # Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year - paths = sorted(glob.glob("DIMES/ASEdges/*.gz")) - exists = set(glob.glob(outDir+"/*")) - for path in paths: - date = os.path.basename(path).split(".")[0] - outpath = outDir+"/{}".format(date) - if outpath in exists: - print "Skipping {}, already processed".format(date) - continue - store = makeStoreDimesEdges(path) - # Get the highest degree node and make it root - # Sorted by nodeID just to make it stable in the event of a tie - nodeIDs = sorted(store.keys()) - bestRoot = "" - bestDeg = 0 - for nodeID in nodeIDs: - node = store[nodeID] - if len(node.links) > bestDeg: - bestRoot = nodeID - bestDeg = len(node.links) - assert bestRoot - store = makeStoreDimesEdges(path, bestRoot) - rootID = "R" + bestRoot[1:] - assert rootID in store - # Don't forget to set random seed before setting times - # To make results reproducible - nodeIDs = sorted(store.keys()) - random.seed(12345) - for nodeID in nodeIDs: - node = store[nodeID] - node.info.time = random.randint(0, TIMEOUT) - node.info.tstamp = TIMEOUT - print "Beginning {}, size {}".format(date, len(store)) - if not dists: dists = dijkstrall(store) - idleUntilConverged(store) - 
pathMatrix = testPaths(store, dists) - avgStretch = getAvgStretch(pathMatrix) - maxStretch = getMaxStretch(pathMatrix) - results = getResults(pathMatrix) - with open(outpath, "w") as f: - f.write(results) - print "Finished {} with {} nodes".format(date, len(store)) - print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch) - break # Stop after 1, because they can take forever - return # End of function - -def scalingTest(maxTests=None, inputDir="graphs"): - # Meant to study the performance of the network as a function of network size - # Loops over a set of nodes in a previously generated graph - # Runs a test on each graph, testing each node as the root - # if maxTests is set, tests only that number of roots (highest degree first) - # Saves results for each graph to a separate file on disk - outDir = "output-treesim-{}".format(inputDir) - if not os.path.exists(outDir): os.makedirs(outDir) - assert os.path.exists(outDir) - paths = sorted(glob.glob("{}/*".format(inputDir))) - exists = set(glob.glob(outDir+"/*")) - for path in paths: - gc.collect() # pypy waits for gc to close files - graph = os.path.basename(path).split(".")[0] - store = makeStoreGeneratedGraph(path) - # Get the highest degree node and make it root - # Sorted by nodeID just to make it stable in the event of a tie - nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True) - dists = None - if maxTests: nodeIDs = nodeIDs[:maxTests] - for nodeID in nodeIDs: - nodeIDStr = str(nodeID).zfill(len(str(len(store)-1))) - outpath = outDir+"/{}-{}".format(graph, nodeIDStr) - if outpath in exists: - print "Skipping {}-{}, already processed".format(graph, nodeIDStr) - continue - store = makeStoreGeneratedGraph(path, nodeID) - # Don't forget to set random seed before setting times - random.seed(12345) # To make results reproducible - nIDs = sorted(store.keys()) - for nID in nIDs: - node = store[nID] - node.info.time = random.randint(0, TIMEOUT) - node.info.tstamp = TIMEOUT - print 
"Beginning {}, size {}".format(graph, len(store)) - if not dists: dists = dijkstrall(store) - idleUntilConverged(store) - pathMatrix = testPaths(store, dists) - avgStretch = getAvgStretch(pathMatrix) - maxStretch = getMaxStretch(pathMatrix) - results = getResults(pathMatrix) - with open(outpath, "w") as f: - f.write(results) - print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID) - print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch) - return # End of function - -################## -# Main Execution # -################## - -if __name__ == "__main__": - if True: # Run a quick test - random.seed(12345) # DEBUG - store = makeStoreSquareGrid(4) - runTest(store) # Quick test - store = None - # Do some real work - #runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz")) - #timelineDimesTest() - #rootNodeASTest("asrel/datasets/19980101.as-rel.txt") - #timelineASTest() - #rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype") - #scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph - #store = makeStoreGeneratedGraph("bgp_tables") - #store = makeStoreGeneratedGraph("skitter") - #store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919 - #store = makeStoreGeneratedGraph("fc00-2017-08-12.txt") - if store: runTest(store) - #rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1) - #scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph - #scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph - #scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph - #scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph - if not store: - import sys - args = sys.argv - if len(args) == 2: - job_number = int(sys.argv[1]) - rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number) - else: - print "Usage: {} job_number".format(args[0]) - print "job_number = 
which job set to run on this node (1-indexed)" - diff --git a/misc/sim/treesim.go b/misc/sim/treesim.go deleted file mode 100644 index 22cf881..0000000 --- a/misc/sim/treesim.go +++ /dev/null @@ -1,459 +0,0 @@ -// +build !lint - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" - - "github.com/gologme/log" - - . "github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil" - - . "github.com/yggdrasil-network/yggdrasil-go/src/crypto" -) - -//////////////////////////////////////////////////////////////////////////////// - -type Node struct { - index int - core Core - send chan<- []byte - recv <-chan []byte -} - -func (n *Node) init(index int) { - n.index = index - n.core.Init() - n.send = n.core.DEBUG_getSend() - n.recv = n.core.DEBUG_getRecv() - n.core.DEBUG_simFixMTU() -} - -func (n *Node) printTraffic() { - for { - packet := <-n.recv - fmt.Println(n.index, packet) - //panic("Got a packet") - } -} - -func (n *Node) startPeers() { - //for _, p := range n.core.Peers.Ports { - // go p.MainLoop() - //} - //go n.printTraffic() - //n.core.Peers.DEBUG_startPeers() -} - -func linkNodes(m, n *Node) { - // Don't allow duplicates - if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigningPublicKey()) { - return - } - // Create peers - // Buffering reduces packet loss in the sim - // This slightly speeds up testing (fewer delays before retrying a ping) - pLinkPub, pLinkPriv := m.core.DEBUG_newBoxKeys() - qLinkPub, qLinkPriv := m.core.DEBUG_newBoxKeys() - p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getEncryptionPublicKey(), - n.core.DEBUG_getSigningPublicKey(), *m.core.DEBUG_getSharedKey(pLinkPriv, qLinkPub)) - q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getEncryptionPublicKey(), - m.core.DEBUG_getSigningPublicKey(), *n.core.DEBUG_getSharedKey(qLinkPriv, pLinkPub)) - DEBUG_simLinkPeers(p, q) - return -} - -func makeStoreSquareGrid(sideLength int) map[int]*Node { - store := 
make(map[int]*Node) - nNodes := sideLength * sideLength - idxs := make([]int, 0, nNodes) - // TODO shuffle nodeIDs - for idx := 1; idx <= nNodes; idx++ { - idxs = append(idxs, idx) - } - for _, idx := range idxs { - node := &Node{} - node.init(idx) - store[idx] = node - } - for idx := 0; idx < nNodes; idx++ { - if (idx % sideLength) != 0 { - linkNodes(store[idxs[idx]], store[idxs[idx-1]]) - } - if idx >= sideLength { - linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]]) - } - } - //for _, node := range store { node.initPorts() } - return store -} - -func makeStoreStar(nNodes int) map[int]*Node { - store := make(map[int]*Node) - center := &Node{} - center.init(0) - store[0] = center - for idx := 1; idx < nNodes; idx++ { - node := &Node{} - node.init(idx) - store[idx] = node - linkNodes(center, node) - } - return store -} - -func loadGraph(path string) map[int]*Node { - f, err := os.Open(path) - if err != nil { - panic(err) - } - defer f.Close() - store := make(map[int]*Node) - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - nodeIdxstrs := strings.Split(line, " ") - nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0]) - nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1]) - if store[nodeIdx0] == nil { - node := &Node{} - node.init(nodeIdx0) - store[nodeIdx0] = node - } - if store[nodeIdx1] == nil { - node := &Node{} - node.init(nodeIdx1) - store[nodeIdx1] = node - } - linkNodes(store[nodeIdx0], store[nodeIdx1]) - } - //for _, node := range store { node.initPorts() } - return store -} - -//////////////////////////////////////////////////////////////////////////////// - -func startNetwork(store map[[32]byte]*Node) { - for _, node := range store { - node.startPeers() - } -} - -func getKeyedStore(store map[int]*Node) map[[32]byte]*Node { - newStore := make(map[[32]byte]*Node) - for _, node := range store { - newStore[node.core.DEBUG_getSigningPublicKey()] = node - } - return newStore -} - -func testPaths(store map[[32]byte]*Node) bool { - nNodes := len(store) - count 
:= 0 - for _, source := range store { - count++ - fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index) - for _, dest := range store { - //if source == dest { continue } - destLoc := dest.core.DEBUG_getLocator() - coords := destLoc.DEBUG_getCoords() - temp := 0 - ttl := ^uint64(0) - oldTTL := ttl - for here := source; here != dest; { - temp++ - if temp > 4096 { - fmt.Println("Loop?") - time.Sleep(time.Second) - return false - } - nextPort := here.core.DEBUG_switchLookup(coords) - // First check if "here" is accepting packets from the previous node - // TODO explain how this works - ports := here.core.DEBUG_getPeers().DEBUG_getPorts() - nextPeer := ports[nextPort] - if nextPeer == nil { - fmt.Println("Peer associated with next port is nil") - return false - } - next := store[nextPeer.DEBUG_getSigKey()] - /* - if next == here { - //for idx, link := range here.links { - // fmt.Println("DUMP:", idx, link.nodeID) - //} - if nextPort != 0 { panic("This should not be") } - fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl) - //here.table.DEBUG_dumpTable() - //fmt.Println("Ports:", here.nodeID, here.ports) - return false - panic(fmt.Sprintln("Routing Loop:", - source.index, - here.index, - dest.index)) - } - */ - if temp > 4090 { - fmt.Println("DEBUG:", - source.index, source.core.DEBUG_getLocator(), - here.index, here.core.DEBUG_getLocator(), - dest.index, dest.core.DEBUG_getLocator()) - //here.core.DEBUG_getSwitchTable().DEBUG_dumpTable() - } - if here != source { - // This is sufficient to check for routing loops or blackholes - //break - } - if here == next { - fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL) - return false - } - here = next - } - } - } - return true -} - -func stressTest(store map[[32]byte]*Node) { - fmt.Println("Stress testing network...") - nNodes := len(store) - dests := make([][]byte, 0, nNodes) - for _, dest := range store { - loc := dest.core.DEBUG_getLocator() - 
coords := loc.DEBUG_getCoords() - dests = append(dests, coords) - } - lookups := 0 - start := time.Now() - for _, source := range store { - for _, coords := range dests { - source.core.DEBUG_switchLookup(coords) - lookups++ - } - } - timed := time.Since(start) - fmt.Printf("%d lookups in %s (%f lookups per second)\n", - lookups, - timed, - float64(lookups)/timed.Seconds()) -} - -func pingNodes(store map[[32]byte]*Node) { - fmt.Println("Sending pings...") - nNodes := len(store) - count := 0 - equiv := func(a []byte, b []byte) bool { - if len(a) != len(b) { - return false - } - for idx := 0; idx < len(a); idx++ { - if a[idx] != b[idx] { - return false - } - } - return true - } - for _, source := range store { - count++ - //if count > 16 { break } - fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index) - sourceKey := source.core.DEBUG_getEncryptionPublicKey() - payload := sourceKey[:] - sourceAddr := source.core.DEBUG_getAddr()[:] - sendTo := func(bs []byte, destAddr []byte) { - packet := make([]byte, 40+len(bs)) - copy(packet[8:24], sourceAddr) - copy(packet[24:40], destAddr) - copy(packet[40:], bs) - packet[0] = 6 << 4 - source.send <- packet - } - destCount := 0 - for _, dest := range store { - destCount += 1 - fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount) - if dest == source { - fmt.Println("Skipping self") - continue - } - destAddr := dest.core.DEBUG_getAddr()[:] - ticker := time.NewTicker(150 * time.Millisecond) - sendTo(payload, destAddr) - for loop := true; loop; { - select { - case packet := <-dest.recv: - { - if equiv(payload, packet[len(packet)-len(payload):]) { - loop = false - } - } - case <-ticker.C: - sendTo(payload, destAddr) - //dumpDHTSize(store) // note that this uses racey functions to read things... 
- } - } - ticker.Stop() - } - //break // Only try sending pings from 1 node - // This is because, for some reason, stopTun() doesn't always close it - // And if two tuns are up, bad things happen (sends via wrong interface) - } - fmt.Println("Finished pinging nodes") -} - -func pingBench(store map[[32]byte]*Node) { - fmt.Println("Benchmarking pings...") - nPings := 0 - payload := make([]byte, 1280+40) // MTU + ipv6 header - var timed time.Duration - //nNodes := len(store) - count := 0 - for _, source := range store { - count++ - //fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index) - getPing := func(key [32]byte, decodedCoords []byte) []byte { - // TODO write some function to do this the right way, put... somewhere... - coords := DEBUG_wire_encode_coords(decodedCoords) - packet := make([]byte, 0, len(key)+len(coords)+len(payload)) - packet = append(packet, key[:]...) - packet = append(packet, coords...) - packet = append(packet, payload[:]...) - return packet - } - for _, dest := range store { - key := dest.core.DEBUG_getEncryptionPublicKey() - loc := dest.core.DEBUG_getLocator() - coords := loc.DEBUG_getCoords() - ping := getPing(key, coords) - // TODO make sure the session is open first - start := time.Now() - for i := 0; i < 1000000; i++ { - source.send <- ping - nPings++ - } - timed += time.Since(start) - break - } - break - } - fmt.Printf("Sent %d pings in %s (%f per second)\n", - nPings, - timed, - float64(nPings)/timed.Seconds()) -} - -func dumpStore(store map[NodeID]*Node) { - for _, node := range store { - fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator()) - node.core.DEBUG_getSwitchTable().DEBUG_dumpTable() - } -} - -func dumpDHTSize(store map[[32]byte]*Node) { - var min, max, sum int - for _, node := range store { - num := node.core.DEBUG_getDHTSize() - min = num - max = num - break - } - for _, node := range store { - num := node.core.DEBUG_getDHTSize() - if num < min { - min = num - } - if num > max { - 
max = num - } - sum += num - } - avg := float64(sum) / float64(len(store)) - fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max) -} - -func (n *Node) startTCP(listen string) { - n.core.DEBUG_setupAndStartGlobalTCPInterface(listen) -} - -func (n *Node) connectTCP(remoteAddr string) { - n.core.AddPeer(remoteAddr, remoteAddr) -} - -//////////////////////////////////////////////////////////////////////////////// - -var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") -var memprofile = flag.String("memprofile", "", "write memory profile to this file") - -func main() { - flag.Parse() - if *cpuprofile != "" { - f, err := os.Create(*cpuprofile) - if err != nil { - panic(fmt.Sprintf("could not create CPU profile: ", err)) - } - if err := pprof.StartCPUProfile(f); err != nil { - panic(fmt.Sprintf("could not start CPU profile: ", err)) - } - defer pprof.StopCPUProfile() - } - if *memprofile != "" { - f, err := os.Create(*memprofile) - if err != nil { - panic(fmt.Sprintf("could not create memory profile: ", err)) - } - defer func() { pprof.WriteHeapProfile(f); f.Close() }() - } - fmt.Println("Test") - Util_testAddrIDMask() - idxstore := makeStoreSquareGrid(4) - //idxstore := makeStoreStar(256) - //idxstore := loadGraph("misc/sim/hype-2016-09-19.list") - //idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt") - //idxstore := loadGraph("skitter") - kstore := getKeyedStore(idxstore) - //* - logger := log.New(os.Stderr, "", log.Flags()) - for _, n := range kstore { - n.core.DEBUG_setLogger(logger) - } - //*/ - startNetwork(kstore) - //time.Sleep(10*time.Second) - // Note that testPaths only works if pressure is turned off - // Otherwise congestion can lead to routing loops? 
- for finished := false; !finished; {
- finished = testPaths(kstore)
- }
- pingNodes(kstore)
- //pingBench(kstore) // Only after disabling debug output
- //stressTest(kstore)
- //time.Sleep(120 * time.Second)
- dumpDHTSize(kstore) // note that this uses racey functions to read things...
- if false {
- // This connects the sim to the local network
- for _, node := range kstore {
- node.startTCP("localhost:0")
- node.connectTCP("localhost:12345")
- break // just 1
- }
- for _, node := range kstore {
- go func() {
- // Just dump any packets sent to this node
- for range node.recv {
- }
- }()
- }
- var block chan struct{}
- <-block
- }
- runtime.GC()
-} diff --git a/src/core/link.go b/src/core/link.go index 3bdb02a..9b8e089 100644 --- a/src/core/link.go +++ b/src/core/link.go @@ -250,7 +250,8 @@ func (intf *link) handler() (chan struct{}, error) { intf.links.core.log.Infof("Connected %s: %s, source %s", strings.ToUpper(intf.info.linkType), themString, intf.info.local) // Run the handler - err = intf.links.core.PacketConn.HandleConn(ed25519.PublicKey(intf.info.key[:]), intf.conn) + var metric uint64 // TODO exchange metric in metadata, use max value + err = intf.links.core.PacketConn.HandleConn(ed25519.PublicKey(intf.info.key[:]), intf.conn, metric) // TODO don't report an error if it's just a 'use of closed network connection' if err != nil { intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",