
Merge pull request #799 from yggdrasil-network/develop

Version 0.4.0
Neil Alexander 2021-07-04 09:34:38 +01:00 committed by GitHub
commit 35e8ff7c9d
96 changed files with 2781 additions and 11901 deletions


@ -16,6 +16,11 @@ jobs:
go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.31.0
golangci-lint run
- run:
name: Run Go tests
command: |
go test ./...
build-linux:
docker:
- image: circleci/golang:1.16
@ -157,43 +162,6 @@ jobs:
paths:
- upload
build-windows:
docker:
- image: circleci/golang:1.16
steps:
- checkout
- run:
name: Create artifact upload directory and set variables
command: |
mkdir /tmp/upload
echo 'export CINAME=$(sh contrib/semver/name.sh)' >> $BASH_ENV
echo 'export CIVERSION=$(sh contrib/semver/version.sh --bare)' >> $BASH_ENV
git config --global user.email "$(git log --format='%ae' HEAD -1)";
git config --global user.name "$(git log --format='%an' HEAD -1)";
- run:
name: Install tools
command: |
sudo apt-get update
sudo apt-get -y install msitools wixl
- run:
name: Build for Windows
command: |
rm -f {yggdrasil,yggdrasilctl}
GOOS=windows GOARCH=amd64 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-amd64.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-amd64.exe;
GOOS=windows GOARCH=386 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-i386.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-i386.exe;
bash contrib/msi/build-msi.sh x64
bash contrib/msi/build-msi.sh x86
mv *.msi /tmp/upload
- persist_to_workspace:
root: /tmp
paths:
- upload
build-other:
docker:
- image: circleci/golang:1.16
@ -224,6 +192,13 @@ jobs:
GOOS=freebsd GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-amd64 && mv yggdrasilctl /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-freebsd-amd64;
GOOS=freebsd GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-i386 && mv yggdrasilctl /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-freebsd-i386;
- run:
name: Build for Windows
command: |
rm -f {yggdrasil,yggdrasilctl}
GOOS=windows GOARCH=amd64 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-amd64.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-amd64.exe;
GOOS=windows GOARCH=386 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-i386.exe && mv yggdrasilctl.exe /tmp/upload/$CINAME-$CIVERSION-yggdrasilctl-windows-i386.exe;
- persist_to_workspace:
root: /tmp
paths:
@ -247,11 +222,9 @@ workflows:
- lint
- build-linux
- build-macos
- build-windows
- build-other
- upload:
requires:
- build-linux
- build-macos
- build-windows
- build-other

.gitmodules vendored

@ -1,3 +0,0 @@
[submodule "doc/yggdrasil-network.github.io"]
path = doc/yggdrasil-network.github.io
url = https://github.com/yggdrasil-network/yggdrasil-network.github.io/


@ -25,6 +25,58 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- in case of vulnerabilities.
-->
## [0.4.0] - 2021-07-04
### Added
- New routing scheme, which is backwards incompatible with previous versions of Yggdrasil
- The wire protocol version number, exchanged as part of the peer setup handshake, has been increased to 0.4
- Nodes running this new version **will not** be able to peer with earlier versions of Yggdrasil
- Please note that **the network may be temporarily unstable** while infrastructure is being upgraded to the new release
- TLS connections now use public key pinning
- If no public key was already pinned, then the public key received as part of the TLS handshake is pinned to the connection
- The public key received as part of the handshake is checked against the pinned keys, and if no match is found, the connection is rejected
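A minimal sketch of that accept/reject decision, assuming a helper that is handed whatever keys are already pinned for the peer plus the key presented in the TLS handshake (the function name and surrounding types are illustrative, not the actual link-layer code):

package sketch

import (
    "bytes"
    "crypto/ed25519"
)

// allowHandshake returns true if the key presented in the TLS handshake is
// acceptable: either nothing is pinned yet (in which case the handshake key
// becomes the pinned key for this connection), or it matches a pinned key.
func allowHandshake(pinned []ed25519.PublicKey, handshakeKey ed25519.PublicKey) bool {
    if len(pinned) == 0 {
        return true // nothing pinned yet: pin the handshake key to the connection
    }
    for _, key := range pinned {
        if bytes.Equal(key, handshakeKey) {
            return true
        }
    }
    return false // no pinned key matched: reject the connection
}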
### Changed
- IP addresses are now derived from ed25519 public (signing) keys (a short example of the new derivation follows this section)
- Previously, addresses were derived from a hash of X25519 (Diffie-Hellman) keys
- Importantly, this means that **all internal IPv6 addresses will change with this release** — this will affect anyone running public services or relying on Yggdrasil for remote access
- It is now recommended to peer over TLS
- Link-local peers from multicast peer discovery will now connect over TLS, with the key from the multicast beacon pinned to the connection
- `socks://` peers now expect the destination endpoint to be a `tls://` listener, instead of a `tcp://` listener
- Multicast peer discovery is now more configurable
- There are separate configuration options to control if beacons are sent, what port to listen on for incoming connections (if sending beacons), and whether or not to listen for beacons from other nodes (and open connections when receiving a beacon)
- Each configuration entry in the list specifies a regular expression to match against interface names
- If an interface matches multiple regular expressions in the list, the settings from the first matching entry in the list will be used
- The session and routing code has been entirely redesigned and rewritten
- This is still an early work-in-progress, so the code hasn't been as well tested or optimized as the old code base — please bear with us for these next few releases as we work through any bugs or issues
- Generally speaking, we expect to see reduced bandwidth use and improved reliability with the new design, especially in cases where nodes move around or change peerings frequently
- Cryptographic sessions no longer use a single shared (ephemeral) secret for the entire life of the session. Keys are now rotated regularly for ongoing sessions (currently rotated at least once per round trip exchange of traffic, subject to change in future releases)
- Source routing has been added. Under normal circumstances, this is what is used to forward session traffic (e.g. the user's IPv6 traffic)
- DHT-based routing has been added. This is used when the sender does not know a source route to the destination. Forwarding through the DHT is less efficient, but the only information that it requires the sender to know is the destination node's (static) key. This is primarily used during the key exchange at session setup, or as a temporary fallback when a source route fails due to changes in the network
- The new DHT design is no longer RPC-based, does not support crawling and does not inherently allow nodes to look up the owner of an arbitrary key. Responding to lookups is now implemented at the application level and a response is only sent if the destination key matches the node's `/128` IP or `/64` prefix
- The greedy routing scheme, used to forward all traffic in previous releases, is now only used for protocol traffic (i.e. DHT setup and source route discovery)
- The routing logic now lives in a [standalone library](https://github.com/Arceliar/ironwood). You are encouraged **not** to use it, as it's still considered pre-alpha, but it's available for those who want to experiment with the new routing algorithm in other contexts
- Session MTUs may be slightly lower now, in order to accommodate large packet headers if required
- Many of the admin functions available over `yggdrasilctl` have been changed or removed as part of rewrites to the code
- Several remote `debug` functions have been added temporarily, to allow for crawling and census gathering during the transition to the new version, but we intend to remove this at some point in the (possibly distant) future
- The list of available functions will likely be expanded in future releases
- The configuration file format has been updated in response to the changed/removed features
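As referenced above, a short sketch of the new key-to-address derivation. It uses the same `ed25519.GenerateKey` and `address.AddrForKey` calls that appear in the `cmd/genkeys` changes elsewhere in this diff; everything else is illustrative:

package main

import (
    "crypto/ed25519"
    "fmt"
    "net"

    "github.com/yggdrasil-network/yggdrasil-go/src/address"
)

func main() {
    // Addresses are now a function of the ed25519 public (signing) key alone.
    pub, _, err := ed25519.GenerateKey(nil)
    if err != nil {
        panic(err)
    }
    addr := address.AddrForKey(pub)
    fmt.Println("IP:", net.IP(addr[:]).String())
}

The same public key always maps to the same address inside `200::/7`, which is why the switch to ed25519-derived addresses changes every node's IPv6 address.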
### Removed
- Tunnel routing (a.k.a. crypto-key routing or "CKR") has been removed
- It was far too easy to accidentally break routing altogether by capturing the route to peers with the TUN adapter
- We recommend tunnelling an existing standard over Yggdrasil instead (e.g. `ip6gre`, `ip6gretap` or other similar encapsulations, using Yggdrasil IPv6 addresses as the tunnel endpoints)
- All `TunnelRouting` configuration options will no longer take effect
- Session firewall has been removed
- This was never a true firewall — it didn't behave like a stateful IP firewall, often allowed return traffic unexpectedly and was simply a way to prevent a node from being flooded with unwanted sessions, so the name was misleading and often led to a false sense of security
- Due to design changes, the new code needs to address the possible memory exhaustion attacks in other ways and a single configurable list no longer makes sense
- Users who want a firewall or other packet filter mechanism should configure something supported by their OS instead (e.g. `ip6tables`)
- All `SessionFirewall` configuration options will no longer take effect
- `SIGHUP` handling to reload the configuration at runtime has been removed
- It was not obvious which parts of the configuration could be reloaded at runtime, and which required the application to be killed and restarted to take effect
- Reloading the config without restarting was also a delicate and bug-prone process, and was distracting from more important developments
- `SIGHUP` will be handled normally (i.e. by exiting)
- `cmd/yggdrasilsim` has been removed, and is unlikely to return to this repository
## [0.3.16] - 2021-03-18
### Added
- New simulation code under `cmd/yggdrasilsim` (work-in-progress)


@ -11,37 +11,14 @@ allows pretty much any IPv6-capable application to communicate securely with
other Yggdrasil nodes. Yggdrasil does not require you to have IPv6 Internet
connectivity - it also works over IPv4.
Although Yggdrasil shares many similarities with
[cjdns](https://github.com/cjdelisle/cjdns), it employs a different routing
algorithm based on a globally-agreed spanning tree and greedy routing in a
metric space, and aims to implement some novel local backpressure routing
techniques. In theory, Yggdrasil should scale well on networks with
internet-like topologies.
## Supported Platforms
We actively support the following platforms, and packages are available for
some of the below:
Yggdrasil works on a number of platforms, including Linux, macOS, Ubiquiti
EdgeRouter, VyOS, Windows, FreeBSD, OpenBSD and OpenWrt.
- Linux
- `.deb` and `.rpm` packages are built by CI for Debian and Red Hat-based
distributions
- Arch, Nix, Void packages also available within their respective repositories
- macOS
- `.pkg` packages are built by CI
- Ubiquiti EdgeOS
- `.deb` Vyatta packages are built by CI
- Windows
- FreeBSD
- OpenBSD
- OpenWrt
Please see our [Platforms](https://yggdrasil-network.github.io/platforms.html) pages for more
specific information about each of our supported platforms, including
installation steps and caveats.
You may also find other platform-specific wrappers, scripts or tools in the
`contrib` folder.
Please see our [Installation](https://yggdrasil-network.github.io/installation.html)
page for more information. You may also find other platform-specific wrappers, scripts
or tools in the `contrib` folder.
## Building
@ -97,21 +74,18 @@ by giving the Yggdrasil binary the `CAP_NET_ADMIN` capability.
## Documentation
Documentation is available on our [GitHub
Pages](https://yggdrasil-network.github.io) site, or in the base submodule
repository within `doc/yggdrasil-network.github.io`.
Documentation is available [on our website](https://yggdrasil-network.github.io).
- [Configuration file options](https://yggdrasil-network.github.io/configuration.html)
- [Platform-specific documentation](https://yggdrasil-network.github.io/platforms.html)
- [Installing Yggdrasil](https://yggdrasil-network.github.io/installation.html)
- [Configuring Yggdrasil](https://yggdrasil-network.github.io/configuration.html)
- [Frequently asked questions](https://yggdrasil-network.github.io/faq.html)
- [Admin API documentation](https://yggdrasil-network.github.io/admin.html)
- [Version changelog](CHANGELOG.md)
## Community
Feel free to join us on our [Matrix
channel](https://matrix.to/#/#yggdrasil:matrix.org) at `#yggdrasil:matrix.org`
or in the `#yggdrasil` IRC channel on Freenode.
or in the `#yggdrasil` IRC channel on [libera.chat](https://libera.chat).
## License

appveyor.yml Normal file

@ -0,0 +1,20 @@
version: '{build}'
pull_requests:
do_not_increment_build_number: true
os: Visual Studio 2019
shallow_clone: false
environment:
MSYS2_PATH_TYPE: inherit
CHERE_INVOKING: enabled_from_arguments
build_script:
- cmd: >-
cd %APPVEYOR_BUILD_FOLDER%
- c:\msys64\usr\bin\bash -lc "./contrib/msi/build-msi.sh x64"
- c:\msys64\usr\bin\bash -lc "./contrib/msi/build-msi.sh x86"
test: off
artifacts:
- path: '*.msi'

build

@ -32,18 +32,16 @@ fi
if [ $IOS ]; then
echo "Building framework for iOS"
gomobile bind -target ios -tags mobile -ldflags="$LDFLAGS $STRIP" -gcflags="$GCFLAGS" \
github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil \
github.com/yggdrasil-network/yggdrasil-go/src/config \
go get golang.org/x/mobile/bind
gomobile bind -target ios -tags mobile -o Yggdrasil.framework -ldflags="$LDFLAGS $STRIP" -gcflags="$GCFLAGS" \
github.com/yggdrasil-network/yggdrasil-extras/src/mobile \
github.com/yggdrasil-network/yggdrasil-extras/src/dummy
github.com/yggdrasil-network/yggdrasil-go/src/config
elif [ $ANDROID ]; then
echo "Building aar for Android"
gomobile bind -target android -tags mobile -ldflags="$LDFLAGS $STRIP" -gcflags="$GCFLAGS" \
github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil \
github.com/yggdrasil-network/yggdrasil-go/src/config \
go get golang.org/x/mobile/bind
gomobile bind -target android -tags mobile -o yggdrasil.aar -ldflags="$LDFLAGS $STRIP" -gcflags="$GCFLAGS" \
github.com/yggdrasil-network/yggdrasil-extras/src/mobile \
github.com/yggdrasil-network/yggdrasil-extras/src/dummy
github.com/yggdrasil-network/yggdrasil-go/src/config
else
for CMD in yggdrasil yggdrasilctl ; do
echo "Building: $CMD"


@ -13,116 +13,66 @@ This only matters if it's high enough to make you the root of the tree.
package main
import (
"crypto/ed25519"
"encoding/hex"
"flag"
"fmt"
"net"
"runtime"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
var doSig = flag.Bool("sig", false, "generate new signing keys instead")
type keySet struct {
priv []byte
pub []byte
id []byte
ip string
priv ed25519.PrivateKey
pub ed25519.PublicKey
}
func main() {
threads := runtime.GOMAXPROCS(0)
var threadChannels []chan []byte
var currentBest []byte
var currentBest ed25519.PublicKey
newKeys := make(chan keySet, threads)
flag.Parse()
for i := 0; i < threads; i++ {
threadChannels = append(threadChannels, make(chan []byte, threads))
switch {
case *doSig:
go doSigKeys(newKeys, threadChannels[i])
default:
go doBoxKeys(newKeys, threadChannels[i])
}
go doKeys(newKeys)
}
for {
newKey := <-newKeys
if isBetter(currentBest, newKey.id[:]) || len(currentBest) == 0 {
currentBest = newKey.id
for _, channel := range threadChannels {
select {
case channel <- newKey.id:
}
}
fmt.Println("--------------------------------------------------------------------------------")
switch {
case *doSig:
fmt.Println("sigPriv:", hex.EncodeToString(newKey.priv))
fmt.Println("sigPub:", hex.EncodeToString(newKey.pub))
fmt.Println("TreeID:", hex.EncodeToString(newKey.id))
default:
fmt.Println("boxPriv:", hex.EncodeToString(newKey.priv))
fmt.Println("boxPub:", hex.EncodeToString(newKey.pub))
fmt.Println("NodeID:", hex.EncodeToString(newKey.id))
fmt.Println("IP:", newKey.ip)
}
if isBetter(currentBest, newKey.pub) || len(currentBest) == 0 {
currentBest = newKey.pub
fmt.Println("-----")
fmt.Println("Priv:", hex.EncodeToString(newKey.priv))
fmt.Println("Pub:", hex.EncodeToString(newKey.pub))
addr := address.AddrForKey(newKey.pub)
fmt.Println("IP:", net.IP(addr[:]).String())
}
}
}
func isBetter(oldID, newID []byte) bool {
for idx := range oldID {
if newID[idx] != oldID[idx] {
return newID[idx] > oldID[idx]
func isBetter(oldPub, newPub ed25519.PublicKey) bool {
for idx := range oldPub {
if newPub[idx] < oldPub[idx] {
return true
}
if newPub[idx] > oldPub[idx] {
break
}
}
return false
}
func doBoxKeys(out chan<- keySet, in <-chan []byte) {
var bestID crypto.NodeID
for {
select {
case newBestID := <-in:
if isBetter(bestID[:], newBestID) {
copy(bestID[:], newBestID)
}
default:
pub, priv := crypto.NewBoxKeys()
id := crypto.GetNodeID(pub)
if !isBetter(bestID[:], id[:]) {
continue
}
bestID = *id
ip := net.IP(address.AddrForNodeID(id)[:]).String()
out <- keySet{priv[:], pub[:], id[:], ip}
}
}
}
func doSigKeys(out chan<- keySet, in <-chan []byte) {
var bestID crypto.TreeID
for idx := range bestID {
bestID[idx] = 0
func doKeys(out chan<- keySet) {
bestKey := make(ed25519.PublicKey, ed25519.PublicKeySize)
for idx := range bestKey {
bestKey[idx] = 0xff
}
for {
select {
case newBestID := <-in:
if isBetter(bestID[:], newBestID) {
copy(bestID[:], newBestID)
}
default:
pub, priv, err := ed25519.GenerateKey(nil)
if err != nil {
panic(err)
}
pub, priv := crypto.NewSigKeys()
id := crypto.GetTreeID(pub)
if !isBetter(bestID[:], id[:]) {
if !isBetter(bestKey, pub) {
continue
}
bestID = *id
out <- keySet{priv[:], pub[:], id[:], ""}
bestKey = pub
out <- keySet{priv, pub}
}
}


@ -2,6 +2,8 @@ package main
import (
"bytes"
"context"
"crypto/ed25519"
"encoding/hex"
"encoding/json"
"flag"
@ -24,31 +26,31 @@ import (
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/module"
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
"github.com/yggdrasil-network/yggdrasil-go/src/core"
"github.com/yggdrasil-network/yggdrasil-go/src/multicast"
"github.com/yggdrasil-network/yggdrasil-go/src/tuntap"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)
type node struct {
core yggdrasil.Core
state *config.NodeState
tuntap module.Module // tuntap.TunAdapter
multicast module.Module // multicast.Multicast
admin module.Module // admin.AdminSocket
core core.Core
config *config.NodeConfig
tuntap *tuntap.TunAdapter
multicast *multicast.Multicast
admin *admin.AdminSocket
}
func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config.NodeConfig {
func readConfig(log *log.Logger, useconf bool, useconffile string, normaliseconf bool) *config.NodeConfig {
// Use a configuration file. If -useconf, the configuration will be read
// from stdin. If -useconffile, the configuration will be read from the
// filesystem.
var conf []byte
var err error
if *useconffile != "" {
if useconffile != "" {
// Read the file from the filesystem
conf, err = ioutil.ReadFile(*useconffile)
conf, err = ioutil.ReadFile(useconffile)
} else {
// Read the file from stdin.
conf, err = ioutil.ReadAll(os.Stdin)
@ -73,32 +75,51 @@ func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config
// then parse the configuration we loaded above on top of it. The effect
// of this is that any configuration item that is missing from the provided
// configuration will use a sane default.
cfg := config.GenerateConfig()
cfg := defaults.GenerateConfig()
var dat map[string]interface{}
if err := hjson.Unmarshal(conf, &dat); err != nil {
panic(err)
}
// Check for fields that have changed type recently, e.g. the Listen config
// option is now a []string rather than a string
if listen, ok := dat["Listen"].(string); ok {
dat["Listen"] = []string{listen}
// Check if we have old field names
if _, ok := dat["TunnelRouting"]; ok {
log.Warnln("WARNING: Tunnel routing is no longer supported")
}
if tunnelrouting, ok := dat["TunnelRouting"].(map[string]interface{}); ok {
if c, ok := tunnelrouting["IPv4Sources"]; ok {
delete(tunnelrouting, "IPv4Sources")
tunnelrouting["IPv4LocalSubnets"] = c
if old, ok := dat["SigningPrivateKey"]; ok {
log.Warnln("WARNING: The \"SigningPrivateKey\" configuration option has been renamed to \"PrivateKey\"")
if _, ok := dat["PrivateKey"]; !ok {
if privstr, err := hex.DecodeString(old.(string)); err == nil {
priv := ed25519.PrivateKey(privstr)
pub := priv.Public().(ed25519.PublicKey)
dat["PrivateKey"] = hex.EncodeToString(priv[:])
dat["PublicKey"] = hex.EncodeToString(pub[:])
} else {
log.Warnln("WARNING: The \"SigningPrivateKey\" configuration option contains an invalid value and will be ignored")
}
}
if c, ok := tunnelrouting["IPv6Sources"]; ok {
delete(tunnelrouting, "IPv6Sources")
tunnelrouting["IPv6LocalSubnets"] = c
}
if c, ok := tunnelrouting["IPv4Destinations"]; ok {
delete(tunnelrouting, "IPv4Destinations")
tunnelrouting["IPv4RemoteSubnets"] = c
}
if c, ok := tunnelrouting["IPv6Destinations"]; ok {
delete(tunnelrouting, "IPv6Destinations")
tunnelrouting["IPv6RemoteSubnets"] = c
}
if oldmc, ok := dat["MulticastInterfaces"]; ok {
if oldmcvals, ok := oldmc.([]interface{}); ok {
var newmc []config.MulticastInterfaceConfig
for _, oldmcval := range oldmcvals {
if str, ok := oldmcval.(string); ok {
newmc = append(newmc, config.MulticastInterfaceConfig{
Regex: str,
Beacon: true,
Listen: true,
})
}
}
if newmc != nil {
if oldport, ok := dat["LinkLocalTCPPort"]; ok {
// numbers parse to float64 by default
if port, ok := oldport.(float64); ok {
for idx := range newmc {
newmc[idx].Port = uint16(port)
}
}
}
dat["MulticastInterfaces"] = newmc
}
}
}
// Sanitise the config
@ -106,7 +127,9 @@ func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config
if err != nil {
panic(err)
}
json.Unmarshal(confJson, &cfg)
if err := json.Unmarshal(confJson, &cfg); err != nil {
panic(err)
}
// Overlay our newly mapped configuration onto the autoconf node config that
// we generated above.
if err = mapstructure.Decode(dat, &cfg); err != nil {
@ -118,7 +141,7 @@ func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config
// Generates a new configuration and returns it in HJSON format. This is used
// with -genconf.
func doGenconf(isjson bool) string {
cfg := config.GenerateConfig()
cfg := defaults.GenerateConfig()
var bs []byte
var err error
if isjson {
@ -158,9 +181,21 @@ func setLogLevel(loglevel string, logger *log.Logger) {
}
}
// The main function is responsible for configuring and starting Yggdrasil.
func main() {
// Configure the command line parameters.
type yggArgs struct {
genconf bool
useconf bool
useconffile string
normaliseconf bool
confjson bool
autoconf bool
ver bool
logto string
getaddr bool
getsnet bool
loglevel string
}
func getArgs() yggArgs {
genconf := flag.Bool("genconf", false, "print a new config to stdout")
useconf := flag.Bool("useconf", false, "read HJSON/JSON config from stdin")
useconffile := flag.String("useconffile", "", "read HJSON/JSON config from specified file path")
@ -173,28 +208,70 @@ func main() {
getsnet := flag.Bool("subnet", false, "returns the IPv6 subnet as derived from the supplied configuration")
loglevel := flag.String("loglevel", "info", "loglevel to enable")
flag.Parse()
return yggArgs{
genconf: *genconf,
useconf: *useconf,
useconffile: *useconffile,
normaliseconf: *normaliseconf,
confjson: *confjson,
autoconf: *autoconf,
ver: *ver,
logto: *logto,
getaddr: *getaddr,
getsnet: *getsnet,
loglevel: *loglevel,
}
}
// The main function is responsible for configuring and starting Yggdrasil.
func run(args yggArgs, ctx context.Context, done chan struct{}) {
defer close(done)
// Create a new logger that logs output to stdout.
var logger *log.Logger
switch args.logto {
case "stdout":
logger = log.New(os.Stdout, "", log.Flags())
case "syslog":
if syslogger, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, "DAEMON", version.BuildName()); err == nil {
logger = log.New(syslogger, "", log.Flags())
}
default:
if logfd, err := os.OpenFile(args.logto, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil {
logger = log.New(logfd, "", log.Flags())
}
}
if logger == nil {
logger = log.New(os.Stdout, "", log.Flags())
logger.Warnln("Logging defaulting to stdout")
}
if args.normaliseconf {
setLogLevel("error", logger)
} else {
setLogLevel(args.loglevel, logger)
}
var cfg *config.NodeConfig
var err error
switch {
case *ver:
case args.ver:
fmt.Println("Build name:", version.BuildName())
fmt.Println("Build version:", version.BuildVersion())
return
case *autoconf:
case args.autoconf:
// Use an autoconf-generated config, this will give us random keys and
// port numbers, and will use an automatically selected TUN/TAP interface.
cfg = config.GenerateConfig()
case *useconffile != "" || *useconf:
cfg = defaults.GenerateConfig()
case args.useconffile != "" || args.useconf:
// Read the configuration from either stdin or from the filesystem
cfg = readConfig(useconf, useconffile, normaliseconf)
cfg = readConfig(logger, args.useconf, args.useconffile, args.normaliseconf)
// If the -normaliseconf option was specified then remarshal the above
// configuration and print it back to stdout. This lets the user update
// their configuration file with newly mapped names (like above) or to
// convert from plain JSON to commented HJSON.
if *normaliseconf {
if args.normaliseconf {
var bs []byte
if *confjson {
if args.confjson {
bs, err = json.MarshalIndent(cfg, "", " ")
} else {
bs, err = hjson.Marshal(cfg)
@ -205,9 +282,10 @@ func main() {
fmt.Println(string(bs))
return
}
case *genconf:
case args.genconf:
// Generate a new configuration and print it to stdout.
fmt.Println(doGenconf(*confjson))
fmt.Println(doGenconf(args.confjson))
return
default:
// No flags were provided, therefore print the list of flags to stdout.
flag.PrintDefaults()
@ -219,25 +297,23 @@ func main() {
return
}
// Have we been asked for the node address yet? If so, print it and then stop.
getNodeID := func() *crypto.NodeID {
if pubkey, err := hex.DecodeString(cfg.EncryptionPublicKey); err == nil {
var box crypto.BoxPubKey
copy(box[:], pubkey)
return crypto.GetNodeID(&box)
getNodeKey := func() ed25519.PublicKey {
if pubkey, err := hex.DecodeString(cfg.PrivateKey); err == nil {
return ed25519.PrivateKey(pubkey).Public().(ed25519.PublicKey)
}
return nil
}
switch {
case *getaddr:
if nodeid := getNodeID(); nodeid != nil {
addr := *address.AddrForNodeID(nodeid)
case args.getaddr:
if key := getNodeKey(); key != nil {
addr := address.AddrForKey(key)
ip := net.IP(addr[:])
fmt.Println(ip.String())
}
return
case *getsnet:
if nodeid := getNodeID(); nodeid != nil {
snet := *address.SubnetForNodeID(nodeid)
case args.getsnet:
if key := getNodeKey(); key != nil {
snet := address.SubnetForKey(key)
ipnet := net.IPNet{
IP: append(snet[:], 0, 0, 0, 0, 0, 0, 0, 0),
Mask: net.CIDRMask(len(snet)*8, 128),
@ -247,170 +323,84 @@ func main() {
return
default:
}
// Create a new logger that logs output to stdout.
var logger *log.Logger
switch *logto {
case "stdout":
logger = log.New(os.Stdout, "", log.Flags())
case "syslog":
if syslogger, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, "DAEMON", version.BuildName()); err == nil {
logger = log.New(syslogger, "", log.Flags())
}
default:
if logfd, err := os.OpenFile(*logto, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil {
logger = log.New(logfd, "", log.Flags())
}
}
if logger == nil {
logger = log.New(os.Stdout, "", log.Flags())
logger.Warnln("Logging defaulting to stdout")
}
setLogLevel(*loglevel, logger)
// Setup the Yggdrasil node itself. The node{} type includes a Core, so we
// don't need to create this manually.
n := node{}
n := node{config: cfg}
// Now start Yggdrasil - this starts the DHT, router, switch and other core
// components needed for Yggdrasil to operate
n.state, err = n.core.Start(cfg, logger)
if err != nil {
if err = n.core.Start(cfg, logger); err != nil {
logger.Errorln("An error occurred during startup")
panic(err)
}
// Register the session firewall gatekeeper function
n.core.SetSessionGatekeeper(n.sessionFirewall)
// Allocate our modules
n.admin = &admin.AdminSocket{}
n.multicast = &multicast.Multicast{}
n.tuntap = &tuntap.TunAdapter{}
// Start the admin socket
n.admin.Init(&n.core, n.state, logger, nil)
if err := n.admin.Start(); err != nil {
if err := n.admin.Init(&n.core, cfg, logger, nil); err != nil {
logger.Errorln("An error occurred initialising admin socket:", err)
} else if err := n.admin.Start(); err != nil {
logger.Errorln("An error occurred starting admin socket:", err)
}
n.admin.SetupAdminHandlers(n.admin.(*admin.AdminSocket))
n.admin.SetupAdminHandlers(n.admin)
// Start the multicast interface
n.multicast.Init(&n.core, n.state, logger, nil)
if err := n.multicast.Start(); err != nil {
if err := n.multicast.Init(&n.core, cfg, logger, nil); err != nil {
logger.Errorln("An error occurred initialising multicast:", err)
} else if err := n.multicast.Start(); err != nil {
logger.Errorln("An error occurred starting multicast:", err)
}
n.multicast.SetupAdminHandlers(n.admin.(*admin.AdminSocket))
n.multicast.SetupAdminHandlers(n.admin)
// Start the TUN/TAP interface
if listener, err := n.core.ConnListen(); err == nil {
if dialer, err := n.core.ConnDialer(); err == nil {
n.tuntap.Init(&n.core, n.state, logger, tuntap.TunOptions{Listener: listener, Dialer: dialer})
if err := n.tuntap.Start(); err != nil {
logger.Errorln("An error occurred starting TUN/TAP:", err)
}
n.tuntap.SetupAdminHandlers(n.admin.(*admin.AdminSocket))
} else {
logger.Errorln("Unable to get Dialer:", err)
}
} else {
logger.Errorln("Unable to get Listener:", err)
if err := n.tuntap.Init(&n.core, cfg, logger, nil); err != nil {
logger.Errorln("An error occurred initialising TUN/TAP:", err)
} else if err := n.tuntap.Start(); err != nil {
logger.Errorln("An error occurred starting TUN/TAP:", err)
}
n.tuntap.SetupAdminHandlers(n.admin)
// Make some nice output that tells us what our IPv6 address and subnet are.
// This is just logged to stdout for the user.
address := n.core.Address()
subnet := n.core.Subnet()
public := n.core.GetSelf().Key
logger.Infof("Your public key is %s", hex.EncodeToString(public[:]))
logger.Infof("Your IPv6 address is %s", address.String())
logger.Infof("Your IPv6 subnet is %s", subnet.String())
// Catch interrupts from the operating system to exit gracefully.
c := make(chan os.Signal, 1)
r := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
signal.Notify(r, os.Interrupt, syscall.SIGHUP)
<-ctx.Done()
// Capture the service being stopped on Windows.
minwinsvc.SetOnExit(n.shutdown)
defer n.shutdown()
// Wait for the terminate/interrupt signal. Once a signal is received, the
// deferred Stop function above will run which will shut down TUN/TAP.
for {
select {
case <-c:
goto exit
case <-r:
if *useconffile != "" {
cfg = readConfig(useconf, useconffile, normaliseconf)
logger.Infoln("Reloading configuration from", *useconffile)
n.core.UpdateConfig(cfg)
n.tuntap.UpdateConfig(cfg)
n.multicast.UpdateConfig(cfg)
} else {
logger.Errorln("Reloading config at runtime is only possible with -useconffile")
}
}
}
exit:
n.shutdown()
}
func (n *node) shutdown() {
n.admin.Stop()
n.multicast.Stop()
n.tuntap.Stop()
_ = n.admin.Stop()
_ = n.multicast.Stop()
_ = n.tuntap.Stop()
n.core.Stop()
}
func (n *node) sessionFirewall(pubkey *crypto.BoxPubKey, initiator bool) bool {
n.state.Mutex.RLock()
defer n.state.Mutex.RUnlock()
// Allow by default if the session firewall is disabled
if !n.state.Current.SessionFirewall.Enable {
return true
}
// Prepare for checking whitelist/blacklist
var box crypto.BoxPubKey
// Reject blacklisted nodes
for _, b := range n.state.Current.SessionFirewall.BlacklistEncryptionPublicKeys {
key, err := hex.DecodeString(b)
if err == nil {
copy(box[:crypto.BoxPubKeyLen], key)
if box == *pubkey {
return false
}
func main() {
args := getArgs()
hup := make(chan os.Signal, 1)
//signal.Notify(hup, os.Interrupt, syscall.SIGHUP)
term := make(chan os.Signal, 1)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
for {
done := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
go run(args, ctx, done)
select {
case <-hup:
cancel()
<-done
case <-term:
cancel()
<-done
return
case <-done:
return
}
}
// Allow whitelisted nodes
for _, b := range n.state.Current.SessionFirewall.WhitelistEncryptionPublicKeys {
key, err := hex.DecodeString(b)
if err == nil {
copy(box[:crypto.BoxPubKeyLen], key)
if box == *pubkey {
return true
}
}
}
// Allow outbound sessions if appropriate
if n.state.Current.SessionFirewall.AlwaysAllowOutbound {
if initiator {
return true
}
}
// Look and see if the pubkey is that of a direct peer
var isDirectPeer bool
for _, peer := range n.core.GetPeers() {
if peer.PublicKey == *pubkey {
isDirectPeer = true
break
}
}
// Allow direct peers if appropriate
if n.state.Current.SessionFirewall.AllowFromDirect && isDirectPeer {
return true
}
// Allow remote nodes if appropriate
if n.state.Current.SessionFirewall.AllowFromRemote && !isDirectPeer {
return true
}
// Finally, default-deny if not matching any of the above rules
return false
}


@ -47,8 +47,11 @@ func run() int {
fmt.Fprintf(flag.CommandLine.Output(), "Usage: %s [options] command [key=value] [key=value] ...\n\n", os.Args[0])
fmt.Println("Options:")
flag.PrintDefaults()
fmt.Println("\nPlease note that options must always specified BEFORE the command\non the command line or they will be ignored.\n")
fmt.Println("Commands:\n - Use \"list\" for a list of available commands\n")
fmt.Println()
fmt.Println("Please note that options must always specified BEFORE the command\non the command line or they will be ignored.")
fmt.Println()
fmt.Println("Commands:\n - Use \"list\" for a list of available commands")
fmt.Println()
fmt.Println("Examples:")
fmt.Println(" - ", os.Args[0], "list")
fmt.Println(" - ", os.Args[0], "getPeers")
@ -288,6 +291,9 @@ func run() int {
if subnet, ok := v.(map[string]interface{})["subnet"].(string); ok {
fmt.Println("IPv6 subnet:", subnet)
}
if boxSigKey, ok := v.(map[string]interface{})["key"].(string); ok {
fmt.Println("Public key:", boxSigKey)
}
if coords, ok := v.(map[string]interface{})["coords"].(string); ok {
fmt.Println("Coords:", coords)
}


@ -1,61 +0,0 @@
package main
import (
"fmt"
"sort"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
func doListen(recvNode *simNode) {
// TODO be able to stop the listeners somehow so they don't leak across different tests
for {
c, err := recvNode.listener.Accept()
if err != nil {
panic(err)
}
c.Close()
}
}
func dialTest(sendNode, recvNode *simNode) {
if sendNode.id == recvNode.id {
fmt.Println("Skipping dial to self")
return
}
var mask crypto.NodeID
for idx := range mask {
mask[idx] = 0xff
}
for {
c, err := sendNode.dialer.DialByNodeIDandMask(nil, &recvNode.nodeID, &mask)
if c != nil {
c.Close()
return
}
if err != nil {
fmt.Println("Dial failed:", err)
}
time.Sleep(time.Second)
}
}
func dialStore(store nodeStore) {
var nodeIdxs []int
for idx, n := range store {
nodeIdxs = append(nodeIdxs, idx)
go doListen(n)
}
sort.Slice(nodeIdxs, func(i, j int) bool {
return nodeIdxs[i] < nodeIdxs[j]
})
for _, idx := range nodeIdxs {
sendNode := store[idx]
for _, jdx := range nodeIdxs {
recvNode := store[jdx]
fmt.Printf("Dialing from node %d to node %d / %d...\n", idx, jdx, len(store))
dialTest(sendNode, recvNode)
}
}
}


@ -1,6 +0,0 @@
package main
func main() {
store := makeStoreSquareGrid(4)
dialStore(store)
}


@ -1,28 +0,0 @@
package main
import (
"io/ioutil"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)
type simNode struct {
core yggdrasil.Core
id int
nodeID crypto.NodeID
dialer *yggdrasil.Dialer
listener *yggdrasil.Listener
}
func newNode(id int) *simNode {
n := simNode{id: id}
n.core.Start(config.GenerateConfig(), log.New(ioutil.Discard, "", 0))
n.nodeID = *n.core.NodeID()
n.dialer, _ = n.core.ConnDialer()
n.listener, _ = n.core.ConnListen()
return &n
}


@ -1,41 +0,0 @@
package main
type nodeStore map[int]*simNode
func makeStoreSingle() nodeStore {
s := make(nodeStore)
s[0] = newNode(0)
return s
}
func linkNodes(a *simNode, b *simNode) {
la := a.core.NewSimlink()
lb := b.core.NewSimlink()
la.SetDestination(lb)
lb.SetDestination(la)
la.Start()
lb.Start()
}
func makeStoreSquareGrid(sideLength int) nodeStore {
store := make(nodeStore)
nNodes := sideLength * sideLength
idxs := make([]int, 0, nNodes)
// TODO shuffle nodeIDs
for idx := 1; idx <= nNodes; idx++ {
idxs = append(idxs, idx)
}
for _, idx := range idxs {
n := newNode(idx)
store[idx] = n
}
for idx := 0; idx < nNodes; idx++ {
if (idx % sideLength) != 0 {
linkNodes(store[idxs[idx]], store[idxs[idx-1]])
}
if idx >= sideLength {
linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
}
}
return store
}


@ -6,6 +6,7 @@ This file generates crypto keys for [ansible-yggdrasil](https://github.com/jcgru
package main
import (
"crypto/ed25519"
"encoding/hex"
"flag"
"fmt"
@ -14,7 +15,6 @@ import (
"github.com/cheggaaa/pb/v3"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
var numHosts = flag.Int("hosts", 1, "number of host vars to generate")
@ -23,7 +23,6 @@ var keyTries = flag.Int("tries", 1000, "number of tries before taking the best k
type keySet struct {
priv []byte
pub []byte
id []byte
ip string
}
@ -37,27 +36,15 @@ func main() {
return
}
var encryptionKeys []keySet
var keys []keySet
for i := 0; i < *numHosts+1; i++ {
encryptionKeys = append(encryptionKeys, newBoxKey())
keys = append(keys, newKey())
bar.Increment()
}
encryptionKeys = sortKeySetArray(encryptionKeys)
keys = sortKeySetArray(keys)
for i := 0; i < *keyTries-*numHosts-1; i++ {
encryptionKeys[0] = newBoxKey()
encryptionKeys = bubbleUpTo(encryptionKeys, 0)
bar.Increment()
}
var signatureKeys []keySet
for i := 0; i < *numHosts+1; i++ {
signatureKeys = append(signatureKeys, newSigKey())
bar.Increment()
}
signatureKeys = sortKeySetArray(signatureKeys)
for i := 0; i < *keyTries-*numHosts-1; i++ {
signatureKeys[0] = newSigKey()
signatureKeys = bubbleUpTo(signatureKeys, 0)
keys[0] = newKey()
keys = bubbleUpTo(keys, 0)
bar.Increment()
}
@ -70,43 +57,36 @@ func main() {
return
}
defer file.Close()
file.WriteString(fmt.Sprintf("yggdrasil_encryption_public_key: %v\n", hex.EncodeToString(encryptionKeys[i].pub)))
file.WriteString("yggdrasil_encryption_private_key: \"{{ vault_yggdrasil_encryption_private_key }}\"\n")
file.WriteString(fmt.Sprintf("yggdrasil_signing_public_key: %v\n", hex.EncodeToString(signatureKeys[i].pub)))
file.WriteString("yggdrasil_signing_private_key: \"{{ vault_yggdrasil_signing_private_key }}\"\n")
file.WriteString(fmt.Sprintf("ansible_host: %v\n", encryptionKeys[i].ip))
file.WriteString(fmt.Sprintf("yggdrasil_public_key: %v\n", hex.EncodeToString(keys[i].pub)))
file.WriteString("yggdrasil_private_key: \"{{ vault_yggdrasil_private_key }}\"\n")
file.WriteString(fmt.Sprintf("ansible_host: %v\n", keys[i].ip))
file, err = os.Create(fmt.Sprintf("host_vars/%x/vault", i))
if err != nil {
return
}
defer file.Close()
file.WriteString(fmt.Sprintf("vault_yggdrasil_encryption_private_key: %v\n", hex.EncodeToString(encryptionKeys[i].priv)))
file.WriteString(fmt.Sprintf("vault_yggdrasil_signing_private_key: %v\n", hex.EncodeToString(signatureKeys[i].priv)))
file.WriteString(fmt.Sprintf("vault_yggdrasil_private_key: %v\n", hex.EncodeToString(keys[i].priv)))
bar.Increment()
}
bar.Finish()
}
func newBoxKey() keySet {
pub, priv := crypto.NewBoxKeys()
id := crypto.GetNodeID(pub)
ip := net.IP(address.AddrForNodeID(id)[:]).String()
return keySet{priv[:], pub[:], id[:], ip}
}
func newSigKey() keySet {
pub, priv := crypto.NewSigKeys()
id := crypto.GetTreeID(pub)
return keySet{priv[:], pub[:], id[:], ""}
func newKey() keySet {
pub, priv, err := ed25519.GenerateKey(nil)
if err != nil {
panic(err)
}
ip := net.IP(address.AddrForKey(pub)[:]).String()
return keySet{priv[:], pub[:], ip}
}
func isBetter(oldID, newID []byte) bool {
for idx := range oldID {
if newID[idx] > oldID[idx] {
if newID[idx] < oldID[idx] {
return true
}
if newID[idx] < oldID[idx] {
if newID[idx] > oldID[idx] {
return false
}
}
@ -122,7 +102,7 @@ func sortKeySetArray(sets []keySet) []keySet {
func bubbleUpTo(sets []keySet, num int) []keySet {
for i := 0; i < len(sets)-num-1; i++ {
if isBetter(sets[i+1].id, sets[i].id) {
if isBetter(sets[i+1].pub, sets[i].pub) {
var tmp = sets[i]
sets[i] = sets[i+1]
sets[i+1] = tmp


@ -8,7 +8,6 @@ ENV CGO_ENABLED=0
RUN apk add git && ./build && go build -o /src/genkeys cmd/genkeys/main.go
FROM docker.io/alpine
LABEL maintainer="Christer Waren/CWINFO <christer.waren@cwinfo.org>"
COPY --from=builder /src/yggdrasil /usr/bin/yggdrasil
COPY --from=builder /src/yggdrasilctl /usr/bin/yggdrasilctl


@ -1,7 +1,9 @@
#!/bin/bash
#!/bin/sh
# This script generates an MSI file for Yggdrasil for a given architecture. It
# needs to run on Linux or macOS with Go 1.16, wixl and msitools installed.
# needs to run on Windows within MSYS2 and Go 1.13 or later must be installed on
# the system and within the PATH. This is ran currently by Appveyor (see
# appveyor.yml in the repository root) for both x86 and x64.
#
# Author: Neil Alexander <neilalexander@users.noreply.github.com>
@ -26,10 +28,29 @@ then
git checkout ${APPVEYOR_REPO_BRANCH}
fi
# Install prerequisites within MSYS2
pacman -S --needed --noconfirm unzip git curl
# Download the wix tools!
if [ ! -d wixbin ];
then
curl -LO https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip
if [ `md5sum wix311-binaries.zip | cut -f 1 -d " "` != "47a506f8ab6666ee3cc502fb07d0ee2a" ];
then
echo "wix package didn't match expected checksum"
exit 1
fi
mkdir -p wixbin
unzip -o wix311-binaries.zip -d wixbin || (
echo "failed to unzip WiX"
exit 1
)
fi
# Build Yggdrasil!
[ "${PKGARCH}" == "x64" ] && GOOS=windows GOARCH=amd64 CGO_ENABLED=0 ./build -p -l "-aslr"
[ "${PKGARCH}" == "x86" ] && GOOS=windows GOARCH=386 CGO_ENABLED=0 ./build -p -l "-aslr"
[ "${PKGARCH}" == "arm" ] && GOOS=windows GOARCH=arm CGO_ENABLED=0 ./build -p -l "-aslr"
[ "${PKGARCH}" == "x64" ] && GOOS=windows GOARCH=amd64 CGO_ENABLED=0 ./build
[ "${PKGARCH}" == "x86" ] && GOOS=windows GOARCH=386 CGO_ENABLED=0 ./build
[ "${PKGARCH}" == "arm" ] && GOOS=windows GOARCH=arm CGO_ENABLED=0 ./build
#[ "${PKGARCH}" == "arm64" ] && GOOS=windows GOARCH=arm64 CGO_ENABLED=0 ./build
# Create the postinstall script
@ -39,24 +60,25 @@ if not exist %ALLUSERSPROFILE%\\Yggdrasil (
)
if not exist %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf (
if exist yggdrasil.exe (
if not exist %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf (
yggdrasil.exe -genconf > %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf
)
yggdrasil.exe -genconf > %ALLUSERSPROFILE%\\Yggdrasil\\yggdrasil.conf
)
)
EOF
# Work out metadata for the package info
PKGNAME=$(sh contrib/semver/name.sh)
PKGVERSION=$(sh contrib/semver/version.sh --bare)
PKGVERSION=$(sh contrib/msi/msversion.sh --bare)
PKGVERSIONMS=$(echo $PKGVERSION | tr - .)
[ "${PKGARCH}" == "x64" ] && \
PKGGUID="77757838-1a23-40a5-a720-c3b43e0260cc" PKGINSTFOLDER="ProgramFiles64Folder" || \
PKGGUID="54a3294e-a441-4322-aefb-3bb40dd022bb" PKGINSTFOLDER="ProgramFilesFolder"
# Download the Wintun driver
curl -o wintun.zip https://www.wintun.net/builds/wintun-0.10.2.zip
unzip wintun.zip
if [ ! -d wintun ];
then
curl -o wintun.zip https://www.wintun.net/builds/wintun-0.11.zip
unzip wintun.zip
fi
if [ $PKGARCH = "x64" ]; then
PKGWINTUNDLL=wintun/bin/amd64/wintun.dll
elif [ $PKGARCH = "x86" ]; then
@ -87,7 +109,6 @@ cat > wix.xml << EOF
Language="1033"
Codepage="1252"
Version="${PKGVERSIONMS}"
Platform="${PKGARCH}"
Manufacturer="github.com/yggdrasil-network">
<Package
@ -100,7 +121,6 @@ cat > wix.xml << EOF
InstallScope="perMachine"
Languages="1033"
Compressed="yes"
Platform="${PKGARCH}"
SummaryCodepage="1252" />
<MajorUpgrade
@ -189,7 +209,9 @@ cat > wix.xml << EOF
<InstallExecuteSequence>
<Custom
Action="UpdateGenerateConfig"
Before="StartServices" />
Before="StartServices">
NOT Installed AND NOT REMOVE
</Custom>
</InstallExecuteSequence>
</Product>
@ -197,4 +219,7 @@ cat > wix.xml << EOF
EOF
# Generate the MSI
wixl -v wix.xml -a ${PKGARCH} -o ${PKGNAME}-${PKGVERSION}-${PKGARCH}.msi
CANDLEFLAGS="-nologo"
LIGHTFLAGS="-nologo -spdb -sice:ICE71 -sice:ICE61"
wixbin/candle $CANDLEFLAGS -out ${PKGNAME}-${PKGVERSION}-${PKGARCH}.wixobj -arch ${PKGARCH} wix.xml && \
wixbin/light $LIGHTFLAGS -ext WixUtilExtension.dll -out ${PKGNAME}-${PKGVERSION}-${PKGARCH}.msi ${PKGNAME}-${PKGVERSION}-${PKGARCH}.wixobj

contrib/msi/msversion.sh Normal file

@ -0,0 +1,46 @@
#!/bin/sh
# Get the last tag
TAG=$(git describe --abbrev=0 --tags --match="v[0-9]*\.[0-9]*\.[0-9]*" 2>/dev/null)
# Did getting the tag succeed?
if [ $? != 0 ] || [ -z "$TAG" ]; then
printf -- "unknown"
exit 0
fi
# Get the current branch
BRANCH=$(git symbolic-ref -q HEAD --short 2>/dev/null)
# Did getting the branch succeed?
if [ $? != 0 ] || [ -z "$BRANCH" ]; then
BRANCH="master"
fi
# Split out into major, minor and patch numbers
MAJOR=$(echo $TAG | cut -c 2- | cut -d "." -f 1)
MINOR=$(echo $TAG | cut -c 2- | cut -d "." -f 2)
PATCH=$(echo $TAG | cut -c 2- | cut -d "." -f 3 | awk -F"rc" '{print $1}')
# Output in the desired format
if [ $((PATCH)) -eq 0 ]; then
printf '%s%d.%d' "$PREPEND" "$((MAJOR))" "$((MINOR))"
else
printf '%s%d.%d.%d' "$PREPEND" "$((MAJOR))" "$((MINOR))" "$((PATCH))"
fi
# Add the build tag on non-master branches
if [ "$BRANCH" != "master" ]; then
BUILD=$(git rev-list --count $TAG..HEAD 2>/dev/null)
# Did getting the count of commits since the tag succeed?
if [ $? != 0 ] || [ -z "$BUILD" ]; then
printf -- "-unknown"
exit 0
fi
# Is the build greater than zero?
if [ $((BUILD)) -gt 0 ]; then
printf -- "-%04d" "$((BUILD))"
fi
fi


@ -1,46 +1,11 @@
#!/bin/sh
# Get the last tag
TAG=$(git describe --abbrev=0 --tags --match="v[0-9]*\.[0-9]*\.[0-9]*" 2>/dev/null)
# Did getting the tag succeed?
if [ $? != 0 ] || [ -z "$TAG" ]; then
printf -- "unknown"
exit 0
fi
# Get the current branch
BRANCH=$(git symbolic-ref -q HEAD --short 2>/dev/null)
# Did getting the branch succeed?
if [ $? != 0 ] || [ -z "$BRANCH" ]; then
BRANCH="master"
fi
# Split out into major, minor and patch numbers
MAJOR=$(echo $TAG | cut -c 2- | cut -d "." -f 1)
MINOR=$(echo $TAG | cut -c 2- | cut -d "." -f 2)
PATCH=$(echo $TAG | cut -c 2- | cut -d "." -f 3)
# Output in the desired format
if [ $((PATCH)) -eq 0 ]; then
printf '%s%d.%d' "$PREPEND" "$((MAJOR))" "$((MINOR))"
else
printf '%s%d.%d.%d' "$PREPEND" "$((MAJOR))" "$((MINOR))" "$((PATCH))"
fi
# Add the build tag on non-master branches
if [ "$BRANCH" != "master" ]; then
BUILD=$(git rev-list --count $TAG..HEAD 2>/dev/null)
# Did getting the count of commits since the tag succeed?
if [ $? != 0 ] || [ -z "$BUILD" ]; then
printf -- "-unknown"
exit 0
fi
# Is the build greater than zero?
if [ $((BUILD)) -gt 0 ]; then
printf -- "-%04d" "$((BUILD))"
fi
fi
case "$*" in
*--bare*)
# Remove the "v" prefix
git describe --tags --match="v[0-9]*\.[0-9]*\.[0-9]*" | cut -c 2-
;;
*)
git describe --tags --match="v[0-9]*\.[0-9]*\.[0-9]*"
;;
esac


@ -1,148 +0,0 @@
# Yggdrasil
Note: This is a very rough early draft.
Yggdrasil is an encrypted IPv6 network running in the [`200::/7` address range](https://en.wikipedia.org/wiki/Unique_local_address).
It is an experimental/toy network, so failure is acceptable, as long as it's instructive to see how it breaks if/when everything falls apart.
IP addresses are derived from cryptographic keys, to reduce the need for public key infrastructure.
A form of locator/identifier separation (similar in goal to [LISP](https://en.wikipedia.org/wiki/Locator/Identifier_Separation_Protocol)) is used to map static identifiers (IP addresses) onto dynamic routing information (locators), using a [distributed hash table](https://en.wikipedia.org/wiki/Distributed_hash_table) (DHT).
Locators are used to approximate the distance between nodes in the network, where the approximate distance is the length of a real worst-case-scenario path through the network.
This is (arguably) easier to secure and requires less information about the network than commonly used routing schemes.
While not technically a [compact routing scheme](https://arxiv.org/abs/0708.2309), tests on real-world networks suggest that routing in this style incurs stretch comparable to the name-dependent compact routing schemes designed for static networks.
Compared to compact routing schemes, Yggdrasil appears to have smaller average routing table sizes, works on dynamic networks, and is name-independent.
It currently lacks the provable bounds of compact routing schemes, and there's a serious argument to be made that it cheats by stretching the definition of some of the above terms, but the main point to be emphasized is that there are trade-offs between different concerns when trying to route traffic, and we'd rather make every part *good* than try to make any one part *perfect*.
In that sense, Yggdrasil seems to be competitive, on what are supposedly realistic networks, with compact routing schemes.
## Addressing
Yggdrasil uses a truncated version of a `NodeID` to assign addresses.
An address is assigned from the `200::/7` prefix, according to the following:
1. Begin with `0x02` as the first byte of the address, or `0x03` if it's a `/64` prefix.
2. Count the number of leading `1` bits in the NodeID.
3. Set the second byte of the address to the number of leading `1` bits in the NodeID (8 bit unsigned integer, at most 255).
4. Append the NodeID to the remaining bits of the address, truncating the leading `1` bits and the first `0` bit, to a total address size of 128 bits.
The last bit of the first byte is used to flag whether an address is for a router (`200::/8`), or part of an advertised prefix (`300::/8`), where each router owns a `/64` that matches its address (except with the eighth bit set to 1 instead of 0).
This allows the prefix to be advertised to the router's LAN, so unsupported devices can still connect to the network (e.g. network printers).
The NodeID is a [sha512sum](https://en.wikipedia.org/wiki/SHA-512) of a node's public encryption key.
Addresses are checked to ensure that they match the NodeID, to prevent address spoofing.
As such, while a 128 bit IPv6 address is likely too short to be considered secure by cryptographer standards, there is a significant cost in attempting to cause an address collision.
Addresses can be made more secure by brute force generating a large number of leading `1` bits in the NodeID.
When connecting to a node, the IP address is unpacked into the known bits of the NodeID and a matching bitmask to track which bits are significant.
A node is only communicated with if its `NodeID` matches its public key and the known `NodeID` bits from the address.
It is important to note that only `NodeID` is used internally for routing, so the addressing scheme could in theory be changed without breaking compatibility with intermediate routers.
This has been done once, when moving the address range from the `fd00::/8` ULA range to the reserved-but-[deprecated](https://tools.ietf.org/html/rfc4048) `200::/7` range.
Further addressing scheme changes could occur if, for example, an IPv7 format ever emerges.
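A compact sketch of the construction in steps 1 to 4 above, with the NodeID as the 64-byte SHA-512 digest described in this section; this is illustrative Go, not the shipped implementation:

package sketch

// addrForNodeID sketches the address construction: byte 0 is 0x02 (0x03 would
// mark the advertised /64 prefix), byte 1 counts the leading 1 bits of the
// NodeID, and the remaining 14 bytes are the NodeID bits that follow the
// leading 1s and the first 0 bit.
func addrForNodeID(nodeID [64]byte) [16]byte {
    var addr [16]byte
    addr[0] = 0x02

    // Count the leading 1 bits of the NodeID.
    ones := 0
counting:
    for _, b := range nodeID {
        for bit := 7; bit >= 0; bit-- {
            if b&(1<<uint(bit)) == 0 {
                break counting
            }
            ones++
        }
    }
    addr[1] = byte(ones) // 8-bit unsigned count, at most 255

    // Append the NodeID bits after the leading 1s and the first 0 bit.
    // Assumes the NodeID has enough remaining bits, which it always does in practice.
    for i := 0; i < 14*8; i++ {
        src := ones + 1 + i
        bit := (nodeID[src/8] >> uint(7-src%8)) & 1
        addr[2+i/8] |= bit << uint(7-i%8)
    }
    return addr
}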
### Cryptography
Public key encryption is done using the `golang.org/x/crypto/nacl/box` package, which uses [Curve25519](https://en.wikipedia.org/wiki/Curve25519), [XSalsa20](https://en.wikipedia.org/wiki/Salsa20), and [Poly1305](https://en.wikipedia.org/wiki/Poly1305) for key exchange, encryption, and authentication (interoperable with [NaCl](https://en.wikipedia.org/wiki/NaCl_(software))).
Permanent keys are used only for protocol traffic, with random nonces generated on a per-packet basis using `crypto/rand` from Go's standard library.
Ephemeral session keys (for [forward secrecy](https://en.wikipedia.org/wiki/Forward_secrecy)) are generated for encapsulated IPv6 traffic, using the same set of primitives, with random initial nonces that are subsequently incremented.
A list of recently received session nonces is kept (as a bitmask) and checked to reject duplicated packets, in an effort to block duplicate packets and replay attacks.
A separate set of keys is generated and used for signing with [Ed25519](https://en.wikipedia.org/wiki/Ed25519), which is used by the routing layer to secure construction of a spanning tree.
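A minimal sketch of how the primitives named above fit together, assuming a per-packet random nonce for protocol traffic and a separate Ed25519 signing key; the names are illustrative and this is not the project's session code:

package sketch

import (
    "crypto/ed25519"
    "crypto/rand"

    "golang.org/x/crypto/nacl/box"
)

// sealOnce encrypts one packet to a peer using a fresh random nonce, returning
// the nonce alongside the ciphertext so the peer can open it with box.Open.
func sealOnce(msg []byte, peerPub, ourPriv *[32]byte) (nonce [24]byte, ct []byte, err error) {
    if _, err = rand.Read(nonce[:]); err != nil {
        return
    }
    ct = box.Seal(nil, msg, &nonce, peerPub, ourPriv)
    return
}

// signCoords signs routing data (e.g. a spanning tree advertisement) with the
// separate Ed25519 signing key.
func signCoords(priv ed25519.PrivateKey, coords []byte) []byte {
    return ed25519.Sign(priv, coords)
}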
### Prefixes
Recall that each node's address is in the lower half of the address range, i.e. `200::/8`. A `/64` prefix is made available to each node under `300::/8`, where the remaining bits of the prefix match the node's address under `200::/8`.
A node may optionally advertise a prefix on their local area network, which allows unsupported or legacy devices with IPv6 support to connect to the network.
Note that there are 64 fewer bits of `NodeID` available to check in each address from a routing prefix, so it makes sense to brute force a `NodeID` with more significant bits in the address if this approach is to be used.
Running `genkeys.go` will do this by default.
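A small sketch of the address-to-prefix relationship described above (illustrative only):

package sketch

// prefixForAddr shows the relationship between a node's /128 address under
// 200::/8 and its advertised /64 prefix under 300::/8: the prefix reuses the
// upper 64 bits of the address with the eighth bit flipped from 0 to 1.
func prefixForAddr(addr [16]byte) [8]byte {
    var prefix [8]byte
    copy(prefix[:], addr[:8])
    prefix[0] |= 0x01 // 0x02 -> 0x03
    return prefix
}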
## Locators and Routing
Locators are generated using information from a spanning tree (described below).
The result is that each node has a set of [coordinates in a greedy metric space](https://en.wikipedia.org/wiki/Greedy_embedding).
These coordinates are used as a distance label.
Given the coordinates of any two nodes, it is possible to calculate the length of some real path through the network between the two nodes.
Traffic is forwarded using a [greedy routing](https://en.wikipedia.org/wiki/Small-world_routing#Greedy_routing) scheme, where each node forwards the packet to a one-hop neighbor that is closer to the destination (according to this distance metric) than the current node.
In particular, when a packet needs to be forwarded, a node will forward it to whatever peer is closest to the destination in the greedy [metric space](https://en.wikipedia.org/wiki/Metric_space) used by the network, provided that the peer is closer to the destination than the current node.
If no closer peers are idle, then the packet is queued in FIFO order, with separate queues per destination coords (currently, as a bit of a hack, IPv6 flow labels are embedded after the end of the significant part of the coords, so queues distinguish between different traffic streams with the same destination).
Whenever the node finishes forwarding a packet to a peer, it checks the queues, and will forward the first packet from the queue with the maximum `<age of first packet>/<queue size in bytes>`, i.e. the bandwidth the queue is attempting to use, subject to the constraint that the peer is a valid next hop (i.e. closer to the destination than the current node).
If no non-empty queue is available, then the peer is added to the idle set, ready to forward packets when the need arises.
This acts as a crude approximation of backpressure routing, where the remote queue sizes are assumed to be equal to the distance of a node from a destination (rather than communicating queue size information), and packets are never forwarded "backwards" through the network, but congestion on a local link is routed around when possible.
The queue selection strategy behaves similarly to shortest-queue-first, in that it gives a larger fraction of the available bandwidth to sessions that attempt to use less bandwidth, and is loosely based on the rationale behind some proposed solutions to the [cake-cutting](https://en.wikipedia.org/wiki/Fair_cake-cutting) problem.
The queue size is limited to 4 MB. If a packet is added to a queue and the total size of all queues is larger than this threshold, then a random queue is selected (with odds proportional to relative queue sizes), and the first packet from that queue is dropped, with the process repeated until the total queue size drops below the allowed threshold.
Note that this forwarding procedure generalizes to nodes that are not one-hop neighbors, but the current implementation omits the use of more distant neighbors, as this is expected to be a minor optimization (it would add per-link control traffic to pass path-vector-like information about a subset of the network, which is a lot of overhead compared to the current setup).
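A minimal sketch of the queue selection heuristic described above, with a simplified queue type standing in for the real per-coords queues:

package sketch

import "time"

// queue holds packets destined for one set of coords (plus embedded flow label).
type queue struct {
    packets  [][]byte
    arrivals []time.Time
    size     int // total queued bytes
}

// selectQueue picks, among the non-empty queues, the one with the largest
// (age of first packet)/(queue size in bytes); this favours queues that are
// attempting to use less bandwidth, as described above.
func selectQueue(queues []*queue, now time.Time) *queue {
    var best *queue
    var bestRatio float64
    for _, q := range queues {
        if len(q.packets) == 0 {
            continue
        }
        ratio := now.Sub(q.arrivals[0]).Seconds() / float64(q.size)
        if best == nil || ratio > bestRatio {
            best, bestRatio = q, ratio
        }
    }
    return best // nil means every queue was empty, so the peer goes idle
}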
### Spanning Tree
A [spanning tree](https://en.wikipedia.org/wiki/Spanning_tree) is constructed with the tree rooted at the highest TreeID, where TreeID is equal to a sha512sum of a node's public [Ed25519](https://en.wikipedia.org/wiki/Ed25519) key (used for signing).
A node sends periodic advertisement messages to each neighbor.
The advertisement contains the coords that match the path from the root through the node, plus one additional hop from the node to the neighbor being advertised to.
Each hop in this advertisement includes a matching ed25519 signature.
These signatures prevent nodes from forging arbitrary routing advertisements.
The first hop, from the root, also includes a sequence number, which must be updated periodically.
A node will blacklist the current root (keeping a record of the last sequence number observed) if the root fails to update for longer than some timeout (currently hard coded at 1 minute).
Normally, a root node will update its sequence number more frequently than this (currently once every 30 seconds).
Nodes are throttled to ignore updates with a new sequence number for some period after updating their most recently seen sequence number (currently this cooldown is 15 seconds).
The implementation chooses to set the sequence number equal to the unix time on the root's clock, so that a new (higher) sequence number will be selected if the root is restarted and the clock is not set back.
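
A rough sketch of this sequence number handling follows; the 1 minute timeout and 15 second cooldown come from the text above, while the types and function names are invented for illustration.

```go
// Sketch of the root sequence number handling described above. The one
// minute timeout and 15 second cooldown come from the text; the types and
// function names are invented for illustration.
package sketch

import "time"

const (
	rootTimeout  = 1 * time.Minute  // blacklist a root that stops updating for this long
	rootCooldown = 15 * time.Second // minimum gap between accepted sequence numbers
)

type rootState struct {
	seq         uint64    // latest accepted sequence number (the root's unix time)
	lastUpdated time.Time // when that sequence number was accepted
}

// isExpired reports whether the root has gone longer than the timeout
// without a fresh sequence number, in which case it gets blacklisted
// (keeping a record of the last sequence number observed).
func (r *rootState) isExpired(now time.Time) bool {
	return now.Sub(r.lastUpdated) > rootTimeout
}

// acceptUpdate accepts a strictly newer sequence number, but no more often
// than once per cooldown period.
func (r *rootState) acceptUpdate(newSeq uint64, now time.Time) bool {
	if newSeq <= r.seq || now.Sub(r.lastUpdated) < rootCooldown {
		return false
	}
	r.seq, r.lastUpdated = newSeq, now
	return true
}
```
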
Every node in the network, other than the root, must select one of its neighbors to use as its parent.
This selection is done by tracking when each neighbor first sends us a message with a new timestamp from the root, which reveals the relative latency of each path from the root, through each neighbor, to the node that is searching for a parent.
These relative latencies are tracked by, for each neighbor, keeping a score vs each other neighbor.
If a neighbor sends a message with an updated timestamp before another neighbor, then the faster neighbor's score is increased by 1.
If a neighbor relays the update later than another, then its score is decreased by 2, so that a neighbor must be reliably faster (at least 2/3 of the time) to see a net score increase over time.
If a neighbor begins to advertise new coordinates, then its score vs all other neighbors is reset to 0.
A node switches to a new parent if a neighbor's score (vs the current parent) reaches some threshold, currently 240, which corresponds to about 2 hours of being a reliably faster path.
The intended outcome of this process is that stable connections from fixed infrastructure near the "core" of the network should (eventually) select parents that minimize latency from the root to themselves, while the more dynamic parts of the network, presumably more towards the edges, will try to favor reliability when selecting a parent.
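
As a sketch of that scoring (+1 for the faster neighbor, -2 for the slower one, a reset on new coordinates, and a switch threshold of 240), with names invented for illustration:

```go
// Sketch of the parent selection scoring described above. The +1/-2 scoring
// and the switch threshold of 240 come from the text; everything else is
// invented for illustration.
package sketch

const parentSwitchThreshold = 240 // roughly 2 hours of being reliably faster

// neighborScores tracks, for each neighbor, a score against every other neighbor.
type neighborScores map[string]map[string]int

func (s neighborScores) ensure(n string) {
	if s[n] == nil {
		s[n] = make(map[string]int)
	}
}

// recordUpdateOrder is called when `faster` relays a new root timestamp
// before `slower` does.
func (s neighborScores) recordUpdateOrder(faster, slower string) {
	s.ensure(faster)
	s.ensure(slower)
	s[faster][slower] += 1 // the faster neighbor gains 1
	s[slower][faster] -= 2 // the slower one loses 2, so it must win at least 2/3 of the time
}

// resetScores clears a neighbor's scores when it advertises new coordinates.
func (s neighborScores) resetScores(neighbor string) {
	s[neighbor] = make(map[string]int)
}

// shouldSwitchParent reports whether candidate has been reliably faster than
// the current parent for long enough to become the new parent.
func (s neighborScores) shouldSwitchParent(candidate, currentParent string) bool {
	return s[candidate][currentParent] >= parentSwitchThreshold
}
```
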
The distance metric between nodes is simply the distance between the nodes if they routed on the spanning tree.
This is equal to the sum of the distance from each node to the last common ancestor of the two nodes being compared.
The locator then consists of a root's key, timestamp, and coordinates representing each hop in the path from the root to the node.
In practice, only the coords are used for routing, while the root and timestamp, along with all the per-hop signatures, are needed to securely construct the spanning tree.
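
A minimal sketch of this tree distance metric, assuming coords are represented as a slice of hops from the root:

```go
// Sketch of the tree distance metric described above: the distance between
// two coordinate sets is the hop count from one node up to the last common
// ancestor and back down to the other. Illustrative only.
package sketch

// treeDist returns len(a) + len(b) - 2*(length of the common prefix).
func treeDist(a, b []uint64) int {
	common := 0
	for common < len(a) && common < len(b) && a[common] == b[common] {
		common++
	}
	return len(a) + len(b) - 2*common
}
```

For example, coords `1 2 3` and `1 2 5 6` share the two-hop prefix `1 2`, so the distance is 3 + 4 - 2*2 = 3 hops via the last common ancestor.
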
## Name-independent routing
A [Chord](https://en.wikipedia.org/wiki/Chord_(peer-to-peer))-like Distributed Hash Table (DHT) is used as a distributed database that maps NodeIDs onto coordinates in the spanning tree metric space.
The DHT is Chord-like in that it uses a successor/predecessor structure to do lookups in `O(n)` time with `O(1)` entries, then augments this with some additional information, adding roughly `O(log n)` additional entries, to reduce the lookup time to something around `O(log n)`.
In the long term, the idea is to favor spending our bandwidth making sure the minimum `O(1)` part is right, to prioritize correctness, and then try to conserve bandwidth (and power) by being a bit lazy about checking the remaining `O(log n)` portion when it's not in use.
To be specific, the DHT stores the immediate successor of a node, plus the next node it manages to find which is strictly closer (by the tree hop-count metric) than all previous nodes.
The same process is repeated for predecessor nodes, and lookups walk the network in the predecessor direction, with each key being owned by its successor (to make sure defaulting to 0 for unknown bits of a `NodeID` doesn't cause us to overshoot the target during a lookup).
In addition, all of a node's one-hop neighbors are included in the DHT, since we get this information "for free", and we must include it in our DHT to ensure that the network doesn't diverge to a broken state (though I suspect that only adding parents or parent-child relationships may be sufficient -- worth trying to prove or disprove, if somebody's bored).
The DHT differs from Chord in that there are no values in the key:value store -- it only stores information about DHT peers -- and that it uses a [Kademlia](https://en.wikipedia.org/wiki/Kademlia)-inspired iterative-parallel lookup process.
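
A hedged sketch of that filtering rule for the successor direction, reusing the `treeDist` helper from the earlier sketch (the types are invented for illustration):

```go
// Sketch of which successor-direction DHT entries a node keeps, per the
// description above: the immediate successor in keyspace, then each further
// node that is strictly closer (by the tree hop-count metric) than every
// node kept before it. Reuses the treeDist helper from the earlier sketch.
package sketch

type dhtEntry struct {
	nodeID []byte   // position in DHT keyspace
	coords []uint64 // tree coords, used for the distance comparison
}

// successorChain filters candidates, assumed sorted by increasing keyspace
// distance from our own NodeID in the successor direction, down to the
// entries that would actually be kept.
func successorChain(selfCoords []uint64, candidates []dhtEntry) []dhtEntry {
	var kept []dhtEntry
	best := 0
	for i, c := range candidates {
		d := treeDist(selfCoords, c.coords)
		if i == 0 || d < best { // always keep the immediate successor
			kept = append(kept, c)
			best = d
		}
	}
	return kept
}
```

The same filtering is applied in the predecessor direction, and one-hop neighbors are added regardless of distance.
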
To summarize the entire routing procedure, when given only a node's IP address, the goal is to find a route to the destination.
That happens through 3 steps:
1. The address is unpacked into the known bits of a NodeID and a bitmask to signal which bits of the NodeID are known (the unknown bits are ignored).
2. A DHT search is performed, which normally results in a response from the node closest in the DHT keyspace to the target `NodeID`. The response contains the node's curve25519 public key, which is checked to match the `NodeID` (and therefore the address), as well as the node's coordinates.
3. Using the keys and coords from the above step, an ephemeral key exchange occurs between the source and destination nodes. These ephemeral session keys are used to encrypt any ordinary IPv6 traffic that may be encapsulated and sent between the nodes.
From that point, the session keys and coords are cached and used to encrypt and send traffic between nodes. This is *mostly* transparent to the user: the initial DHT lookup and key exchange takes at least 2 round trips, so there's some delay before session setup completes and normal IPv6 traffic can flow. This is similar to the delay caused by a DNS lookup, although it generally takes longer, as a DHT lookup requires multiple iterations to reach the destination.
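
A minimal sketch of the check in step 2, assuming (for illustration only) that the NodeID is the sha512 of the node's curve25519 public key and that the known bits are tracked with a byte-wise mask:

```go
// Sketch of the check in step 2: the partial NodeID recovered from the
// address in step 1 is compared against the NodeID derived from the public
// key returned by the DHT. Deriving the NodeID as a sha512 of the node's
// curve25519 public key is an assumption made for this illustration, as is
// the byte-wise mask format.
package sketch

import "crypto/sha512"

// nodeIDMatches reports whether the derived NodeID agrees with the partial
// NodeID on every bit the mask marks as known.
func nodeIDMatches(curve25519Pub, partialNodeID, mask []byte) bool {
	derived := sha512.Sum512(curve25519Pub)
	if len(mask) > len(derived) || len(partialNodeID) < len(mask) {
		return false
	}
	for i := range mask {
		if (derived[i]^partialNodeID[i])&mask[i] != 0 {
			return false
		}
	}
	return true
}
```
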
## Project Status and Plans
The current (Go) implementation is considered alpha, so compatibility with future versions is neither guaranteed nor expected.
While users are discouraged from running anything truly critical on top of it, as of writing, it seems reliable enough for day-to-day use.
As an "alpha" quality release, Yggdrasil *should* at least be able to detect incompatible versions when it sees them, and warn the users that an update may be needed.
A "beta" quality release should know enough to be compatible in the face of wire format changes, and reasonably feature complete.
A "stable" 1.0 release, if it ever happens, would probably be feature complete, with no expectation of future wire format changes, and free of known critical bugs.
Roughly speaking, there are a few obvious ways the project could turn out:
1. The developers could lose interest before it goes anywhere.
2. The project could be reasonably complete (beta or stable), but never gain a significant number of users.
3. The network may grow large enough that fundamental (non-fixable) design problems appear, which is hopefully a learning experience, but the project may die as a result.
4. The network may grow large, but never hit any design problems, in which case we need to think about either moving the important parts into other projects ([cjdns](https://github.com/cjdelisle/cjdns)) or rewriting compatible implementations that are better optimized for the target platforms (e.g. a linux kernel module).
That last outcome is probably impossible, because the speed of light would *eventually* become a problem for a sufficiently large network.
If the only thing limiting network growth turns out to be the underlying physics, then that arguably counts as a win.
Also, note that some design decisions were made for ease-of-programming or ease-of-testing reasons, and likely need to be reconsidered at some point.
In particular, Yggdrasil currently uses TCP for connections with one-hop neighbors, which introduces an additional layer of buffering that can lead to increased and/or unstable latency in congested areas of the network.

@ -1 +0,0 @@
Subproject commit c876890a51d9140e68d5cec7fbeb2146c2562792

22
go.mod
View File

@ -3,22 +3,24 @@ module github.com/yggdrasil-network/yggdrasil-go
go 1.16
require (
github.com/Arceliar/ironwood v0.0.0-20210619124114-6ad55cae5031
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
github.com/cheggaaa/pb/v3 v3.0.6
github.com/fatih/color v1.10.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/cheggaaa/pb/v3 v3.0.8
github.com/fatih/color v1.12.0 // indirect
github.com/gologme/log v1.2.0
github.com/hashicorp/go-syslog v1.0.0
github.com/hjson/hjson-go v3.1.0+incompatible
github.com/kardianos/minwinsvc v1.0.0
github.com/mattn/go-runewidth v0.0.10 // indirect
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mitchellh/mapstructure v1.4.1
github.com/rivo/uniseg v0.2.0 // indirect
github.com/vishvananda/netlink v1.1.0
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf
golang.zx2c4.com/wireguard/windows v0.3.8
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a // indirect
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b
golang.org/x/sys v0.0.0-20210611083646-a4fc73990273
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f
golang.zx2c4.com/wireguard v0.0.0-20210604143328-f9b48a961cd2
golang.zx2c4.com/wireguard/windows v0.3.14
)

61
go.sum
View File

@ -1,12 +1,15 @@
github.com/Arceliar/ironwood v0.0.0-20210619124114-6ad55cae5031 h1:DZVDfYhVdu+0wAiRHoY1olyNkKxIot9UjBnbQFzuUlM=
github.com/Arceliar/ironwood v0.0.0-20210619124114-6ad55cae5031/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ=
github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/cheggaaa/pb/v3 v3.0.6 h1:ULPm1wpzvj60FvmCrX7bIaB80UgbhI+zSaQJKRfCbAs=
github.com/cheggaaa/pb/v3 v3.0.6/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/cheggaaa/pb/v3 v3.0.8 h1:bC8oemdChbke2FHIIGy9mn4DPJ2caZYQnfbRqwmdCoA=
github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
@ -17,15 +20,14 @@ github.com/kardianos/minwinsvc v1.0.0 h1:+JfAi8IBJna0jY2dJGZqi7o15z13JelFIklJCAE
github.com/kardianos/minwinsvc v1.0.0/go.mod h1:Bgd0oc+D0Qo3bBytmNtyRKVlp85dAloLKhfxanPFFRc=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -37,14 +39,17 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -53,20 +58,22 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225014209-683adc9d29d7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305215415-5cdee2b1b5a0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b h1:ggRgirZABFolTmi3sn6Ivd9SipZwLedQ5wR0aAKnFxU=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309040221-94ec62e08169/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210611083646-a4fc73990273 h1:faDu4veV+8pcThn4fewv6TVlNCezafGoC1gM/mxQLbQ=
golang.org/x/sys v0.0.0-20210611083646-a4fc73990273/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c h1:SW/oilbeWd6f32u3ZvuYGqZ+wivcp//I3Dy/gByk7Wk=
golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f h1:yQJrRE0hDxDFmZLlRaw+3vusO4fwNHgHIjUOMO7bHYI=
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.zx2c4.com/wireguard v0.0.0-20210225140808-70b7b7158fc9/go.mod h1:39ZQQ95hUxDxT7opsWy/rtfgvXXc8s30qfZ02df69Fo=
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf h1:AtdIMfzvVNPXN4kVY/yWS8mvpQogSwtCRJk2y/LBPpg=
golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf/go.mod h1:ojGPy+9W6ZSM8anL+xC67fvh8zPQJwA6KpFOHyDWLX4=
golang.zx2c4.com/wireguard/windows v0.3.8 h1:FvfBEhdZZTwthLuPHdyP6zpivYL3enopxd4XpggAufM=
golang.zx2c4.com/wireguard/windows v0.3.8/go.mod h1:lm7dxHcBuzMNq706Ge1tZKZKw4+19vG9dLOhoDX05HQ=
golang.zx2c4.com/wireguard v0.0.0-20210510202332-9844c74f67ec/go.mod h1:a057zjmoc00UN7gVkaJt2sXVK523kMJcogDTEvPIasg=
golang.zx2c4.com/wireguard v0.0.0-20210604143328-f9b48a961cd2 h1:wfOOSvHgIzTZ9h5Vb6yUFZNn7uf3bT7PeYsHOO7tYDM=
golang.zx2c4.com/wireguard v0.0.0-20210604143328-f9b48a961cd2/go.mod h1:laHzsbfMhGSobUmruXWAyMKKHSqvIcrqZJMyHD+/3O8=
golang.zx2c4.com/wireguard/windows v0.3.14 h1:5yIDYyrQyGkLqV+tzY4ilMNeIvQeMXAz0glZz9u179A=
golang.zx2c4.com/wireguard/windows v0.3.14/go.mod h1:3P4IEAsb+BjlKZmpUXgy74c0iX9AVwwr3WcVJ8nPgME=

File diff suppressed because it is too large

View File

@ -1,62 +0,0 @@
import glob
import sys
inputDirPath = sys.argv[1]
inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()
merged = dict()
stretches = []
total = 0
for inputFilePath in inputFilePaths:
print "Processing file {}".format(inputFilePath)
with open(inputFilePath, 'r') as f:
inData = f.readlines()
pathsChecked = 0.
avgStretch = 0.
for line in inData:
dat = line.rstrip('\n').split(' ')
eHops = int(dat[0])
nHops = int(dat[1])
count = int(dat[2])
if eHops not in merged: merged[eHops] = dict()
if nHops not in merged[eHops]: merged[eHops][nHops] = 0
merged[eHops][nHops] += count
total += count
pathsChecked += count
stretch = float(nHops)/eHops
avgStretch += stretch*count
finStretch = avgStretch / max(1, pathsChecked)
stretches.append(str(finStretch))
hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
for nHops in sorted(merged[eHops].keys()):
count = merged[eHops][nHops]
result = "{} {} {}".format(eHops, nHops, count)
results.append(result)
hopsUsed += nHops*count
hopsNeeded += eHops*count
stretch = float(nHops)/eHops
avgStretch += stretch*count
print result
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)
with open("results.txt", "w") as f:
f.write('\n'.join(results))
with open("stretches.txt", "w") as f:
f.write('\n'.join(stretches))
print "Total files processed: {}".format(len(inputFilePaths))
print "Total paths found: {}".format(total)
print "Bandwidth usage: {}".format(bandwidthUsage)
print "Average stretch: {}".format(avgStretch)

View File

@ -1,2 +0,0 @@
#!/bin/bash
go run -tags debug misc/sim/treesim.go "$@"

View File

@ -1,901 +0,0 @@
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
# 1: Pick any node, here I'm using highest nodeID
# 2: Build spanning tree, each node stores path back to root
# Optionally with weights for each hop
# Ties broken by preferring a parent with higher degree
# 3: Distance metric: self->peer + (via tree) peer->dest
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
# 5: Source-route traffic using the better of those two paths
# Note: This makes no attempt to simulate a dynamic network
# E.g. A node's peers cannot be disconnected
# TODO:
# Make better use of drop?
# In particular, we should be ignoring *all* recently dropped *paths* to the root
# To minimize route flapping
# Not really an issue in the sim, but probably needed for a real network
import array
import gc
import glob
import gzip
import heapq
import os
import random
import time
#############
# Constants #
#############
# Reminder of where link cost comes in
LINK_COST = 1
# Timeout before dropping something, in simulated seconds
TIMEOUT = 60
###########
# Classes #
###########
class PathInfo:
def __init__(self, nodeID):
self.nodeID = nodeID # e.g. IP
self.coords = [] # Position in tree
self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info
self.degree = 0 # Number of peers the sender has, used to break ties
# The above should be signed
self.path = [nodeID] # Path to node (in path-vector route)
self.time = 0 # Time info was updated, to keep track of e.g. timeouts
self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
def clone(self):
# Return a deep-enough copy of the path
clone = PathInfo(None)
clone.nodeID = self.nodeID
clone.coords = self.coords[:]
clone.tstamp = self.tstamp
clone.degree = self.degree
clone.path = self.path[:]
clone.time = self.time
clone.treeID = self.treeID
return clone
# End class PathInfo
class Node:
def __init__(self, nodeID):
self.info = PathInfo(nodeID) # Self NodeInfo
self.root = None # PathInfo to node at root of tree
self.drop = dict() # PathInfo to nodes from clus that have timed out
self.peers = dict() # PathInfo to peers
self.links = dict() # Links to peers (to pass messages)
self.msgs = [] # Said messages
self.table = dict() # Pre-computed lookup table of peer info
def tick(self):
# Do periodic maintenance stuff, including push updates
self.info.time += 1
if self.info.time > self.info.tstamp + TIMEOUT/4:
# Update timestamp at least once every 1/4 timeout period
# This should probably be randomized in a real implementation
self.info.tstamp = self.info.time
self.info.degree = 0# TODO decide if degree should be used, len(self.peers)
changed = False # Used to track when the network has converged
changed |= self.cleanRoot()
self.cleanDropped()
# Should probably send messages infrequently if there's nothing new to report
if self.info.tstamp == self.info.time:
msg = self.createMessage()
self.sendMessage(msg)
return changed
def cleanRoot(self):
changed = False
if self.root and self.info.time - self.root.time > TIMEOUT:
print "DEBUG: clean root,", self.root.path
self.drop[self.root.treeID] = self.root
self.root = None
changed = True
if not self.root or self.root.treeID < self.info.treeID:
# No need to drop someone who'se worse than us
self.info.coords = [self.info.nodeID]
self.root = self.info.clone()
changed = True
elif self.root.treeID == self.info.treeID:
self.root = self.info.clone()
return changed
def cleanDropped(self):
# May actually be a treeID... better to iterate over keys explicitly
nodeIDs = sorted(self.drop.keys())
for nodeID in nodeIDs:
node = self.drop[nodeID]
if self.info.time - node.time > 4*TIMEOUT:
del self.drop[nodeID]
return None
def createMessage(self):
# Message is just a tuple
# First element is the sender
# Second element is the root
# We will .clone() everything during the send operation
msg = (self.info, self.root)
return msg
def sendMessage(self, msg):
for link in self.links.values():
newMsg = (msg[0].clone(), msg[1].clone())
link.msgs.append(newMsg)
return None
def handleMessages(self):
changed = False
while self.msgs:
changed |= self.handleMessage(self.msgs.pop())
return changed
def handleMessage(self, msg):
changed = False
for node in msg:
# Update the path and timestamp for the sender and root info
node.path.append(self.info.nodeID)
node.time = self.info.time
# Update the sender's info in our list of peers
sender = msg[0]
self.peers[sender.nodeID] = sender
# Decide if we want to update the root
root = msg[1]
updateRoot = False
isSameParent = False
isBetterParent = False
if len(self.root.path) > 1 and len(root.path) > 1:
parent = self.peers[self.root.path[-2]]
if parent.nodeID == sender.nodeID: isSameParent = True
if sender.degree > parent.degree:
# This would also be where you check path uptime/reliability/whatever
# All else being equal, we prefer parents with high degree
# We are trusting peers to report degree correctly in this case
# So expect some performance reduction if your peers aren't trustworthy
# (Lies can increase average stretch by a few %)
isBetterParent = True
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
elif not self.root: updateRoot = True
elif self.root.treeID < root.treeID: updateRoot = True
elif self.root.treeID != root.treeID: pass
elif self.root.tstamp > root.tstamp: pass
elif len(root.path) < len(self.root.path): updateRoot = True
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
if updateRoot:
if not self.root or self.root.path != root.path: changed = True
self.root = root
self.info.coords = self.root.path
return changed
def lookup(self, dest):
# Note: Can loop in an unconverged network
# The person looking up the route is responsible for checking for loops
best = None
bestDist = 0
for node in self.peers.itervalues():
# dist = distance to node + dist (on tree) from node to dest
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
if not best or dist < bestDist:
best = node
bestDist = dist
if best:
next = best.path[-2]
assert next in self.peers
return next
else:
# We failed to look something up
# TODO some way to signal this which doesn't crash
assert False
def initTable(self):
# Pre-computes a lookup table for destination coords
# Insert parent first so you prefer them as a next-hop
self.table.clear()
parent = self.info.nodeID
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
for peer in self.peers.itervalues():
current = self.table
for coord in peer.coords:
if coord not in current: current[coord] = (peer.nodeID, dict())
old = current[coord]
next = old[1]
oldPeer = self.peers[old[0]]
oldDist = len(oldPeer.coords)
oldDeg = oldPeer.degree
newDist = len(peer.coords)
newDeg = peer.degree
# Prefer parent
# Else prefer short distance from root
# If equal distance, prefer high degree
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
current = next
return None
def lookup_new(self, dest):
# Use pre-computed lookup table to look up next hop for dest coords
assert self.table
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
else: parent = None
current = (parent, self.table)
c = None
for coord in dest.coords:
c = coord
if coord not in current[1]: break
current = current[1][coord]
next = current[0]
if c in self.peers: next = c
if next not in self.peers:
assert next == None
# You're the root of a different connected component
# You'd drop the packet in this case
# To make the path cache not die, need to return a valid next hop...
# Returning self for that reason
next = self.info.nodeID
return next
# End class Node
####################
# Helper Functions #
####################
def getIndexOfLCA(source, dest):
# Return index of last common ancestor in source/dest coords
# -1 if no common ancestor (e.g. different roots)
lcaIdx = -1
minLen = min(len(source), len(dest))
for idx in xrange(minLen):
if source[idx] == dest[idx]: lcaIdx = idx
else: break
return lcaIdx
def treePath(source, dest):
# Return path with source at head and dest at tail
lastMatch = getIndexOfLCA(source, dest)
path = dest[-1:lastMatch:-1] + source[lastMatch:]
assert path[0] == dest[-1]
assert path[-1] == source[-1]
return path
def treeDist(source, dest):
dist = len(source) + len(dest)
lcaIdx = getIndexOfLCA(source, dest)
dist -= 2*(lcaIdx+1)
return dist
def dijkstra(nodestore, startingNodeID):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes)
queue = [(0, startingNodeID)]
while queue:
dist, nodeID = heapq.heappop(queue)
idx = idxs[nodeID]
if not dists[idx]: # Unvisited, otherwise we skip it
dists[idx] = dist
for peer in nodestore[nodeID].links:
if not dists[idxs[peer]]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, peer))
return dists
def dijkstrall(nodestore):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end)
for sourceIdx in xrange(nNodes):
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
queue = [(0, sourceIdx)]
while queue:
dist, nodeIdx = heapq.heappop(queue)
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
if not dists[distIdx]: # Unvisited, otherwise we skip it
dists[distIdx] = dist
for peer in nodestore[nodeIDs[nodeIdx]].links:
pIdx = idxs[peer]
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
if not dists[pdIdx]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, pIdx))
return dists
def linkNodes(node1, node2):
node1.links[node2.info.nodeID] = node2
node2.links[node1.info.nodeID] = node1
############################
# Store topology functions #
############################
def makeStoreSquareGrid(sideLength, randomize=True):
# Simple grid in a sideLength*sideLength square
# Just used to validate that the code runs
store = dict()
nodeIDs = list(range(sideLength*sideLength))
if randomize: random.shuffle(nodeIDs)
for nodeID in nodeIDs:
store[nodeID] = Node(nodeID)
for index in xrange(len(nodeIDs)):
if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
print "Grid store created, size {}".format(len(store))
return store
def makeStoreASRelGraph(pathToGraph):
#Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
nodeDeg = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
nodeDeg[nodes[0]] += 1
nodeDeg[nodes[1]] += 1
sortedNodes = sorted(nodeDeg.keys(), \
key=lambda x: (nodeDeg[x], x), \
reverse=True)
maxDegNodeID = sortedNodes[degIdx]
return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)
def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store:
store[nodes[0]] = Node(nodes[0])
if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
if nodes[1] not in store:
store[nodes[1]] = Node(nodes[1])
if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
# Read from a DIMES csv-formatted graph from a gzip file
store = dict()
with gzip.open(pathToGraph, "r") as f:
inData = f.readlines()
size = len(inData)
index = 0
for edge in inData:
if not index % 1000:
pct = 100.0*index/size
print "Processing edge {}, {:.2f}%".format(index, pct)
index += 1
dat = edge.rstrip().split(',')
node1 = "N" + str(dat[0].strip())
node2 = "N" + str(dat[1].strip())
if '?' in node1 or '?' in node2: continue #Unknown node
if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
if node1 != node2: linkNodes(store[node1], store[node2])
print "DIMES graph successfully imported, size {}".format(len(store))
return store
def makeStoreGeneratedGraph(pathToGraph, root=None):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
nodes = map(int, line.strip().split(' ')[0:2])
node1 = nodes[0]
node2 = nodes[1]
if node1 == root: node1 += 1000000
if node2 == root: node2 += 1000000
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
linkNodes(store[node1], store[node2])
print "Generated graph successfully imported, size {}".format(len(store))
return store
############################################
# Functions used as parts of network tests #
############################################
def idleUntilConverged(store):
nodeIDs = sorted(store.keys())
timeOfLastChange = 0
step = 0
# Idle until the network has converged
while step - timeOfLastChange < 4*TIMEOUT:
step += 1
print "Step: {}, last change: {}".format(step, timeOfLastChange)
changed = False
for nodeID in nodeIDs:
# Update node status, send messages
changed |= store[nodeID].tick()
for nodeID in nodeIDs:
# Process messages
changed |= store[nodeID].handleMessages()
if changed: timeOfLastChange = step
initTables(store)
return store
def getCacheIndex(nodes, sourceIndex, destIndex):
return sourceIndex*nodes + destIndex
def initTables(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
print "Initializing routing tables for {} nodes".format(nNodes)
for idx in xrange(nNodes):
nodeID = nodeIDs[idx]
store[nodeID].initTable()
print "Routing tables initialized"
return None
def getCache(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
nodeIdxs = dict()
for nodeIdx in xrange(nNodes):
nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
cache = array.array("H", [0]*nNodes*nNodes)
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if sourceID == destID: nextHop = destID # lookup would fail
else: nextHop = store[sourceID].lookup(store[destID].info)
nextHopIdx = nodeIdxs[nextHop]
cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
return cache
def testPaths(store, dists):
cache = getCache(store)
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
results = dict()
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
#dists = dijkstra(store, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if destID == sourceID: continue # Skip self
distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
eHops = dists[distIdx]
if not eHops: continue # The network is split, no path exists
hops = 0
for pair in ((sourceIdx, destIdx),):
nHops = 0
locIdx = pair[0]
dIdx = pair[1]
while locIdx != dIdx:
locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
nHops += 1
if not hops or nHops < hops: hops = nHops
if eHops not in results: results[eHops] = dict()
if hops not in results[eHops]: results[eHops][hops] = 0
results[eHops][hops] += 1
return results
def getAvgStretch(pathMatrix):
avgStretch = 0.
checked = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
stretch = float(nHops)/float(max(1, eHops))
avgStretch += stretch*count
checked += count
avgStretch /= max(1, checked)
return avgStretch
def getMaxStretch(pathMatrix):
maxStretch = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
stretch = float(nHops)/float(max(1, eHops))
maxStretch = max(maxStretch, stretch)
return maxStretch
def getCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
sizes = dict()
for node in store.values():
certs = set()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
certs.add(cert)
size = len(certs)
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getMinLinkCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
# Based on the minimum number of certs that must be traded through a particular link
# Handled per link
sizes = dict()
for node in store.values():
peerCerts = dict()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
pCerts.add(cert)
peerCerts[peer.nodeID] = pCerts
for peer in peerCerts:
size = 0
pCerts = peerCerts[peer]
for cert in pCerts:
required = True
for p2 in peerCerts:
if p2 == peer: continue
p2Certs = peerCerts[p2]
if cert in p2Certs: required = False
if required: size += 1
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPathSizes(store):
# Returns frequency distribution of the total number of hops the routing table
# I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
sizes = dict()
for node in store.values():
size = 0
for peer in node.peers.values():
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
size += peerSize
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPeerSizes(store):
# Returns frequency distribution of the number of peers each node has
sizes = dict()
for node in store.values():
nPeers = len(node.peers)
if nPeers not in sizes: sizes[nPeers] = 0
sizes[nPeers] += 1
return sizes
def getAvgSize(sizes):
sumSizes = 0
nNodes = 0
for size in sizes:
count = sizes[size]
sumSizes += size*count
nNodes += count
avgSize = float(sumSizes)/max(1, nNodes)
return avgSize
def getMaxSize(sizes):
return max(sizes.keys())
def getMinSize(sizes):
return min(sizes.keys())
def getResults(pathMatrix):
results = []
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
results.append("{} {} {}".format(eHops, nHops, count))
return '\n'.join(results)
####################################
# Functions to run different tests #
####################################
def runTest(store):
# Runs the usual set of tests on the store
# Does not save results, so only meant for quick tests
# To e.g. check the code works, maybe warm up the pypy jit
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Begin testing network"
dists = None
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
peers = getPeerSizes(store)
certs = getCertSizes(store)
paths = getPathSizes(store)
linkCerts = getMinLinkCertSizes(store)
avgPeerSize = getAvgSize(peers)
maxPeerSize = getMaxSize(peers)
avgCertSize = getAvgSize(certs)
maxCertSize = getMaxSize(certs)
avgPathSize = getAvgSize(paths)
maxPathSize = getMaxSize(paths)
avgLinkCert = getAvgSize(linkCerts)
maxLinkCert = getMaxSize(linkCerts)
totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
print "Finished testing network"
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
return # End of function
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc = 1):
# Checks performance for every possible choice of root node
# Saves output for each root node to a separate file on disk
# path = input path to some caida.org formatted AS-relationship graph
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
store = makeStoreASRelGraph(path)
nodes = sorted(store.keys())
for nodeIdx in xrange(len(nodes)):
if nodeIdx % proc != 0: continue # Work belongs to someone else
rootNodeID = nodes[nodeIdx]
outpath = outDir+"/{}".format(rootNodeID)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(rootNodeID)
continue
store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(nodeIdx, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineASTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-AS"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("asrel/datasets/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(date)
continue
store = makeStoreASRelGraphMaxDeg(path)
dists = None
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineDimesTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-dimes"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
# Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if outpath in exists:
print "Skipping {}, already processed".format(date)
continue
store = makeStoreDimesEdges(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys())
bestRoot = ""
bestDeg = 0
for nodeID in nodeIDs:
node = store[nodeID]
if len(node.links) > bestDeg:
bestRoot = nodeID
bestDeg = len(node.links)
assert bestRoot
store = makeStoreDimesEdges(path, bestRoot)
rootID = "R" + bestRoot[1:]
assert rootID in store
# Don't forget to set random seed before setting times
# To make results reproducible
nodeIDs = sorted(store.keys())
random.seed(12345)
for nodeID in nodeIDs:
node = store[nodeID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
break # Stop after 1, because they can take forever
return # End of function
def scalingTest(maxTests=None, inputDir="graphs"):
# Meant to study the performance of the network as a function of network size
# Loops over a set of nodes in a previously generated graph
# Runs a test on each graph, testing each node as the root
# if maxTests is set, tests only that number of roots (highest degree first)
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-{}".format(inputDir)
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("{}/*".format(inputDir)))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
gc.collect() # pypy waits for gc to close files
graph = os.path.basename(path).split(".")[0]
store = makeStoreGeneratedGraph(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
dists = None
if maxTests: nodeIDs = nodeIDs[:maxTests]
for nodeID in nodeIDs:
nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
if outpath in exists:
print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
continue
store = makeStoreGeneratedGraph(path, nodeID)
# Don't forget to set random seed before setting times
random.seed(12345) # To make results reproducible
nIDs = sorted(store.keys())
for nID in nIDs:
node = store[nID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(graph, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
return # End of function
##################
# Main Execution #
##################
if __name__ == "__main__":
if True: # Run a quick test
random.seed(12345) # DEBUG
store = makeStoreSquareGrid(4)
runTest(store) # Quick test
store = None
# Do some real work
#runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
#timelineDimesTest()
#rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
#timelineASTest()
#rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
#scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
#store = makeStoreGeneratedGraph("bgp_tables")
#store = makeStoreGeneratedGraph("skitter")
#store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
#store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
if store: runTest(store)
#rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
#scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
if not store:
import sys
args = sys.argv
if len(args) == 2:
job_number = int(sys.argv[1])
rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
else:
print "Usage: {} job_number".format(args[0])
print "job_number = which job set to run on this node (1-indexed)"

View File

@ -1,459 +0,0 @@
// +build !lint
package main
import (
"bufio"
"flag"
"fmt"
"os"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"github.com/gologme/log"
. "github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
. "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
////////////////////////////////////////////////////////////////////////////////
type Node struct {
index int
core Core
send chan<- []byte
recv <-chan []byte
}
func (n *Node) init(index int) {
n.index = index
n.core.Init()
n.send = n.core.DEBUG_getSend()
n.recv = n.core.DEBUG_getRecv()
n.core.DEBUG_simFixMTU()
}
func (n *Node) printTraffic() {
for {
packet := <-n.recv
fmt.Println(n.index, packet)
//panic("Got a packet")
}
}
func (n *Node) startPeers() {
//for _, p := range n.core.Peers.Ports {
// go p.MainLoop()
//}
//go n.printTraffic()
//n.core.Peers.DEBUG_startPeers()
}
func linkNodes(m, n *Node) {
// Don't allow duplicates
if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigningPublicKey()) {
return
}
// Create peers
// Buffering reduces packet loss in the sim
// This slightly speeds up testing (fewer delays before retrying a ping)
pLinkPub, pLinkPriv := m.core.DEBUG_newBoxKeys()
qLinkPub, qLinkPriv := m.core.DEBUG_newBoxKeys()
p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getEncryptionPublicKey(),
n.core.DEBUG_getSigningPublicKey(), *m.core.DEBUG_getSharedKey(pLinkPriv, qLinkPub))
q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getEncryptionPublicKey(),
m.core.DEBUG_getSigningPublicKey(), *n.core.DEBUG_getSharedKey(qLinkPriv, pLinkPub))
DEBUG_simLinkPeers(p, q)
return
}
func makeStoreSquareGrid(sideLength int) map[int]*Node {
store := make(map[int]*Node)
nNodes := sideLength * sideLength
idxs := make([]int, 0, nNodes)
// TODO shuffle nodeIDs
for idx := 1; idx <= nNodes; idx++ {
idxs = append(idxs, idx)
}
for _, idx := range idxs {
node := &Node{}
node.init(idx)
store[idx] = node
}
for idx := 0; idx < nNodes; idx++ {
if (idx % sideLength) != 0 {
linkNodes(store[idxs[idx]], store[idxs[idx-1]])
}
if idx >= sideLength {
linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
}
}
//for _, node := range store { node.initPorts() }
return store
}
func makeStoreStar(nNodes int) map[int]*Node {
store := make(map[int]*Node)
center := &Node{}
center.init(0)
store[0] = center
for idx := 1; idx < nNodes; idx++ {
node := &Node{}
node.init(idx)
store[idx] = node
linkNodes(center, node)
}
return store
}
func loadGraph(path string) map[int]*Node {
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
store := make(map[int]*Node)
s := bufio.NewScanner(f)
for s.Scan() {
line := s.Text()
nodeIdxstrs := strings.Split(line, " ")
nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0])
nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1])
if store[nodeIdx0] == nil {
node := &Node{}
node.init(nodeIdx0)
store[nodeIdx0] = node
}
if store[nodeIdx1] == nil {
node := &Node{}
node.init(nodeIdx1)
store[nodeIdx1] = node
}
linkNodes(store[nodeIdx0], store[nodeIdx1])
}
//for _, node := range store { node.initPorts() }
return store
}
////////////////////////////////////////////////////////////////////////////////
func startNetwork(store map[[32]byte]*Node) {
for _, node := range store {
node.startPeers()
}
}
func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
newStore := make(map[[32]byte]*Node)
for _, node := range store {
newStore[node.core.DEBUG_getSigningPublicKey()] = node
}
return newStore
}
func testPaths(store map[[32]byte]*Node) bool {
nNodes := len(store)
count := 0
for _, source := range store {
count++
fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index)
for _, dest := range store {
//if source == dest { continue }
destLoc := dest.core.DEBUG_getLocator()
coords := destLoc.DEBUG_getCoords()
temp := 0
ttl := ^uint64(0)
oldTTL := ttl
for here := source; here != dest; {
temp++
if temp > 4096 {
fmt.Println("Loop?")
time.Sleep(time.Second)
return false
}
nextPort := here.core.DEBUG_switchLookup(coords)
// First check if "here" is accepting packets from the previous node
// TODO explain how this works
ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
nextPeer := ports[nextPort]
if nextPeer == nil {
fmt.Println("Peer associated with next port is nil")
return false
}
next := store[nextPeer.DEBUG_getSigKey()]
/*
if next == here {
//for idx, link := range here.links {
// fmt.Println("DUMP:", idx, link.nodeID)
//}
if nextPort != 0 { panic("This should not be") }
fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl)
//here.table.DEBUG_dumpTable()
//fmt.Println("Ports:", here.nodeID, here.ports)
return false
panic(fmt.Sprintln("Routing Loop:",
source.index,
here.index,
dest.index))
}
*/
if temp > 4090 {
fmt.Println("DEBUG:",
source.index, source.core.DEBUG_getLocator(),
here.index, here.core.DEBUG_getLocator(),
dest.index, dest.core.DEBUG_getLocator())
//here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
}
if here != source {
// This is sufficient to check for routing loops or blackholes
//break
}
if here == next {
fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
return false
}
here = next
}
}
}
return true
}
func stressTest(store map[[32]byte]*Node) {
fmt.Println("Stress testing network...")
nNodes := len(store)
dests := make([][]byte, 0, nNodes)
for _, dest := range store {
loc := dest.core.DEBUG_getLocator()
coords := loc.DEBUG_getCoords()
dests = append(dests, coords)
}
lookups := 0
start := time.Now()
for _, source := range store {
for _, coords := range dests {
source.core.DEBUG_switchLookup(coords)
lookups++
}
}
timed := time.Since(start)
fmt.Printf("%d lookups in %s (%f lookups per second)\n",
lookups,
timed,
float64(lookups)/timed.Seconds())
}
func pingNodes(store map[[32]byte]*Node) {
fmt.Println("Sending pings...")
nNodes := len(store)
count := 0
equiv := func(a []byte, b []byte) bool {
if len(a) != len(b) {
return false
}
for idx := 0; idx < len(a); idx++ {
if a[idx] != b[idx] {
return false
}
}
return true
}
for _, source := range store {
count++
//if count > 16 { break }
fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
sourceKey := source.core.DEBUG_getEncryptionPublicKey()
payload := sourceKey[:]
sourceAddr := source.core.DEBUG_getAddr()[:]
sendTo := func(bs []byte, destAddr []byte) {
packet := make([]byte, 40+len(bs))
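// Hand-rolled minimal IPv6 header for the sim: the version nibble (6) goes in
// the first byte, the source address occupies bytes 8-23, the destination
// address bytes 24-39, and the payload starts at offset 40.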
copy(packet[8:24], sourceAddr)
copy(packet[24:40], destAddr)
copy(packet[40:], bs)
packet[0] = 6 << 4
source.send <- packet
}
destCount := 0
for _, dest := range store {
destCount += 1
fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount)
if dest == source {
fmt.Println("Skipping self")
continue
}
destAddr := dest.core.DEBUG_getAddr()[:]
ticker := time.NewTicker(150 * time.Millisecond)
sendTo(payload, destAddr)
for loop := true; loop; {
select {
case packet := <-dest.recv:
{
if equiv(payload, packet[len(packet)-len(payload):]) {
loop = false
}
}
case <-ticker.C:
sendTo(payload, destAddr)
//dumpDHTSize(store) // note that this uses racey functions to read things...
}
}
ticker.Stop()
}
//break // Only try sending pings from 1 node
// This is because, for some reason, stopTun() doesn't always close it
// And if two tuns are up, bad things happen (sends via wrong interface)
}
fmt.Println("Finished pinging nodes")
}
func pingBench(store map[[32]byte]*Node) {
fmt.Println("Benchmarking pings...")
nPings := 0
payload := make([]byte, 1280+40) // MTU + ipv6 header
var timed time.Duration
//nNodes := len(store)
count := 0
for _, source := range store {
count++
//fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
getPing := func(key [32]byte, decodedCoords []byte) []byte {
// TODO write some function to do this the right way, put... somewhere...
coords := DEBUG_wire_encode_coords(decodedCoords)
packet := make([]byte, 0, len(key)+len(coords)+len(payload))
packet = append(packet, key[:]...)
packet = append(packet, coords...)
packet = append(packet, payload[:]...)
return packet
}
for _, dest := range store {
key := dest.core.DEBUG_getEncryptionPublicKey()
loc := dest.core.DEBUG_getLocator()
coords := loc.DEBUG_getCoords()
ping := getPing(key, coords)
// TODO make sure the session is open first
start := time.Now()
for i := 0; i < 1000000; i++ {
source.send <- ping
nPings++
}
timed += time.Since(start)
break
}
break
}
fmt.Printf("Sent %d pings in %s (%f per second)\n",
nPings,
timed,
float64(nPings)/timed.Seconds())
}
func dumpStore(store map[NodeID]*Node) {
for _, node := range store {
fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator())
node.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
}
}
func dumpDHTSize(store map[[32]byte]*Node) {
var min, max, sum int
for _, node := range store {
num := node.core.DEBUG_getDHTSize()
min = num
max = num
break
}
for _, node := range store {
num := node.core.DEBUG_getDHTSize()
if num < min {
min = num
}
if num > max {
max = num
}
sum += num
}
avg := float64(sum) / float64(len(store))
fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
}
func (n *Node) startTCP(listen string) {
n.core.DEBUG_setupAndStartGlobalTCPInterface(listen)
}
func (n *Node) connectTCP(remoteAddr string) {
n.core.AddPeer(remoteAddr, remoteAddr)
}
////////////////////////////////////////////////////////////////////////////////
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
fmt.Println("Test")
Util_testAddrIDMask()
idxstore := makeStoreSquareGrid(4)
//idxstore := makeStoreStar(256)
//idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
//idxstore := loadGraph("skitter")
kstore := getKeyedStore(idxstore)
//*
logger := log.New(os.Stderr, "", log.Flags())
for _, n := range kstore {
n.core.DEBUG_setLogger(logger)
}
//*/
startNetwork(kstore)
//time.Sleep(10*time.Second)
// Note that testPaths only works if pressure is turned off
// Otherwise congestion can lead to routing loops?
for finished := false; !finished; {
finished = testPaths(kstore)
}
pingNodes(kstore)
//pingBench(kstore) // Only after disabling debug output
//stressTest(kstore)
//time.Sleep(120 * time.Second)
dumpDHTSize(kstore) // note that this uses racey functions to read things...
if false {
// This connects the sim to the local network
for _, node := range kstore {
node.startTCP("localhost:0")
node.connectTCP("localhost:12345")
break // just 1
}
for _, node := range kstore {
go func(node *Node) {
// Just dump any packets sent to this node; the node is passed in as an
// argument so each goroutine drains its own channel rather than the
// shared loop variable.
for range node.recv {
}
}(node)
}
var block chan struct{}
<-block
}
runtime.GC()
}

View File

@@ -3,9 +3,7 @@
package address
import (
"fmt"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"crypto/ed25519"
)
// Address represents an IPv6 address in the yggdrasil address range.
@@ -45,25 +43,34 @@ func (s *Subnet) IsValid() bool {
return (*s)[l-1] == prefix[l-1]|0x01
}
// AddrForNodeID takes a *NodeID as an argument and returns an *Address.
// AddrForKey takes an ed25519.PublicKey as an argument and returns an *Address.
// This function returns nil if the key length is not ed25519.PublicKeySize.
// This address begins with the contents of GetPrefix(), with the last bit set to 0 to indicate an address.
// The following 8 bits are set to the number of leading 1 bits in the NodeID.
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the address.
func AddrForNodeID(nid *crypto.NodeID) *Address {
// The following 8 bits are set to the number of leading 1 bits in the bitwise inverse of the public key.
// The bitwise inverse of the key, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the address.
func AddrForKey(publicKey ed25519.PublicKey) *Address {
// 128 bit address
// Begins with prefix
// Next bit is a 0
// Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID
// Leading 1s and first leading 0 of the NodeID are truncated off
// The rest is appended to the IPv6 address (truncated to 128 bits total)
if len(publicKey) != ed25519.PublicKeySize {
return nil
}
var buf [ed25519.PublicKeySize]byte
copy(buf[:], publicKey)
for idx := range buf {
buf[idx] = ^buf[idx]
}
var addr Address
var temp []byte
done := false
ones := byte(0)
bits := byte(0)
nBits := 0
for idx := 0; idx < 8*len(nid); idx++ {
bit := (nid[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
for idx := 0; idx < 8*len(buf); idx++ {
bit := (buf[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
if !done && bit != 0 {
ones++
continue
@@ -86,15 +93,19 @@ func AddrForNodeID(nid *crypto.NodeID) *Address {
return &addr
}
// SubnetForNodeID takes a *NodeID as an argument and returns an *Address.
// This subnet begins with the address prefix, with the last bit set to 1 to indicate a prefix.
// The following 8 bits are set to the number of leading 1 bits in the NodeID.
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the subnet.
func SubnetForNodeID(nid *crypto.NodeID) *Subnet {
// SubnetForKey takes an ed25519.PublicKey as an argument and returns a *Subnet.
// This function returns nil if the key length is not ed25519.PublicKeySize.
// The subnet begins with the address prefix, with the last bit set to 1 to indicate a prefix.
// The following 8 bits are set to the number of leading 1 bits in the bitwise inverse of the key.
// The bitwise inverse of the key, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the subnet.
func SubnetForKey(publicKey ed25519.PublicKey) *Subnet {
// Exactly as the address version, with two exceptions:
// 1) The first bit after the fixed prefix is a 1 instead of a 0
// 2) It's truncated to a subnet prefix length instead of 128 bits
addr := *AddrForNodeID(nid)
addr := AddrForKey(publicKey)
if addr == nil {
return nil
}
var snet Subnet
copy(snet[:], addr[:])
prefix := GetPrefix()
@@ -102,75 +113,34 @@ func SubnetForNodeID(nid *crypto.NodeID) *Subnet {
return &snet
}
// GetNodeIDandMask returns two *NodeID.
// The first is a NodeID with all the bits known from the Address set to their correct values.
// The second is a bitmask with 1 bit set for each bit that was known from the Address.
// This is used to look up NodeIDs in the DHT and tell if they match an Address.
func (a *Address) GetNodeIDandMask() (*crypto.NodeID, *crypto.NodeID) {
// Mask is a bitmask to mark the bits visible from the address
// This means truncated leading 1s, first leading 0, and visible part of addr
var nid crypto.NodeID
var mask crypto.NodeID
// GetKey returns the partial ed25519.PublicKey for the Address.
// This is used for key lookup.
func (a *Address) GetKey() ed25519.PublicKey {
var key [ed25519.PublicKeySize]byte
prefix := GetPrefix()
ones := int(a[len(prefix)])
for idx := 0; idx < ones; idx++ {
nid[idx/8] |= 0x80 >> byte(idx%8)
key[idx/8] |= 0x80 >> byte(idx%8)
}
nidOffset := ones + 1
keyOffset := ones + 1
addrOffset := 8*len(prefix) + 8
for idx := addrOffset; idx < 8*len(a); idx++ {
bits := a[idx/8] & (0x80 >> byte(idx%8))
bits <<= byte(idx % 8)
nidIdx := nidOffset + (idx - addrOffset)
bits >>= byte(nidIdx % 8)
nid[nidIdx/8] |= bits
keyIdx := keyOffset + (idx - addrOffset)
bits >>= byte(keyIdx % 8)
key[keyIdx/8] |= bits
}
maxMask := 8*(len(a)-len(prefix)-1) + ones + 1
for idx := 0; idx < maxMask; idx++ {
mask[idx/8] |= 0x80 >> byte(idx%8)
for idx := range key {
key[idx] = ^key[idx]
}
return &nid, &mask
return ed25519.PublicKey(key[:])
}
// GetNodeIDLengthString returns a string representation of the known bits of the NodeID, along with the number of known bits, for use with yggdrasil.Dialer's Dial and DialContext functions.
func (a *Address) GetNodeIDLengthString() string {
nid, mask := a.GetNodeIDandMask()
l := mask.PrefixLength()
return fmt.Sprintf("%s/%d", nid.String(), l)
}
// GetNodeIDandMask returns two *NodeID.
// The first is a NodeID with all the bits known from the Subnet set to their correct values.
// The second is a bitmask with 1 bit set for each bit that was known from the Subnet.
// This is used to look up NodeIDs in the DHT and tell if they match a Subnet.
func (s *Subnet) GetNodeIDandMask() (*crypto.NodeID, *crypto.NodeID) {
// As with the address version, but visible parts of the subnet prefix instead
var nid crypto.NodeID
var mask crypto.NodeID
prefix := GetPrefix()
ones := int(s[len(prefix)])
for idx := 0; idx < ones; idx++ {
nid[idx/8] |= 0x80 >> byte(idx%8)
}
nidOffset := ones + 1
addrOffset := 8*len(prefix) + 8
for idx := addrOffset; idx < 8*len(s); idx++ {
bits := s[idx/8] & (0x80 >> byte(idx%8))
bits <<= byte(idx % 8)
nidIdx := nidOffset + (idx - addrOffset)
bits >>= byte(nidIdx % 8)
nid[nidIdx/8] |= bits
}
maxMask := 8*(len(s)-len(prefix)-1) + ones + 1
for idx := 0; idx < maxMask; idx++ {
mask[idx/8] |= 0x80 >> byte(idx%8)
}
return &nid, &mask
}
// GetNodeIDLengthString returns a string representation of the known bits of the NodeID, along with the number of known bits, for use with yggdrasil.Dialer's Dial and DialContext functions.
func (s *Subnet) GetNodeIDLengthString() string {
nid, mask := s.GetNodeIDandMask()
l := mask.PrefixLength()
return fmt.Sprintf("%s/%d", nid.String(), l)
// GetKey returns the partial ed25519.PublicKey for the Subnet.
// This is used for key lookup.
func (s *Subnet) GetKey() ed25519.PublicKey {
var addr Address
copy(addr[:], s[:])
return addr.GetKey()
}
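A minimal usage sketch for the functions above (not part of this commit; the import path is taken from the diff headers and the printed output is illustrative): it derives a node's address and routed subnet from a fresh ed25519 key, then recovers the partial key that is visible in the address.
package main

import (
	"crypto/ed25519"
	"fmt"
	"net"

	"github.com/yggdrasil-network/yggdrasil-go/src/address"
)

func main() {
	pub, _, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}
	addr := address.AddrForKey(pub)   // /128 node address
	snet := address.SubnetForKey(pub) // /64 routed subnet
	fmt.Println("address:", net.IP(addr[:]))
	fmt.Println("subnet: ", net.IP(append(snet[:], 0, 0, 0, 0, 0, 0, 0, 0)))
	// GetKey returns only the key bits that are visible in the address;
	// the rest of the key cannot be recovered from 128 bits alone.
	partial := addr.GetKey()
	fmt.Printf("partial key: %x\n", partial)
}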

View File

@@ -1,48 +1,57 @@
package admin
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
"github.com/yggdrasil-network/yggdrasil-go/src/core"
)
// TODO: Add authentication
type AdminSocket struct {
core *yggdrasil.Core
core *core.Core
log *log.Logger
listenaddr string
listener net.Listener
handlers map[string]handler
started bool
done chan struct{}
}
// Info refers to information that is returned to the admin socket handler.
type Info map[string]interface{}
type AdminSocketResponse struct {
Status string `json:"status"`
Request struct {
Name string `json:"request"`
KeepAlive bool `json:"keepalive"`
} `json:"request"`
Response interface{} `json:"response"`
}
type handler struct {
args []string // List of human-readable argument names
handler func(Info) (Info, error) // First is input map, second is output
args []string // List of human-readable argument names
handler func(json.RawMessage) (interface{}, error) // First is input map, second is output
}
type ListResponse struct {
List map[string]ListEntry `json:"list"`
}
type ListEntry struct {
Fields []string `json:"fields"`
}
// AddHandler is called for each admin function to add the handler and help documentation to the API.
func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(Info) (Info, error)) error {
func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(json.RawMessage) (interface{}, error)) error {
if _, ok := a.handlers[strings.ToLower(name)]; ok {
return errors.New("handler already exists")
}
@@ -54,304 +63,121 @@ func (a *AdminSocket) AddHandler(name string, args []string, handlerfunc func(In
}
// Init runs the initial admin setup.
func (a *AdminSocket) Init(c *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
func (a *AdminSocket) Init(c *core.Core, nc *config.NodeConfig, log *log.Logger, options interface{}) error {
a.core = c
a.log = log
a.handlers = make(map[string]handler)
current := state.GetCurrent()
a.listenaddr = current.AdminListen
a.AddHandler("list", []string{}, func(in Info) (Info, error) {
handlers := make(map[string]interface{})
for handlername, handler := range a.handlers {
handlers[handlername] = Info{"fields": handler.args}
nc.RLock()
a.listenaddr = nc.AdminListen
nc.RUnlock()
a.done = make(chan struct{})
close(a.done) // Start in a done / not-started state
_ = a.AddHandler("list", []string{}, func(_ json.RawMessage) (interface{}, error) {
res := &ListResponse{
List: map[string]ListEntry{},
}
return Info{"list": handlers}, nil
for name, handler := range a.handlers {
res.List[name] = ListEntry{
Fields: handler.args,
}
}
return res, nil
})
a.core.SetAdmin(a)
return nil
}
func (a *AdminSocket) UpdateConfig(config *config.NodeConfig) {
a.log.Debugln("Reloading admin configuration...")
if a.listenaddr != config.AdminListen {
a.listenaddr = config.AdminListen
if a.IsStarted() {
a.Stop()
}
a.Start()
}
}
func (a *AdminSocket) SetupAdminHandlers(na *AdminSocket) {
a.AddHandler("getSelf", []string{}, func(in Info) (Info, error) {
ip := a.core.Address().String()
subnet := a.core.Subnet()
return Info{
"self": Info{
ip: Info{
"box_pub_key": a.core.EncryptionPublicKey(),
"build_name": version.BuildName(),
"build_version": version.BuildVersion(),
"coords": fmt.Sprintf("%v", a.core.Coords()),
"subnet": subnet.String(),
},
},
}, nil
_ = a.AddHandler("getSelf", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetSelfRequest{}
res := &GetSelfResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
if err := a.getSelfHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
a.AddHandler("getPeers", []string{}, func(in Info) (Info, error) {
peers := make(Info)
for _, p := range a.core.GetPeers() {
addr := *address.AddrForNodeID(crypto.GetNodeID(&p.PublicKey))
so := net.IP(addr[:]).String()
peers[so] = Info{
"port": p.Port,
"uptime": p.Uptime.Seconds(),
"bytes_sent": p.BytesSent,
"bytes_recvd": p.BytesRecvd,
"proto": p.Protocol,
"endpoint": p.Endpoint,
"box_pub_key": hex.EncodeToString(p.PublicKey[:]),
}
_ = a.AddHandler("getPeers", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetPeersRequest{}
res := &GetPeersResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
return Info{"peers": peers}, nil
if err := a.getPeersHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
a.AddHandler("getSwitchPeers", []string{}, func(in Info) (Info, error) {
switchpeers := make(Info)
for _, s := range a.core.GetSwitchPeers() {
addr := *address.AddrForNodeID(crypto.GetNodeID(&s.PublicKey))
so := fmt.Sprint(s.Port)
switchpeers[so] = Info{
"ip": net.IP(addr[:]).String(),
"coords": fmt.Sprintf("%v", s.Coords),
"port": s.Port,
"bytes_sent": s.BytesSent,
"bytes_recvd": s.BytesRecvd,
"proto": s.Protocol,
"endpoint": s.Endpoint,
"box_pub_key": hex.EncodeToString(s.PublicKey[:]),
}
_ = a.AddHandler("getDHT", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetDHTRequest{}
res := &GetDHTResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
return Info{"switchpeers": switchpeers}, nil
if err := a.getDHTHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
/*
a.AddHandler("getSwitchQueues", []string{}, func(in Info) (Info, error) {
queues := a.core.GetSwitchQueues()
return Info{"switchqueues": queues.asMap()}, nil
})
*/
a.AddHandler("getDHT", []string{}, func(in Info) (Info, error) {
dht := make(Info)
for _, d := range a.core.GetDHT() {
addr := *address.AddrForNodeID(crypto.GetNodeID(&d.PublicKey))
so := net.IP(addr[:]).String()
dht[so] = Info{
"coords": fmt.Sprintf("%v", d.Coords),
"last_seen": d.LastSeen.Seconds(),
"box_pub_key": hex.EncodeToString(d.PublicKey[:]),
}
_ = a.AddHandler("getPaths", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetPathsRequest{}
res := &GetPathsResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
return Info{"dht": dht}, nil
if err := a.getPathsHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
a.AddHandler("getSessions", []string{}, func(in Info) (Info, error) {
sessions := make(Info)
for _, s := range a.core.GetSessions() {
addr := *address.AddrForNodeID(crypto.GetNodeID(&s.PublicKey))
so := net.IP(addr[:]).String()
sessions[so] = Info{
"coords": fmt.Sprintf("%v", s.Coords),
"bytes_sent": s.BytesSent,
"bytes_recvd": s.BytesRecvd,
"mtu": s.MTU,
"uptime": s.Uptime.Seconds(),
"was_mtu_fixed": s.WasMTUFixed,
"box_pub_key": hex.EncodeToString(s.PublicKey[:]),
}
_ = a.AddHandler("getSessions", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetSessionsRequest{}
res := &GetSessionsResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
return Info{"sessions": sessions}, nil
})
a.AddHandler("addPeer", []string{"uri", "[interface]"}, func(in Info) (Info, error) {
// Set sane defaults
intf := ""
// Has interface been specified?
if itf, ok := in["interface"]; ok {
intf = itf.(string)
}
if a.core.AddPeer(in["uri"].(string), intf) == nil {
return Info{
"added": []string{
in["uri"].(string),
},
}, nil
}
return Info{
"not_added": []string{
in["uri"].(string),
},
}, errors.New("Failed to add peer")
})
a.AddHandler("disconnectPeer", []string{"port"}, func(in Info) (Info, error) {
port, err := strconv.ParseInt(fmt.Sprint(in["port"]), 10, 64)
if err != nil {
return Info{}, err
}
if a.core.DisconnectPeer(uint64(port)) == nil {
return Info{
"disconnected": []string{
fmt.Sprint(port),
},
}, nil
} else {
return Info{
"not_disconnected": []string{
fmt.Sprint(port),
},
}, errors.New("Failed to disconnect peer")
}
})
a.AddHandler("removePeer", []string{"uri", "[interface]"}, func(in Info) (Info, error) {
// Set sane defaults
intf := ""
// Has interface been specified?
if itf, ok := in["interface"]; ok {
intf = itf.(string)
}
if a.core.RemovePeer(in["uri"].(string), intf) == nil {
return Info{
"removed": []string{
in["uri"].(string),
},
}, nil
} else {
return Info{
"not_removed": []string{
in["uri"].(string),
},
}, errors.New("Failed to remove peer")
}
return Info{
"not_removed": []string{
in["uri"].(string),
},
}, errors.New("Failed to remove peer")
})
a.AddHandler("getAllowedEncryptionPublicKeys", []string{}, func(in Info) (Info, error) {
return Info{"allowed_box_pubs": a.core.GetAllowedEncryptionPublicKeys()}, nil
})
a.AddHandler("addAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in Info) (Info, error) {
if a.core.AddAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
return Info{
"added": []string{
in["box_pub_key"].(string),
},
}, nil
}
return Info{
"not_added": []string{
in["box_pub_key"].(string),
},
}, errors.New("Failed to add allowed key")
})
a.AddHandler("removeAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in Info) (Info, error) {
if a.core.RemoveAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
return Info{
"removed": []string{
in["box_pub_key"].(string),
},
}, nil
}
return Info{
"not_removed": []string{
in["box_pub_key"].(string),
},
}, errors.New("Failed to remove allowed key")
})
a.AddHandler("dhtPing", []string{"box_pub_key", "coords", "[target]"}, func(in Info) (Info, error) {
var reserr error
var result yggdrasil.DHTRes
if in["target"] == nil {
in["target"] = "none"
}
coords := util.DecodeCoordString(in["coords"].(string))
var boxPubKey crypto.BoxPubKey
if b, err := hex.DecodeString(in["box_pub_key"].(string)); err == nil {
copy(boxPubKey[:], b)
if n, err := hex.DecodeString(in["target"].(string)); err == nil {
var targetNodeID crypto.NodeID
copy(targetNodeID[:], n)
result, reserr = a.core.DHTPing(boxPubKey, coords, &targetNodeID)
} else {
result, reserr = a.core.DHTPing(boxPubKey, coords, nil)
}
} else {
return Info{}, err
}
if reserr != nil {
return Info{}, reserr
}
infos := make(map[string]map[string]string, len(result.Infos))
for _, dinfo := range result.Infos {
info := map[string]string{
"box_pub_key": hex.EncodeToString(dinfo.PublicKey[:]),
"coords": fmt.Sprintf("%v", dinfo.Coords),
}
addr := net.IP(address.AddrForNodeID(crypto.GetNodeID(&dinfo.PublicKey))[:]).String()
infos[addr] = info
}
return Info{"nodes": infos}, nil
})
a.AddHandler("getNodeInfo", []string{"[box_pub_key]", "[coords]", "[nocache]"}, func(in Info) (Info, error) {
var nocache bool
if in["nocache"] != nil {
nocache = in["nocache"].(string) == "true"
}
var boxPubKey crypto.BoxPubKey
var coords []uint64
if in["box_pub_key"] == nil && in["coords"] == nil {
nodeinfo := a.core.MyNodeInfo()
var jsoninfo interface{}
if err := json.Unmarshal(nodeinfo, &jsoninfo); err != nil {
return Info{}, err
}
return Info{"nodeinfo": jsoninfo}, nil
} else if in["box_pub_key"] == nil || in["coords"] == nil {
return Info{}, errors.New("Expecting both box_pub_key and coords")
} else {
if b, err := hex.DecodeString(in["box_pub_key"].(string)); err == nil {
copy(boxPubKey[:], b)
} else {
return Info{}, err
}
coords = util.DecodeCoordString(in["coords"].(string))
}
result, err := a.core.GetNodeInfo(boxPubKey, coords, nocache)
if err == nil {
var m map[string]interface{}
if err = json.Unmarshal(result, &m); err == nil {
return Info{"nodeinfo": m}, nil
}
return Info{}, err
}
return Info{}, err
if err := a.getSessionsHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
//_ = a.AddHandler("getNodeInfo", []string{"key"}, t.proto.nodeinfo.nodeInfoAdminHandler)
//_ = a.AddHandler("debug_remoteGetSelf", []string{"key"}, t.proto.getSelfHandler)
//_ = a.AddHandler("debug_remoteGetPeers", []string{"key"}, t.proto.getPeersHandler)
//_ = a.AddHandler("debug_remoteGetDHT", []string{"key"}, t.proto.getDHTHandler)
}
// Start runs the admin API socket to listen for / respond to admin API calls.
func (a *AdminSocket) Start() error {
if a.listenaddr != "none" && a.listenaddr != "" {
a.done = make(chan struct{})
go a.listen()
a.started = true
}
return nil
}
// IsStarted returns true if the module has been started.
func (a *AdminSocket) IsStarted() bool {
return a.started
select {
case <-a.done:
// Not blocking, so we're not currently running
return false
default:
// Blocked, so we must have started
return true
}
}
// Stop will stop the admin API and close the socket.
func (a *AdminSocket) Stop() error {
if a.listener != nil {
a.started = false
select {
case <-a.done:
default:
close(a.done)
}
return a.listener.Close()
}
return nil
@@ -408,6 +234,14 @@ func (a *AdminSocket) listen() {
conn, err := a.listener.Accept()
if err == nil {
go a.handleRequest(conn)
} else {
select {
case <-a.done:
// Not blocked, so we haven't started or have already stopped
return
default:
// Blocked, so we're supposed to keep running
}
}
}
}
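The started state is now tracked with a channel instead of a boolean: a closed done channel means "stopped", an open one means "running", which also gives the accept loop above a race-free way to tell an expected shutdown apart from a real accept error. A self-contained sketch of the same pattern (illustrative only, not part of this commit):
package main

import "fmt"

type service struct {
	done chan struct{}
}

func newService() *service {
	s := &service{done: make(chan struct{})}
	close(s.done) // begin in a stopped state, as AdminSocket.Init does
	return s
}

func (s *service) start() { s.done = make(chan struct{}) }

func (s *service) stop() {
	select {
	case <-s.done: // already closed, nothing to do
	default:
		close(s.done)
	}
}

func (s *service) running() bool {
	select {
	case <-s.done:
		return false // receive succeeded, so the channel is closed: stopped
	default:
		return true // receive would block, so the channel is open: running
	}
}

func main() {
	s := newService()
	fmt.Println(s.running()) // false
	s.start()
	fmt.Println(s.running()) // true
	s.stop()
	fmt.Println(s.running()) // false
}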
@@ -415,20 +249,20 @@ func (a *AdminSocket) listen() {
// handleRequest calls the request handler for each request sent to the admin API.
func (a *AdminSocket) handleRequest(conn net.Conn) {
decoder := json.NewDecoder(conn)
decoder.DisallowUnknownFields()
encoder := json.NewEncoder(conn)
encoder.SetIndent("", " ")
recv := make(Info)
send := make(Info)
defer conn.Close()
defer func() {
r := recover()
if r != nil {
send = Info{
"status": "error",
"error": "Check your syntax and input types",
}
a.log.Debugln("Admin socket error:", r)
if err := encoder.Encode(&send); err != nil {
if err := encoder.Encode(&ErrorResponse{
Error: "Check your syntax and input types",
}); err != nil {
a.log.Debugln("Admin socket JSON encode error:", err)
}
conn.Close()
@@ -436,83 +270,39 @@ func (a *AdminSocket) handleRequest(conn net.Conn) {
}()
for {
// Start with a clean slate on each request
recv = Info{}
send = Info{}
// Decode the input
if err := decoder.Decode(&recv); err != nil {
a.log.Debugln("Admin socket JSON decode error:", err)
return
}
// Send the request back with the response, and default to "error"
// unless the status is changed below by one of the handlers
send["request"] = recv
send["status"] = "error"
n := strings.ToLower(recv["request"].(string))
if _, ok := recv["request"]; !ok {
send["error"] = "No request sent"
goto respond
}
if h, ok := a.handlers[n]; ok {
// Check that we have all the required arguments
for _, arg := range h.args {
// An argument in [square brackets] is optional and not required,
// so we can safely ignore those
if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
continue
var err error
var buf json.RawMessage
_ = decoder.Decode(&buf)
var resp AdminSocketResponse
resp.Status = "success"
if err = json.Unmarshal(buf, &resp.Request); err == nil {
if resp.Request.Name == "" {
resp.Status = "error"
resp.Response = &ErrorResponse{
Error: "No request specified",
}
// Check if the field is missing
if _, ok := recv[arg]; !ok {
send = Info{
"status": "error",
"error": "Expected field missing: " + arg,
"expecting": arg,
} else if h, ok := a.handlers[strings.ToLower(resp.Request.Name)]; ok {
resp.Response, err = h.handler(buf)
if err != nil {
resp.Status = "error"
resp.Response = &ErrorResponse{
Error: err.Error(),
}
goto respond
}
}
// By this point we should have all the fields we need, so call
// the handler
response, err := h.handler(recv)
if err != nil {
send["error"] = err.Error()
if response != nil {
send["response"] = response
goto respond
}
} else {
send["status"] = "success"
if response != nil {
send["response"] = response
goto respond
resp.Status = "error"
resp.Response = &ErrorResponse{
Error: fmt.Sprintf("Unknown action '%s', try 'list' for help", resp.Request.Name),
}
}
}
if err = encoder.Encode(resp); err != nil {
a.log.Debugln("Encode error:", err)
}
if !resp.Request.KeepAlive {
break
} else {
// Start with a clean response on each request, which defaults to an error
// state. If a handler is found below then this will be overwritten
send = Info{
"request": recv,
"status": "error",
"error": fmt.Sprintf("Unknown action '%s', try 'list' for help", recv["request"].(string)),
}
goto respond
}
// Send the response back
respond:
if err := encoder.Encode(&send); err != nil {
return
}
// If "keepalive" isn't true then close the connection
if keepalive, ok := recv["keepalive"]; !ok || !keepalive.(bool) {
conn.Close()
continue
}
}
}
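The admin protocol is a stream of JSON objects over the socket: each request names a handler in "request", may set "keepalive" to keep the connection open, and receives an AdminSocketResponse back. A rough client sketch (not part of this commit; it assumes the admin socket is reachable on TCP localhost:9001, which is the default on some platforms):
package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:9001")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Send one request; "keepalive": false tells the server to close the
	// connection once the response has been written.
	req := map[string]interface{}{"request": "getSelf", "keepalive": false}
	if err := json.NewEncoder(conn).Encode(req); err != nil {
		panic(err)
	}
	var resp map[string]interface{}
	if err := json.NewDecoder(conn).Decode(&resp); err != nil {
		panic(err)
	}
	fmt.Println("status:", resp["status"])
	out, _ := json.MarshalIndent(resp["response"], "", "  ")
	fmt.Println(string(out))
}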

5
src/admin/error.go Normal file
View File

@@ -0,0 +1,5 @@
package admin
type ErrorResponse struct {
Error string `json:"error"`
}

34
src/admin/getdht.go Normal file
View File

@@ -0,0 +1,34 @@
package admin
import (
"encoding/hex"
"net"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
type GetDHTRequest struct{}
type GetDHTResponse struct {
DHT map[string]DHTEntry `json:"dht"`
}
type DHTEntry struct {
PublicKey string `json:"key"`
Port uint64 `json:"port"`
Rest uint64 `json:"rest"`
}
func (a *AdminSocket) getDHTHandler(req *GetDHTRequest, res *GetDHTResponse) error {
res.DHT = map[string]DHTEntry{}
for _, d := range a.core.GetDHT() {
addr := address.AddrForKey(d.Key)
so := net.IP(addr[:]).String()
res.DHT[so] = DHTEntry{
PublicKey: hex.EncodeToString(d.Key[:]),
Port: d.Port,
Rest: d.Rest,
}
}
return nil
}

33
src/admin/getpaths.go Normal file
View File

@@ -0,0 +1,33 @@
package admin
import (
"encoding/hex"
"net"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
type GetPathsRequest struct {
}
type GetPathsResponse struct {
Paths map[string]PathEntry `json:"paths"`
}
type PathEntry struct {
PublicKey string `json:"key"`
Path []uint64 `json:"path"`
}
func (a *AdminSocket) getPathsHandler(req *GetPathsRequest, res *GetPathsResponse) error {
res.Paths = map[string]PathEntry{}
for _, p := range a.core.GetPaths() {
addr := address.AddrForKey(p.Key)
so := net.IP(addr[:]).String()
res.Paths[so] = PathEntry{
PublicKey: hex.EncodeToString(p.Key),
Path: p.Path,
}
}
return nil
}

37
src/admin/getpeers.go Normal file
View File

@@ -0,0 +1,37 @@
package admin
import (
"encoding/hex"
"net"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
type GetPeersRequest struct {
}
type GetPeersResponse struct {
Peers map[string]PeerEntry `json:"peers"`
}
type PeerEntry struct {
PublicKey string `json:"key"`
Port uint64 `json:"port"`
Coords []uint64 `json:"coords"`
Remote string `json:"remote"`
}
func (a *AdminSocket) getPeersHandler(req *GetPeersRequest, res *GetPeersResponse) error {
res.Peers = map[string]PeerEntry{}
for _, p := range a.core.GetPeers() {
addr := address.AddrForKey(p.Key)
so := net.IP(addr[:]).String()
res.Peers[so] = PeerEntry{
PublicKey: hex.EncodeToString(p.Key),
Port: p.Port,
Coords: p.Coords,
Remote: p.Remote,
}
}
return nil
}

36
src/admin/getself.go Normal file
View File

@@ -0,0 +1,36 @@
package admin
import (
"encoding/hex"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
)
type GetSelfRequest struct{}
type GetSelfResponse struct {
Self map[string]SelfEntry `json:"self"`
}
type SelfEntry struct {
BuildName string `json:"build_name"`
BuildVersion string `json:"build_version"`
PublicKey string `json:"key"`
Coords []uint64 `json:"coords"`
Subnet string `json:"subnet"`
}
func (a *AdminSocket) getSelfHandler(req *GetSelfRequest, res *GetSelfResponse) error {
res.Self = make(map[string]SelfEntry)
self := a.core.GetSelf()
addr := a.core.Address().String()
snet := a.core.Subnet()
res.Self[addr] = SelfEntry{
BuildName: version.BuildName(),
BuildVersion: version.BuildVersion(),
PublicKey: hex.EncodeToString(self.Key[:]),
Subnet: snet.String(),
Coords: self.Coords,
}
return nil
}

30
src/admin/getsessions.go Normal file
View File

@@ -0,0 +1,30 @@
package admin
import (
"encoding/hex"
"net"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
type GetSessionsRequest struct{}
type GetSessionsResponse struct {
Sessions map[string]SessionEntry `json:"sessions"`
}
type SessionEntry struct {
PublicKey string `json:"key"`
}
func (a *AdminSocket) getSessionsHandler(req *GetSessionsRequest, res *GetSessionsResponse) error {
res.Sessions = map[string]SessionEntry{}
for _, s := range a.core.GetSessions() {
addr := address.AddrForKey(s.Key)
so := net.IP(addr[:]).String()
res.Sessions[so] = SessionEntry{
PublicKey: hex.EncodeToString(s.Key[:]),
}
}
return nil
}

View File

@@ -17,143 +17,45 @@ configuration option that is not provided.
package config
import (
"crypto/ed25519"
"encoding/hex"
"sync"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
"github.com/yggdrasil-network/yggdrasil-go/src/types"
)
type MTU = types.MTU
// NodeState represents the active and previous configuration of an Yggdrasil
// node. A NodeState object is returned when starting an Yggdrasil node. Note
// that this structure and related functions are likely to disappear soon.
type NodeState struct {
Current NodeConfig
Previous NodeConfig
Mutex sync.RWMutex
}
// Current returns the active node configuration.
func (s *NodeState) GetCurrent() NodeConfig {
s.Mutex.RLock()
defer s.Mutex.RUnlock()
return s.Current
}
// Previous returns the previous node configuration.
func (s *NodeState) GetPrevious() NodeConfig {
s.Mutex.RLock()
defer s.Mutex.RUnlock()
return s.Previous
}
// Replace the node configuration with new configuration.
func (s *NodeState) Replace(n NodeConfig) {
s.Mutex.Lock()
defer s.Mutex.Unlock()
s.Previous = s.Current
s.Current = n
}
// NodeConfig is the main configuration structure, containing configuration
// options that are necessary for an Yggdrasil node to run. You will need to
// supply one of these structs to the Yggdrasil core when starting a node.
type NodeConfig struct {
Peers []string `comment:"List of connection strings for outbound peer connections in URI format,\ne.g. tcp://a.b.c.d:e or socks://a.b.c.d:e/f.g.h.i:j. These connections\nwill obey the operating system routing table, therefore you should\nuse this section when you may connect via different interfaces."`
InterfacePeers map[string][]string `comment:"List of connection strings for outbound peer connections in URI format,\narranged by source interface, e.g. { \"eth0\": [ tcp://a.b.c.d:e ] }.\nNote that SOCKS peerings will NOT be affected by this option and should\ngo in the \"Peers\" section instead."`
Listen []string `comment:"Listen addresses for incoming connections. You will need to add\nlisteners in order to accept incoming peerings from non-local nodes.\nMulticast peer discovery will work regardless of any listeners set\nhere. Each listener should be specified in URI format as above, e.g.\ntcp://0.0.0.0:0 or tcp://[::]:0 to listen on all interfaces."`
AdminListen string `comment:"Listen address for admin connections. Default is to listen for local\nconnections either on TCP/9001 or a UNIX socket depending on your\nplatform. Use this value for yggdrasilctl -endpoint=X. To disable\nthe admin socket, use the value \"none\" instead."`
MulticastInterfaces []string `comment:"Regular expressions for which interfaces multicast peer discovery\nshould be enabled on. If none specified, multicast peer discovery is\ndisabled. The default value is .* which uses all interfaces."`
AllowedEncryptionPublicKeys []string `comment:"List of peer encryption public keys to allow incoming TCP peering\nconnections from. If left empty/undefined then all connections will\nbe allowed by default. This does not affect outgoing peerings, nor\ndoes it affect link-local peers discovered via multicast."`
EncryptionPublicKey string `comment:"Your public encryption key. Your peers may ask you for this to put\ninto their AllowedEncryptionPublicKeys configuration."`
EncryptionPrivateKey string `comment:"Your private encryption key. DO NOT share this with anyone!"`
SigningPublicKey string `comment:"Your public signing key. You should not ordinarily need to share\nthis with anyone."`
SigningPrivateKey string `comment:"Your private signing key. DO NOT share this with anyone!"`
LinkLocalTCPPort uint16 `comment:"The port number to be used for the link-local TCP listeners for the\nconfigured MulticastInterfaces. This option does not affect listeners\nspecified in the Listen option. Unless you plan to firewall link-local\ntraffic, it is best to leave this as the default value of 0. This\noption cannot currently be changed by reloading config during runtime."`
IfName string `comment:"Local network interface name for TUN adapter, or \"auto\" to select\nan interface automatically, or \"none\" to run without TUN."`
IfMTU MTU `comment:"Maximum Transmission Unit (MTU) size for your local TUN interface.\nDefault is the largest supported size for your platform. The lowest\npossible value is 1280."`
SessionFirewall SessionFirewall `comment:"The session firewall controls who can send/receive network traffic\nto/from. This is useful if you want to protect this node without\nresorting to using a real firewall. This does not affect traffic\nbeing routed via this node to somewhere else. Rules are prioritised as\nfollows: blacklist, whitelist, always allow outgoing, direct, remote."`
TunnelRouting TunnelRouting `comment:"Allow tunneling non-Yggdrasil traffic over Yggdrasil. This effectively\nallows you to use Yggdrasil to route to, or to bridge other networks,\nsimilar to a VPN tunnel. Tunnelling works between any two nodes and\ndoes not require them to be directly peered."`
SwitchOptions SwitchOptions `comment:"Advanced options for tuning the switch. Normally you will not need\nto edit these options."`
NodeInfoPrivacy bool `comment:"By default, nodeinfo contains some defaults including the platform,\narchitecture and Yggdrasil version. These can help when surveying\nthe network and diagnosing network routing problems. Enabling\nnodeinfo privacy prevents this, so that only items specified in\n\"NodeInfo\" are sent back if specified."`
NodeInfo map[string]interface{} `comment:"Optional node info. This must be a { \"key\": \"value\", ... } map\nor set as null. This is entirely optional but, if set, is visible\nto the whole network on request."`
sync.RWMutex `json:"-"`
Peers []string `comment:"List of connection strings for outbound peer connections in URI format,\ne.g. tls://a.b.c.d:e or socks://a.b.c.d:e/f.g.h.i:j. These connections\nwill obey the operating system routing table, therefore you should\nuse this section when you may connect via different interfaces."`
InterfacePeers map[string][]string `comment:"List of connection strings for outbound peer connections in URI format,\narranged by source interface, e.g. { \"eth0\": [ tls://a.b.c.d:e ] }.\nNote that SOCKS peerings will NOT be affected by this option and should\ngo in the \"Peers\" section instead."`
Listen []string `comment:"Listen addresses for incoming connections. You will need to add\nlisteners in order to accept incoming peerings from non-local nodes.\nMulticast peer discovery will work regardless of any listeners set\nhere. Each listener should be specified in URI format as above, e.g.\ntls://0.0.0.0:0 or tls://[::]:0 to listen on all interfaces."`
AdminListen string `comment:"Listen address for admin connections. Default is to listen for local\nconnections either on TCP/9001 or a UNIX socket depending on your\nplatform. Use this value for yggdrasilctl -endpoint=X. To disable\nthe admin socket, use the value \"none\" instead."`
MulticastInterfaces []MulticastInterfaceConfig `comment:"Configuration for which interfaces multicast peer discovery should be\nenabled on. Each entry in the list should be a json object which may\ncontain Regex, Beacon, Listen, and Port. Regex is a regular expression\nwhich is matched against an interface name, and interfaces use the\nfirst configuration that they match against. Beacon configures whether\nor not the node should send link-local multicast beacons to advertise\ntheir presence, while listening for incoming connections on Port.\nListen controls whether or not the node listens for multicast beacons\nand opens outgoing connections."`
AllowedPublicKeys []string `comment:"List of peer public keys to allow incoming peering connections\nfrom. If left empty/undefined then all connections will be allowed\nby default. This does not affect outgoing peerings, nor does it\naffect link-local peers discovered via multicast."`
PublicKey string `comment:"Your public key. Your peers may ask you for this to put\ninto their AllowedPublicKeys configuration."`
PrivateKey string `comment:"Your private key. DO NOT share this with anyone!"`
IfName string `comment:"Local network interface name for TUN adapter, or \"auto\" to select\nan interface automatically, or \"none\" to run without TUN."`
IfMTU uint64 `comment:"Maximum Transmission Unit (MTU) size for your local TUN interface.\nDefault is the largest supported size for your platform. The lowest\npossible value is 1280."`
NodeInfoPrivacy bool `comment:"By default, nodeinfo contains some defaults including the platform,\narchitecture and Yggdrasil version. These can help when surveying\nthe network and diagnosing network routing problems. Enabling\nnodeinfo privacy prevents this, so that only items specified in\n\"NodeInfo\" are sent back if specified."`
NodeInfo map[string]interface{} `comment:"Optional node info. This must be a { \"key\": \"value\", ... } map\nor set as null. This is entirely optional but, if set, is visible\nto the whole network on request."`
}
// SessionFirewall controls the session firewall configuration.
type SessionFirewall struct {
Enable bool `comment:"Enable or disable the session firewall. If disabled, network traffic\nfrom any node will be allowed. If enabled, the below rules apply."`
AllowFromDirect bool `comment:"Allow network traffic from directly connected peers."`
AllowFromRemote bool `comment:"Allow network traffic from remote nodes on the network that you are\nnot directly peered with."`
AlwaysAllowOutbound bool `comment:"Allow outbound network traffic regardless of AllowFromDirect or\nAllowFromRemote. This does allow a remote node to send unsolicited\ntraffic back to you for the length of the session."`
WhitelistEncryptionPublicKeys []string `comment:"List of public keys from which network traffic is always accepted,\nregardless of AllowFromDirect or AllowFromRemote."`
BlacklistEncryptionPublicKeys []string `comment:"List of public keys from which network traffic is always rejected,\nregardless of the whitelist, AllowFromDirect or AllowFromRemote."`
}
// TunnelRouting contains the crypto-key routing tables for tunneling regular
// IPv4 or IPv6 subnets across the Yggdrasil network.
type TunnelRouting struct {
Enable bool `comment:"Enable or disable tunnel routing."`
IPv6RemoteSubnets map[string]string `comment:"IPv6 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"aaaa:bbbb:cccc::/e\": \"boxpubkey\", ... }"`
IPv6LocalSubnets []string `comment:"IPv6 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges (or the Yggdrasil node's IPv6 address/subnet)\nwill be tunnelled."`
IPv4RemoteSubnets map[string]string `comment:"IPv4 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"a.b.c.d/e\": \"boxpubkey\", ... }"`
IPv4LocalSubnets []string `comment:"IPv4 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges will be tunnelled."`
}
// SwitchOptions contains tuning options for the switch. These are advanced
// options and shouldn't be changed unless necessary.
type SwitchOptions struct {
MaxTotalQueueSize uint64 `comment:"Maximum size of all switch queues combined (in bytes)."`
}
// Generates default configuration and returns a pointer to the resulting
// NodeConfig. This is used when outputting the -genconf parameter and also when
// using -autoconf.
func GenerateConfig() *NodeConfig {
// Generate encryption keys.
bpub, bpriv := crypto.NewBoxKeys()
spub, spriv := crypto.NewSigKeys()
// Create a node configuration and populate it.
cfg := NodeConfig{}
cfg.Listen = []string{}
cfg.AdminListen = defaults.GetDefaults().DefaultAdminListen
cfg.EncryptionPublicKey = hex.EncodeToString(bpub[:])
cfg.EncryptionPrivateKey = hex.EncodeToString(bpriv[:])
cfg.SigningPublicKey = hex.EncodeToString(spub[:])
cfg.SigningPrivateKey = hex.EncodeToString(spriv[:])
cfg.Peers = []string{}
cfg.InterfacePeers = map[string][]string{}
cfg.AllowedEncryptionPublicKeys = []string{}
cfg.MulticastInterfaces = defaults.GetDefaults().DefaultMulticastInterfaces
cfg.IfName = defaults.GetDefaults().DefaultIfName
cfg.IfMTU = defaults.GetDefaults().DefaultIfMTU
cfg.SessionFirewall.Enable = false
cfg.SessionFirewall.AllowFromDirect = true
cfg.SessionFirewall.AllowFromRemote = true
cfg.SessionFirewall.AlwaysAllowOutbound = true
cfg.SwitchOptions.MaxTotalQueueSize = 4 * 1024 * 1024
cfg.NodeInfoPrivacy = false
return &cfg
}
// NewEncryptionKeys replaces the encryption keypair in the NodeConfig with a
// new encryption keypair. The encryption keys are used by the router to encrypt
// traffic and to derive the node ID and IPv6 address/subnet of the node, so
// this is equivalent to discarding the node's identity on the network.
func (cfg *NodeConfig) NewEncryptionKeys() {
bpub, bpriv := crypto.NewBoxKeys()
cfg.EncryptionPublicKey = hex.EncodeToString(bpub[:])
cfg.EncryptionPrivateKey = hex.EncodeToString(bpriv[:])
type MulticastInterfaceConfig struct {
Regex string
Beacon bool
Listen bool
Port uint16
}
// NewSigningKeys replaces the signing keypair in the NodeConfig with a new
// signing keypair. The signing keys are used by the switch to derive the
// structure of the spanning tree.
func (cfg *NodeConfig) NewSigningKeys() {
spub, spriv := crypto.NewSigKeys()
cfg.SigningPublicKey = hex.EncodeToString(spub[:])
cfg.SigningPrivateKey = hex.EncodeToString(spriv[:])
func (cfg *NodeConfig) NewKeys() {
spub, spriv, err := ed25519.GenerateKey(nil)
if err != nil {
panic(err)
}
cfg.PublicKey = hex.EncodeToString(spub[:])
cfg.PrivateKey = hex.EncodeToString(spriv[:])
}
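A short sketch of building a NodeConfig in code with a freshly generated identity (not part of this commit; the listen address, admin endpoint, interface name and MTU below are illustrative values rather than defaults):
package main

import (
	"encoding/json"
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/config"
)

func main() {
	cfg := &config.NodeConfig{
		Listen:      []string{"tls://[::]:0"},
		AdminListen: "tcp://localhost:9001",
		IfName:      "auto",
		IfMTU:       65535,
	}
	cfg.NewKeys() // fills in the hex-encoded PublicKey and PrivateKey
	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}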

301
src/core/api.go Normal file
View File

@@ -0,0 +1,301 @@
package core
import (
"crypto/ed25519"
//"encoding/hex"
"encoding/json"
//"errors"
//"fmt"
"net"
"net/url"
//"sort"
//"time"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
//"github.com/Arceliar/phony"
)
type Self struct {
Key ed25519.PublicKey
Root ed25519.PublicKey
Coords []uint64
}
type Peer struct {
Key ed25519.PublicKey
Root ed25519.PublicKey
Coords []uint64
Port uint64
Remote string
}
type DHTEntry struct {
Key ed25519.PublicKey
Port uint64
Rest uint64
}
type PathEntry struct {
Key ed25519.PublicKey
Path []uint64
}
type Session struct {
Key ed25519.PublicKey
}
func (c *Core) GetSelf() Self {
var self Self
s := c.pc.PacketConn.Debug.GetSelf()
self.Key = s.Key
self.Root = s.Root
self.Coords = s.Coords
return self
}
func (c *Core) GetPeers() []Peer {
var peers []Peer
names := make(map[net.Conn]string)
c.links.mutex.Lock()
for _, info := range c.links.links {
names[info.conn] = info.lname
}
c.links.mutex.Unlock()
ps := c.pc.PacketConn.Debug.GetPeers()
for _, p := range ps {
var info Peer
info.Key = p.Key
info.Root = p.Root
info.Coords = p.Coords
info.Port = p.Port
info.Remote = p.Conn.RemoteAddr().String()
if name := names[p.Conn]; name != "" {
info.Remote = name
}
peers = append(peers, info)
}
return peers
}
func (c *Core) GetDHT() []DHTEntry {
var dhts []DHTEntry
ds := c.pc.PacketConn.Debug.GetDHT()
for _, d := range ds {
var info DHTEntry
info.Key = d.Key
info.Port = d.Port
info.Rest = d.Rest
dhts = append(dhts, info)
}
return dhts
}
func (c *Core) GetPaths() []PathEntry {
var paths []PathEntry
ps := c.pc.PacketConn.Debug.GetPaths()
for _, p := range ps {
var info PathEntry
info.Key = p.Key
info.Path = p.Path
paths = append(paths, info)
}
return paths
}
func (c *Core) GetSessions() []Session {
var sessions []Session
ss := c.pc.Debug.GetSessions()
for _, s := range ss {
var info Session
info.Key = s.Key
sessions = append(sessions, info)
}
return sessions
}
// Listen starts a new listener (either TCP or TLS). The input should be a url.URL
// parsed from a string of the form e.g. "tcp://a.b.c.d:e". In the case of a
// link-local address, the interface should be provided as the second argument.
func (c *Core) Listen(u *url.URL, sintf string) (*TcpListener, error) {
return c.links.tcp.listenURL(u, sintf)
}
// Address gets the IPv6 address of the Yggdrasil node. This is always a /128
// address. The IPv6 address is only relevant when the node is operating as an
// IP router and often is meaningless when embedded into an application, unless
// that application also implements either VPN functionality or deals with IP
// packets specifically.
func (c *Core) Address() net.IP {
addr := net.IP(address.AddrForKey(c.public)[:])
return addr
}
// Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
// /64 subnet. The IPv6 subnet is only relevant when the node is operating as an
// IP router and often is meaningless when embedded into an application, unless
// that application also implements either VPN functionality or deals with IP
// packets specifically.
func (c *Core) Subnet() net.IPNet {
subnet := address.SubnetForKey(c.public)[:]
subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
}
// SetLogger sets the output logger of the Yggdrasil node after startup. This
// may be useful if you want to redirect the output later. Note that this
// expects a Logger from the github.com/gologme/log package and not from Go's
// built-in log package.
func (c *Core) SetLogger(log *log.Logger) {
c.log = log
}
// AddPeer adds a peer. This should be specified in the peer URI format, e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This adds the peer to the peer list, so that they will be called again if the
// connection drops.
/*
func (c *Core) AddPeer(addr string, sintf string) error {
if err := c.CallPeer(addr, sintf); err != nil {
// TODO: We maybe want this to write the peer to the persistent
// configuration even if a connection attempt fails, but first we'll need to
// move the code to check the peer URI so that we don't deliberately save a
// peer with a known bad URI. Loading peers from config should really do the
// same thing too but I don't think that happens today
return err
}
c.config.Mutex.Lock()
defer c.config.Mutex.Unlock()
if sintf == "" {
for _, peer := range c.config.Current.Peers {
if peer == addr {
return errors.New("peer already added")
}
}
c.config.Current.Peers = append(c.config.Current.Peers, addr)
} else {
if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
for _, peer := range c.config.Current.InterfacePeers[sintf] {
if peer == addr {
return errors.New("peer already added")
}
}
}
if _, ok := c.config.Current.InterfacePeers[sintf]; !ok {
c.config.Current.InterfacePeers[sintf] = []string{addr}
} else {
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf], addr)
}
}
return nil
}
*/
/*
func (c *Core) RemovePeer(addr string, sintf string) error {
if sintf == "" {
for i, peer := range c.config.Current.Peers {
if peer == addr {
c.config.Current.Peers = append(c.config.Current.Peers[:i], c.config.Current.Peers[i+1:]...)
break
}
}
} else if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
for i, peer := range c.config.Current.InterfacePeers[sintf] {
if peer == addr {
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf][:i], c.config.Current.InterfacePeers[sintf][i+1:]...)
break
}
}
}
panic("TODO") // Get the net.Conn to this peer (if any) and close it
c.peers.Act(nil, func() {
ports := c.peers.ports
for _, peer := range ports {
if addr == peer.intf.name() {
c.peers._removePeer(peer)
}
}
})
return nil
}
*/
// CallPeer calls a peer once. This should be specified in the peer URI format,
// e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This does not add the peer to the peer list, so if the connection drops, the
// peer will not be called again automatically.
func (c *Core) CallPeer(u *url.URL, sintf string) error {
return c.links.call(u, sintf)
}
func (c *Core) PublicKey() ed25519.PublicKey {
return c.public
}
func (c *Core) MaxMTU() uint64 {
return c.store.maxSessionMTU()
}
func (c *Core) SetMTU(mtu uint64) {
if mtu < 1280 {
mtu = 1280
}
c.store.mutex.Lock()
c.store.mtu = mtu
c.store.mutex.Unlock()
}
func (c *Core) MTU() uint64 {
c.store.mutex.Lock()
mtu := c.store.mtu
c.store.mutex.Unlock()
return mtu
}
// Implement io.ReadWriteCloser
func (c *Core) Read(p []byte) (n int, err error) {
n, err = c.store.readPC(p)
return
}
func (c *Core) Write(p []byte) (n int, err error) {
n, err = c.store.writePC(p)
return
}
func (c *Core) Close() error {
c.Stop()
return nil
}
// Hack to get the admin stuff working, TODO something cleaner
type AddHandler interface {
AddHandler(name string, args []string, handlerfunc func(json.RawMessage) (interface{}, error)) error
}
// SetAdmin must be called after Init and before Start.
// It sets the admin handler for NodeInfo and the Debug admin functions.
func (c *Core) SetAdmin(a AddHandler) error {
if err := a.AddHandler("getNodeInfo", []string{"key"}, c.proto.nodeinfo.nodeInfoAdminHandler); err != nil {
return err
}
if err := a.AddHandler("debug_remoteGetSelf", []string{"key"}, c.proto.getSelfHandler); err != nil {
return err
}
if err := a.AddHandler("debug_remoteGetPeers", []string{"key"}, c.proto.getPeersHandler); err != nil {
return err
}
if err := a.AddHandler("debug_remoteGetDHT", []string{"key"}, c.proto.getDHTHandler); err != nil {
return err
}
return nil
}
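Taken together, the API above lets an application embed the core as a packet-level transport: Read hands back whole IPv6 packets addressed to this node and Write injects packets towards the network. A rough sketch (not part of this commit; whether a zero-value Core can be started directly, and the minimal NodeConfig used here, are assumptions):
package main

import (
	"encoding/hex"
	"fmt"
	"os"

	"github.com/gologme/log"
	"github.com/yggdrasil-network/yggdrasil-go/src/config"
	"github.com/yggdrasil-network/yggdrasil-go/src/core"
)

func main() {
	cfg := &config.NodeConfig{}
	cfg.NewKeys()
	logger := log.New(os.Stderr, "", log.Flags())
	var c core.Core
	if err := c.Start(cfg, logger); err != nil {
		panic(err)
	}
	defer c.Close()
	fmt.Println("key:    ", hex.EncodeToString(c.PublicKey()))
	fmt.Println("address:", c.Address())
	fmt.Println("subnet: ", c.Subnet().String())
	// Read blocks until a packet arrives for this node, so a node with no
	// peers configured will simply wait here.
	buf := make([]byte, int(c.MaxMTU()))
	n, err := c.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("received packet of", n, "bytes")
}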

View File

@@ -1,16 +1,21 @@
package yggdrasil
package core
import (
"context"
"crypto/ed25519"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/url"
"time"
iw "github.com/Arceliar/ironwood/encrypted"
"github.com/Arceliar/phony"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
)
@@ -21,17 +26,17 @@ type Core struct {
// We're going to keep our own copy of the provided config - that way we can
// guarantee that it will be covered by the mutex
phony.Inbox
config config.NodeState // Config
boxPub crypto.BoxPubKey
boxPriv crypto.BoxPrivKey
sigPub crypto.SigPubKey
sigPriv crypto.SigPrivKey
switchTable switchTable
peers peers
router router
pc *iw.PacketConn
config *config.NodeConfig // Config
secret ed25519.PrivateKey
public ed25519.PublicKey
links links
proto protoHandler
store keyStore
log *log.Logger
addPeerTimer *time.Timer
ctx context.Context
ctxCancel context.CancelFunc
}
func (c *Core) _init() error {
@@ -39,71 +44,67 @@ func (c *Core) _init() error {
// Init sets up structs
// Start launches goroutines that depend on structs being set up
// This is pretty much required to completely avoid race conditions
c.config.RLock()
defer c.config.RUnlock()
if c.log == nil {
c.log = log.New(ioutil.Discard, "", 0)
}
current := c.config.GetCurrent()
boxPrivHex, err := hex.DecodeString(current.EncryptionPrivateKey)
sigPriv, err := hex.DecodeString(c.config.PrivateKey)
if err != nil {
return err
}
if len(boxPrivHex) < crypto.BoxPrivKeyLen {
return errors.New("EncryptionPrivateKey is incorrect length")
if len(sigPriv) < ed25519.PrivateKeySize {
return errors.New("PrivateKey is incorrect length")
}
sigPrivHex, err := hex.DecodeString(current.SigningPrivateKey)
if err != nil {
return err
c.secret = ed25519.PrivateKey(sigPriv)
c.public = c.secret.Public().(ed25519.PublicKey)
// TODO check public against current.PublicKey, error if they don't match
c.pc, err = iw.NewPacketConn(c.secret)
c.ctx, c.ctxCancel = context.WithCancel(context.Background())
c.store.init(c)
c.proto.init(c)
if err := c.proto.nodeinfo.setNodeInfo(c.config.NodeInfo, c.config.NodeInfoPrivacy); err != nil {
return fmt.Errorf("setNodeInfo: %w", err)
}
if len(sigPrivHex) < crypto.SigPrivKeyLen {
return errors.New("SigningPrivateKey is incorrect length")
}
copy(c.boxPriv[:], boxPrivHex)
copy(c.sigPriv[:], sigPrivHex)
boxPub, sigPub := c.boxPriv.Public(), c.sigPriv.Public()
copy(c.boxPub[:], boxPub[:])
copy(c.sigPub[:], sigPub[:])
if bp := hex.EncodeToString(c.boxPub[:]); current.EncryptionPublicKey != bp {
c.log.Warnln("EncryptionPublicKey in config is incorrect, should be", bp)
}
if sp := hex.EncodeToString(c.sigPub[:]); current.SigningPublicKey != sp {
c.log.Warnln("SigningPublicKey in config is incorrect, should be", sp)
}
c.peers.init(c)
c.router.init(c)
c.switchTable.init(c) // TODO move before peers? before router?
return nil
return err
}
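
_init expects config.PrivateKey to be a hex-encoded ed25519 private key and derives the public key from it. A small standalone sketch of generating such a key and round-tripping it through hex, mirroring the length check above (variable names are illustrative):

package main

import (
    "crypto/ed25519"
    "encoding/hex"
    "fmt"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(nil) // nil uses crypto/rand
    if err != nil {
        panic(err)
    }
    // This is the hex form expected in the config's PrivateKey field.
    privHex := hex.EncodeToString(priv)
    // Decoding it back mirrors what _init does on startup.
    decoded, err := hex.DecodeString(privHex)
    if err != nil || len(decoded) < ed25519.PrivateKeySize {
        panic("PrivateKey is incorrect length")
    }
    derived := ed25519.PrivateKey(decoded).Public().(ed25519.PublicKey)
    fmt.Println("public keys match:", hex.EncodeToString(pub) == hex.EncodeToString(derived))
}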
// If any static peers were provided in the configuration above then we should
// configure them. The loop ensures that disconnected peers will eventually
// be reconnected with.
func (c *Core) _addPeerLoop() {
// Get the peers from the config - these could change!
current := c.config.GetCurrent()
c.config.RLock()
defer c.config.RUnlock()
if c.addPeerTimer == nil {
return
}
// Add peers from the Peers section
for _, peer := range current.Peers {
go func(peer, intf string) {
if err := c.CallPeer(peer, intf); err != nil {
for _, peer := range c.config.Peers {
go func(peer string, intf string) {
u, err := url.Parse(peer)
if err != nil {
c.log.Errorln("Failed to parse peer url:", peer, err)
}
if err := c.CallPeer(u, intf); err != nil {
c.log.Errorln("Failed to add peer:", err)
}
}(peer, "") // TODO: this should be acted and not in a goroutine?
}
// Add peers from the InterfacePeers section
for intf, intfpeers := range current.InterfacePeers {
for intf, intfpeers := range c.config.InterfacePeers {
for _, peer := range intfpeers {
go func(peer, intf string) {
if err := c.CallPeer(peer, intf); err != nil {
go func(peer string, intf string) {
u, err := url.Parse(peer)
if err != nil {
c.log.Errorln("Failed to parse peer url:", peer, err)
}
if err := c.CallPeer(u, intf); err != nil {
c.log.Errorln("Failed to add peer:", err)
}
}(peer, intf) // TODO: this should be acted and not in a goroutine?
@ -115,42 +116,22 @@ func (c *Core) _addPeerLoop() {
})
}
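
Peers are now handed around as URLs rather than bare strings, which is also where the optional ?key= pinning parameter (used by links.call further down) comes from. A short sketch of what url.Parse yields for such a peer string; the address, port and key value are placeholders:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // A peer URI of the shape accepted by CallPeer after this change.
    peer := "tls://192.0.2.1:9443?key=0000000000000000000000000000000000000000000000000000000000000000"
    u, err := url.Parse(peer)
    if err != nil {
        panic(err)
    }
    fmt.Println(u.Scheme)         // tls
    fmt.Println(u.Host)           // 192.0.2.1:9443
    fmt.Println(u.Query()["key"]) // pinned public key(s), as read in links.call
}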
// UpdateConfig updates the configuration in Core with the provided
// config.NodeConfig and then signals the various module goroutines to
// reconfigure themselves if needed.
func (c *Core) UpdateConfig(config *config.NodeConfig) {
c.Act(nil, func() {
c.log.Debugln("Reloading node configuration...")
// Replace the active configuration with the supplied one
c.config.Replace(*config)
// Notify the router and switch about the new configuration
c.router.Act(c, c.router.reconfigure)
c.switchTable.Act(c, c.switchTable.reconfigure)
})
}
// Start starts up Yggdrasil using the provided config.NodeConfig, and outputs
// debug logging through the provided log.Logger. The started stack will include
// TCP and UDP sockets, a multicast discovery socket, an admin socket, router,
// switch and DHT node. A config.NodeState is returned which contains both the
// current and previous configurations (from reconfigures).
func (c *Core) Start(nc *config.NodeConfig, log *log.Logger) (conf *config.NodeState, err error) {
func (c *Core) Start(nc *config.NodeConfig, log *log.Logger) (err error) {
phony.Block(c, func() {
conf, err = c._start(nc, log)
err = c._start(nc, log)
})
return
}
// This function is unsafe and should only be ran by the core actor.
func (c *Core) _start(nc *config.NodeConfig, log *log.Logger) (*config.NodeState, error) {
func (c *Core) _start(nc *config.NodeConfig, log *log.Logger) error {
c.log = log
c.config = config.NodeState{
Current: *nc,
Previous: *nc,
}
c.config = nc
if name := version.BuildName(); name != "unknown" {
c.log.Infoln("Build name:", name)
@ -162,28 +143,20 @@ func (c *Core) _start(nc *config.NodeConfig, log *log.Logger) (*config.NodeState
c.log.Infoln("Starting up...")
if err := c._init(); err != nil {
c.log.Errorln("Failed to initialize core")
return nil, err
return err
}
if err := c.links.init(c); err != nil {
c.log.Errorln("Failed to start link interfaces")
return nil, err
return err
}
if err := c.switchTable.start(); err != nil {
c.log.Errorln("Failed to start switch")
return nil, err
}
if err := c.router.start(); err != nil {
c.log.Errorln("Failed to start router")
return nil, err
}
c.Act(c, c._addPeerLoop)
c.addPeerTimer = time.AfterFunc(0, func() {
c.Act(nil, c._addPeerLoop)
})
c.log.Infoln("Startup complete")
return &c.config, nil
return nil
}
// Stop shuts down the Yggdrasil node.
@ -194,10 +167,13 @@ func (c *Core) Stop() {
// This function is unsafe and should only be ran by the core actor.
func (c *Core) _stop() {
c.log.Infoln("Stopping...")
c.ctxCancel()
c.pc.Close()
if c.addPeerTimer != nil {
c.addPeerTimer.Stop()
c.addPeerTimer = nil
}
c.links.stop()
_ = c.links.stop()
/* FIXME this deadlocks, need a waitgroup or something to coordinate shutdown
for _, peer := range c.GetPeers() {
c.DisconnectPeer(peer.Port)

View File

@ -1,8 +1,9 @@
package yggdrasil
package core
import (
"bytes"
"math/rand"
"net/url"
"os"
"testing"
"time"
@ -10,11 +11,12 @@ import (
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
)
// GenerateConfig produces default configuration with suitable modifications for tests.
func GenerateConfig() *config.NodeConfig {
cfg := config.GenerateConfig()
cfg := defaults.GenerateConfig()
cfg.AdminListen = "none"
cfg.Listen = []string{"tcp://127.0.0.1:0"}
cfg.IfName = "none"
@ -39,21 +41,27 @@ func GetLoggerWithPrefix(prefix string, verbose bool) *log.Logger {
// Verbosity flag is passed to logger.
func CreateAndConnectTwo(t testing.TB, verbose bool) (nodeA *Core, nodeB *Core) {
nodeA = new(Core)
_, err := nodeA.Start(GenerateConfig(), GetLoggerWithPrefix("A: ", verbose))
if err != nil {
if err := nodeA.Start(GenerateConfig(), GetLoggerWithPrefix("A: ", verbose)); err != nil {
t.Fatal(err)
}
nodeA.SetMTU(1500)
nodeB = new(Core)
_, err = nodeB.Start(GenerateConfig(), GetLoggerWithPrefix("B: ", verbose))
if err := nodeB.Start(GenerateConfig(), GetLoggerWithPrefix("B: ", verbose)); err != nil {
t.Fatal(err)
}
nodeB.SetMTU(1500)
u, err := url.Parse("tcp://" + nodeA.links.tcp.getAddr().String())
if err != nil {
t.Fatal(err)
}
err = nodeB.CallPeer(u, "")
if err != nil {
t.Fatal(err)
}
err = nodeB.AddPeer("tcp://"+nodeA.link.tcp.getAddr().String(), "")
if err != nil {
t.Fatal(err)
}
time.Sleep(100 * time.Millisecond)
if l := len(nodeA.GetPeers()); l != 1 {
t.Fatal("unexpected number of peers", l)
@ -70,7 +78,7 @@ func WaitConnected(nodeA, nodeB *Core) bool {
// It may take up to 3 seconds, but let's wait 5.
for i := 0; i < 50; i++ {
time.Sleep(100 * time.Millisecond)
if len(nodeA.GetSwitchPeers()) > 0 && len(nodeB.GetSwitchPeers()) > 0 {
if len(nodeA.GetPeers()) > 0 && len(nodeB.GetPeers()) > 0 {
return true
}
}
@ -80,26 +88,13 @@ func WaitConnected(nodeA, nodeB *Core) bool {
// CreateEchoListener creates a routine listening on nodeA. It expects to receive `repeats` messages, each of length bufLen.
// It returns a channel used to synchronize the routine with caller.
func CreateEchoListener(t testing.TB, nodeA *Core, bufLen int, repeats int) chan struct{} {
// Listen. Doing it here guarantees that there will be something to try to connect when it returns.
listener, err := nodeA.ConnListen()
if err != nil {
t.Fatal(err)
}
// Start routine
done := make(chan struct{})
go func() {
defer listener.Close()
conn, err := listener.Accept()
if err != nil {
t.Error(err)
return
}
defer conn.Close()
buf := make([]byte, bufLen)
res := make([]byte, bufLen)
for i := 0; i < repeats; i++ {
n, err := conn.Read(buf)
n, err := nodeA.Read(buf)
if err != nil {
t.Error(err)
return
@ -108,7 +103,10 @@ func CreateEchoListener(t testing.TB, nodeA *Core, bufLen int, repeats int) chan
t.Error("missing data")
return
}
_, err = conn.Write(buf)
copy(res, buf)
copy(res[8:24], buf[24:40])
copy(res[24:40], buf[8:24])
_, err = nodeA.Write(res)
if err != nil {
t.Error(err)
}
@ -127,6 +125,8 @@ func TestCore_Start_Connect(t *testing.T) {
// TestCore_Start_Transfer checks that messages can be passed between nodes (in both directions).
func TestCore_Start_Transfer(t *testing.T) {
nodeA, nodeB := CreateAndConnectTwo(t, true)
defer nodeA.Stop()
defer nodeB.Stop()
msgLen := 1500
done := CreateEchoListener(t, nodeA, msgLen, 1)
@ -135,28 +135,22 @@ func TestCore_Start_Transfer(t *testing.T) {
t.Fatal("nodes did not connect")
}
// Dial
dialer, err := nodeB.ConnDialer()
if err != nil {
t.Fatal(err)
}
conn, err := dialer.Dial("nodeid", nodeA.NodeID().String())
if err != nil {
t.Fatal(err)
}
defer conn.Close()
// Send
msg := make([]byte, msgLen)
rand.Read(msg)
conn.Write(msg)
rand.Read(msg[40:])
msg[0] = 0x60
copy(msg[8:24], nodeB.Address())
copy(msg[24:40], nodeA.Address())
_, err := nodeB.Write(msg)
if err != nil {
t.Fatal(err)
}
buf := make([]byte, msgLen)
_, err = conn.Read(buf)
_, err = nodeB.Read(buf)
if err != nil {
t.Fatal(err)
}
if bytes.Compare(msg, buf) != 0 {
if !bytes.Equal(msg[40:], buf[40:]) {
t.Fatal("expected echo")
}
<-done
@ -173,29 +167,24 @@ func BenchmarkCore_Start_Transfer(b *testing.B) {
b.Fatal("nodes did not connect")
}
// Dial
dialer, err := nodeB.ConnDialer()
if err != nil {
b.Fatal(err)
}
conn, err := dialer.Dial("nodeid", nodeA.NodeID().String())
if err != nil {
b.Fatal(err)
}
defer conn.Close()
// Send
msg := make([]byte, msgLen)
rand.Read(msg)
rand.Read(msg[40:])
msg[0] = 0x60
copy(msg[8:24], nodeB.Address())
copy(msg[24:40], nodeA.Address())
buf := make([]byte, msgLen)
b.SetBytes(int64(b.N * msgLen))
b.SetBytes(int64(msgLen))
b.ResetTimer()
for i := 0; i < b.N; i++ {
conn.Write(msg)
_, err := nodeB.Write(msg)
if err != nil {
b.Fatal(err)
}
_, err = conn.Read(buf)
_, err = nodeB.Read(buf)
if err != nil {
b.Fatal(err)
}
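
With ConnDialer and ConnListen gone, the tests above write raw IPv6 packets straight into Core, so the byte offsets matter: the version nibble sits in byte 0, the source address in bytes 8..23, the destination in bytes 24..39, and payload from byte 40. A minimal sketch of building such a packet (the 200:: addresses are placeholders):

package main

import (
    "fmt"
    "net"
)

// buildIPv6 returns a minimal IPv6 packet: 40-byte header plus payload.
// Fields other than version, payload length, source and destination are left
// zero, which is enough for loopback-style tests like the ones above.
func buildIPv6(src, dst net.IP, payload []byte) []byte {
    pkt := make([]byte, 40+len(payload))
    pkt[0] = 0x60 // version 6, traffic class/flow label zero
    pkt[4] = byte(len(payload) >> 8)
    pkt[5] = byte(len(payload))
    copy(pkt[8:24], src.To16())
    copy(pkt[24:40], dst.To16())
    copy(pkt[40:], payload)
    return pkt
}

func main() {
    src := net.ParseIP("200::1") // placeholder Yggdrasil-style addresses
    dst := net.ParseIP("200::2")
    pkt := buildIPv6(src, dst, []byte("ping"))
    fmt.Println(len(pkt), pkt[0]>>4) // 44 6
}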

View File

@ -1,6 +1,6 @@
// +build debug
package yggdrasil
package core
import "fmt"

View File

@ -1,4 +1,4 @@
package tuntap
package core
// The ICMPv6 module implements functions to easily create ICMPv6
// packets. These functions, when mixed with the built-in Go IPv6
@ -17,11 +17,7 @@ import (
"golang.org/x/net/ipv6"
)
const len_ETHER = 14
type ICMPv6 struct {
tun *TunAdapter
}
type ICMPv6 struct{}
// Marshal returns the binary encoding of h.
func ipv6Header_Marshal(h *ipv6.Header) ([]byte, error) {
@ -40,13 +36,6 @@ func ipv6Header_Marshal(h *ipv6.Header) ([]byte, error) {
return b, nil
}
// Initialises the ICMPv6 module by assigning our link-local IPv6 address and
// our MAC address. ICMPv6 messages will always appear to originate from these
// addresses.
func (i *ICMPv6) Init(t *TunAdapter) {
i.tun = t
}
// Creates an ICMPv6 packet based on the given icmp.MessageBody and other
// parameters, complete with IP headers only, which can be written directly to
// a TUN adapter, or called directly by the CreateICMPv6L2 function when

308
src/core/keystore.go Normal file
View File

@ -0,0 +1,308 @@
package core
import (
"crypto/ed25519"
"errors"
"fmt"
"net"
"sync"
"time"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv6"
iwt "github.com/Arceliar/ironwood/types"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
const keyStoreTimeout = 2 * time.Minute
type keyArray [ed25519.PublicKeySize]byte
type keyStore struct {
core *Core
address address.Address
subnet address.Subnet
mutex sync.Mutex
keyToInfo map[keyArray]*keyInfo
addrToInfo map[address.Address]*keyInfo
addrBuffer map[address.Address]*buffer
subnetToInfo map[address.Subnet]*keyInfo
subnetBuffer map[address.Subnet]*buffer
mtu uint64
}
type keyInfo struct {
key keyArray
address address.Address
subnet address.Subnet
timeout *time.Timer // From calling a time.AfterFunc to do cleanup
}
type buffer struct {
packet []byte
timeout *time.Timer
}
func (k *keyStore) init(core *Core) {
k.core = core
k.address = *address.AddrForKey(k.core.public)
k.subnet = *address.SubnetForKey(k.core.public)
if err := k.core.pc.SetOutOfBandHandler(k.oobHandler); err != nil {
err = fmt.Errorf("tun.core.SetOutOfBandHander: %w", err)
panic(err)
}
k.keyToInfo = make(map[keyArray]*keyInfo)
k.addrToInfo = make(map[address.Address]*keyInfo)
k.addrBuffer = make(map[address.Address]*buffer)
k.subnetToInfo = make(map[address.Subnet]*keyInfo)
k.subnetBuffer = make(map[address.Subnet]*buffer)
k.mtu = 1280 // Default to something safe, expect user to set this
}
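
The keystore caches the address and /64 subnet derived from each ed25519 key via the src/address helpers used above. A tiny sketch of that derivation, assuming the same AddrForKey/SubnetForKey signatures that appear in this diff:

package main

import (
    "crypto/ed25519"
    "fmt"
    "net"

    "github.com/yggdrasil-network/yggdrasil-go/src/address"
)

func main() {
    pub, _, _ := ed25519.GenerateKey(nil)
    addr := *address.AddrForKey(pub)   // node address derived from the key
    snet := *address.SubnetForKey(pub) // matching routed /64 prefix
    fmt.Println(net.IP(addr[:]))
    fmt.Println(net.IP(append(snet[:], make([]byte, 8)...)))
}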
func (k *keyStore) sendToAddress(addr address.Address, bs []byte) {
k.mutex.Lock()
if info := k.addrToInfo[addr]; info != nil {
k.resetTimeout(info)
k.mutex.Unlock()
_, _ = k.core.pc.WriteTo(bs, iwt.Addr(info.key[:]))
} else {
var buf *buffer
if buf = k.addrBuffer[addr]; buf == nil {
buf = new(buffer)
k.addrBuffer[addr] = buf
}
msg := append([]byte(nil), bs...)
buf.packet = msg
if buf.timeout != nil {
buf.timeout.Stop()
}
buf.timeout = time.AfterFunc(keyStoreTimeout, func() {
k.mutex.Lock()
defer k.mutex.Unlock()
if nbuf := k.addrBuffer[addr]; nbuf == buf {
delete(k.addrBuffer, addr)
}
})
k.mutex.Unlock()
k.sendKeyLookup(addr.GetKey())
}
}
func (k *keyStore) sendToSubnet(subnet address.Subnet, bs []byte) {
k.mutex.Lock()
if info := k.subnetToInfo[subnet]; info != nil {
k.resetTimeout(info)
k.mutex.Unlock()
_, _ = k.core.pc.WriteTo(bs, iwt.Addr(info.key[:]))
} else {
var buf *buffer
if buf = k.subnetBuffer[subnet]; buf == nil {
buf = new(buffer)
k.subnetBuffer[subnet] = buf
}
msg := append([]byte(nil), bs...)
buf.packet = msg
if buf.timeout != nil {
buf.timeout.Stop()
}
buf.timeout = time.AfterFunc(keyStoreTimeout, func() {
k.mutex.Lock()
defer k.mutex.Unlock()
if nbuf := k.subnetBuffer[subnet]; nbuf == buf {
delete(k.subnetBuffer, subnet)
}
})
k.mutex.Unlock()
k.sendKeyLookup(subnet.GetKey())
}
}
func (k *keyStore) update(key ed25519.PublicKey) *keyInfo {
k.mutex.Lock()
var kArray keyArray
copy(kArray[:], key)
var info *keyInfo
if info = k.keyToInfo[kArray]; info == nil {
info = new(keyInfo)
info.key = kArray
info.address = *address.AddrForKey(ed25519.PublicKey(info.key[:]))
info.subnet = *address.SubnetForKey(ed25519.PublicKey(info.key[:]))
k.keyToInfo[info.key] = info
k.addrToInfo[info.address] = info
k.subnetToInfo[info.subnet] = info
k.resetTimeout(info)
k.mutex.Unlock()
if buf := k.addrBuffer[info.address]; buf != nil {
k.core.pc.WriteTo(buf.packet, iwt.Addr(info.key[:]))
delete(k.addrBuffer, info.address)
}
if buf := k.subnetBuffer[info.subnet]; buf != nil {
k.core.pc.WriteTo(buf.packet, iwt.Addr(info.key[:]))
delete(k.subnetBuffer, info.subnet)
}
} else {
k.resetTimeout(info)
k.mutex.Unlock()
}
return info
}
func (k *keyStore) resetTimeout(info *keyInfo) {
if info.timeout != nil {
info.timeout.Stop()
}
info.timeout = time.AfterFunc(keyStoreTimeout, func() {
k.mutex.Lock()
defer k.mutex.Unlock()
if nfo := k.keyToInfo[info.key]; nfo == info {
delete(k.keyToInfo, info.key)
}
if nfo := k.addrToInfo[info.address]; nfo == info {
delete(k.addrToInfo, info.address)
}
if nfo := k.subnetToInfo[info.subnet]; nfo == info {
delete(k.subnetToInfo, info.subnet)
}
})
}
func (k *keyStore) oobHandler(fromKey, toKey ed25519.PublicKey, data []byte) {
if len(data) != 1+ed25519.SignatureSize {
return
}
sig := data[1:]
switch data[0] {
case typeKeyLookup:
snet := *address.SubnetForKey(toKey)
if snet == k.subnet && ed25519.Verify(fromKey, toKey[:], sig) {
// This is looking for at least our subnet (possibly our address)
// Send a response
k.sendKeyResponse(fromKey)
}
case typeKeyResponse:
// TODO keep a list of something to match against...
// Ignore the response if it doesn't match anything of interest...
if ed25519.Verify(fromKey, toKey[:], sig) {
k.update(fromKey)
}
}
}
func (k *keyStore) sendKeyLookup(partial ed25519.PublicKey) {
sig := ed25519.Sign(k.core.secret, partial[:])
bs := append([]byte{typeKeyLookup}, sig...)
_ = k.core.pc.SendOutOfBand(partial, bs)
}
func (k *keyStore) sendKeyResponse(dest ed25519.PublicKey) {
sig := ed25519.Sign(k.core.secret, dest[:])
bs := append([]byte{typeKeyResponse}, sig...)
_ = k.core.pc.SendOutOfBand(dest, bs)
}
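
The out-of-band messages are deliberately tiny: one type byte followed by an ed25519 signature over the key being looked up (or, for a response, over the requester's key), which is exactly what oobHandler checks. A standalone sketch of the same construction and check, with keys generated on the spot purely for illustration:

package main

import (
    "crypto/ed25519"
    "fmt"
)

// Values mirror the iota order of the out-of-band types in types.go.
const (
    typeKeyLookup = iota + 1
    typeKeyResponse
)

func main() {
    targetPub, _, _ := ed25519.GenerateKey(nil)    // the node being looked up
    reqPub, reqPriv, _ := ed25519.GenerateKey(nil) // the node doing the lookup

    // sendKeyLookup: one type byte, then a signature over the destination key.
    msg := append([]byte{typeKeyLookup}, ed25519.Sign(reqPriv, targetPub)...)

    // oobHandler on the target side: length check, then verify that the
    // sender really signed our key before answering with a key response.
    ok := len(msg) == 1+ed25519.SignatureSize &&
        msg[0] == typeKeyLookup &&
        ed25519.Verify(reqPub, targetPub, msg[1:])
    fmt.Println("lookup accepted:", ok) // true
}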
func (k *keyStore) maxSessionMTU() uint64 {
const sessionTypeOverhead = 1
return k.core.pc.MTU() - sessionTypeOverhead
}
func (k *keyStore) readPC(p []byte) (int, error) {
buf := make([]byte, k.core.pc.MTU(), 65535)
for {
bs := buf
n, from, err := k.core.pc.ReadFrom(bs)
if err != nil {
return n, err
}
if n == 0 {
continue
}
switch bs[0] {
case typeSessionTraffic:
// This is what we want to handle here
case typeSessionProto:
var key keyArray
copy(key[:], from.(iwt.Addr))
data := append([]byte(nil), bs[1:n]...)
k.core.proto.handleProto(nil, key, data)
continue
default:
continue
}
bs = bs[1:n]
if len(bs) == 0 {
continue
}
if bs[0]&0xf0 != 0x60 {
continue // not IPv6
}
if len(bs) < 40 {
continue
}
k.mutex.Lock()
mtu := int(k.mtu)
k.mutex.Unlock()
if len(bs) > mtu {
// Using bs would make it leak off the stack, so copy to buf
buf := make([]byte, 40)
copy(buf, bs)
ptb := &icmp.PacketTooBig{
MTU: mtu,
Data: buf[:40],
}
if packet, err := CreateICMPv6(buf[8:24], buf[24:40], ipv6.ICMPTypePacketTooBig, 0, ptb); err == nil {
_, _ = k.writePC(packet)
}
continue
}
var srcAddr, dstAddr address.Address
var srcSubnet, dstSubnet address.Subnet
copy(srcAddr[:], bs[8:])
copy(dstAddr[:], bs[24:])
copy(srcSubnet[:], bs[8:])
copy(dstSubnet[:], bs[24:])
if dstAddr != k.address && dstSubnet != k.subnet {
continue // bad local address/subnet
}
info := k.update(ed25519.PublicKey(from.(iwt.Addr)))
if srcAddr != info.address && srcSubnet != info.subnet {
continue // bad remote address/subnet
}
n = copy(p, bs)
return n, nil
}
}
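
When an inbound packet exceeds the configured session MTU, readPC answers with an ICMPv6 Packet Too Big carrying the first 40 bytes of the offending packet. Roughly the same message can be assembled with golang.org/x/net/icmp directly; a sketch with placeholder addresses (the real code goes through the package's own CreateICMPv6 helper):

package main

import (
    "fmt"
    "net"

    "golang.org/x/net/icmp"
    "golang.org/x/net/ipv6"
)

func main() {
    src := net.ParseIP("200::1") // placeholder node addresses
    dst := net.ParseIP("200::2")
    origHeader := make([]byte, 40) // first 40 bytes of the oversized packet

    msg := icmp.Message{
        Type: ipv6.ICMPTypePacketTooBig,
        Code: 0,
        Body: &icmp.PacketTooBig{MTU: 1280, Data: origHeader},
    }
    // ICMPv6 checksums cover a pseudo-header built from source and destination.
    body, err := msg.Marshal(icmp.IPv6PseudoHeader(src, dst))
    if err != nil {
        panic(err)
    }
    fmt.Println("ICMPv6 payload length:", len(body))
}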
func (k *keyStore) writePC(bs []byte) (int, error) {
if bs[0]&0xf0 != 0x60 {
return 0, errors.New("not an IPv6 packet") // not IPv6
}
if len(bs) < 40 {
strErr := fmt.Sprint("undersized IPv6 packet, length: ", len(bs))
return 0, errors.New(strErr)
}
var srcAddr, dstAddr address.Address
var srcSubnet, dstSubnet address.Subnet
copy(srcAddr[:], bs[8:])
copy(dstAddr[:], bs[24:])
copy(srcSubnet[:], bs[8:])
copy(dstSubnet[:], bs[24:])
if srcAddr != k.address && srcSubnet != k.subnet {
// This happens all the time due to link-local traffic
// Don't send back an error, just drop it
strErr := fmt.Sprint("incorrect source address: ", net.IP(srcAddr[:]).String())
return 0, errors.New(strErr)
}
buf := make([]byte, 1+len(bs), 65535)
buf[0] = typeSessionTraffic
copy(buf[1:], bs)
if dstAddr.IsValid() {
k.sendToAddress(dstAddr, buf)
} else if dstSubnet.IsValid() {
k.sendToSubnet(dstSubnet, buf)
} else {
return 0, errors.New("invalid destination address")
}
return len(bs), nil
}

251
src/core/link.go Normal file
View File

@ -0,0 +1,251 @@
package core
import (
"crypto/ed25519"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"net/url"
"strings"
"sync"
//"sync/atomic"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"golang.org/x/net/proxy"
//"github.com/Arceliar/phony" // TODO? use instead of mutexes
)
type links struct {
core *Core
mutex sync.RWMutex // protects links below
links map[linkInfo]*link
tcp tcp // TCP interface support
stopped chan struct{}
// TODO timeout (to remove from switch), read from config.ReadTimeout
}
// linkInfo is used as a map key
type linkInfo struct {
key keyArray
linkType string // Type of link, e.g. TCP, AWDL
local string // Local name or address
remote string // Remote name or address
}
type link struct {
lname string
links *links
conn net.Conn
options linkOptions
info linkInfo
incoming bool
force bool
closed chan struct{}
}
type linkOptions struct {
pinnedEd25519Keys map[keyArray]struct{}
}
func (l *links) init(c *Core) error {
l.core = c
l.mutex.Lock()
l.links = make(map[linkInfo]*link)
l.mutex.Unlock()
l.stopped = make(chan struct{})
if err := l.tcp.init(l); err != nil {
c.log.Errorln("Failed to start TCP interface")
return err
}
return nil
}
func (l *links) call(u *url.URL, sintf string) error {
//u, err := url.Parse(uri)
//if err != nil {
// return fmt.Errorf("peer %s is not correctly formatted (%s)", uri, err)
//}
tcpOpts := tcpOptions{}
if pubkeys, ok := u.Query()["key"]; ok && len(pubkeys) > 0 {
tcpOpts.pinnedEd25519Keys = make(map[keyArray]struct{})
for _, pubkey := range pubkeys {
if sigPub, err := hex.DecodeString(pubkey); err == nil {
var sigPubKey keyArray
copy(sigPubKey[:], sigPub)
tcpOpts.pinnedEd25519Keys[sigPubKey] = struct{}{}
}
}
}
switch u.Scheme {
case "tcp":
l.tcp.call(u.Host, tcpOpts, sintf)
case "socks":
tcpOpts.socksProxyAddr = u.Host
if u.User != nil {
tcpOpts.socksProxyAuth = &proxy.Auth{}
tcpOpts.socksProxyAuth.User = u.User.Username()
tcpOpts.socksProxyAuth.Password, _ = u.User.Password()
}
tcpOpts.upgrade = l.tcp.tls.forDialer // TODO make this configurable
pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
l.tcp.call(pathtokens[0], tcpOpts, sintf)
case "tls":
tcpOpts.upgrade = l.tcp.tls.forDialer
l.tcp.call(u.Host, tcpOpts, sintf)
default:
return errors.New("unknown call scheme: " + u.Scheme)
}
return nil
}
func (l *links) create(conn net.Conn, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*link, error) {
// Technically anything unique would work for names, but let's pick something human readable, just for debugging
intf := link{
conn: conn,
lname: name,
links: l,
options: options,
info: linkInfo{
linkType: linkType,
local: local,
remote: remote,
},
incoming: incoming,
force: force,
}
return &intf, nil
}
func (l *links) stop() error {
close(l.stopped)
if err := l.tcp.stop(); err != nil {
return err
}
return nil
}
func (intf *link) handler() (chan struct{}, error) {
// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
defer intf.conn.Close()
meta := version_getBaseMetadata()
meta.key = intf.links.core.public
metaBytes := meta.encode()
// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
var err error
if !util.FuncTimeout(30*time.Second, func() {
var n int
n, err = intf.conn.Write(metaBytes)
if err == nil && n != len(metaBytes) {
err = errors.New("incomplete metadata send")
}
}) {
return nil, errors.New("timeout on metadata send")
}
if err != nil {
return nil, err
}
if !util.FuncTimeout(30*time.Second, func() {
var n int
n, err = io.ReadFull(intf.conn, metaBytes)
if err == nil && n != len(metaBytes) {
err = errors.New("incomplete metadata recv")
}
}) {
return nil, errors.New("timeout on metadata recv")
}
if err != nil {
return nil, err
}
meta = version_metadata{}
base := version_getBaseMetadata()
if !meta.decode(metaBytes) {
return nil, errors.New("failed to decode metadata")
}
if !meta.check() {
intf.links.core.log.Errorf("Failed to connect to node: %s is incompatible version (local %s, remote %s)",
intf.lname,
fmt.Sprintf("%d.%d", base.ver, base.minorVer),
fmt.Sprintf("%d.%d", meta.ver, meta.minorVer),
)
return nil, errors.New("remote node is incompatible version")
}
// Check if the remote side matches the keys we expected. This is a bit of a weak
// check - in future versions we really should check a signature or something like that.
if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
var key keyArray
copy(key[:], meta.key)
if _, allowed := pinned[key]; !allowed {
intf.links.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name())
return nil, fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
}
}
// Check if we're authorized to connect to this key / IP
intf.links.core.config.RLock()
allowed := intf.links.core.config.AllowedPublicKeys
intf.links.core.config.RUnlock()
isallowed := len(allowed) == 0
for _, k := range allowed {
if k == hex.EncodeToString(meta.key) { // TODO: this is yuck
isallowed = true
break
}
}
if intf.incoming && !intf.force && !isallowed {
intf.links.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.key))
intf.close()
return nil, nil
}
// Check if we already have a link to this node
copy(intf.info.key[:], meta.key)
intf.links.mutex.Lock()
if oldIntf, isIn := intf.links.links[intf.info]; isIn {
intf.links.mutex.Unlock()
// FIXME we should really return an error and let the caller block instead
// That lets them do things like close connections on its own, avoid printing a connection message in the first place, etc.
intf.links.core.log.Debugln("DEBUG: found existing interface for", intf.name())
return oldIntf.closed, nil
} else {
intf.closed = make(chan struct{})
intf.links.links[intf.info] = intf
defer func() {
intf.links.mutex.Lock()
delete(intf.links.links, intf.info)
intf.links.mutex.Unlock()
close(intf.closed)
}()
intf.links.core.log.Debugln("DEBUG: registered interface for", intf.name())
}
intf.links.mutex.Unlock()
themAddr := address.AddrForKey(ed25519.PublicKey(intf.info.key[:]))
themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
intf.links.core.log.Infof("Connected %s: %s, source %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
// Run the handler
err = intf.links.core.pc.HandleConn(ed25519.PublicKey(intf.info.key[:]), intf.conn)
// TODO don't report an error if it's just a 'use of closed network connection'
if err != nil {
intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local, err)
} else {
intf.links.core.log.Infof("Disconnected %s: %s, source %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
}
return nil, err
}
func (intf *link) close() {
intf.conn.Close()
}
func (intf *link) name() string {
return intf.lname
}

189
src/core/nodeinfo.go Normal file
View File

@ -0,0 +1,189 @@
package core
import (
"encoding/hex"
"encoding/json"
"errors"
"net"
"runtime"
"strings"
"time"
iwt "github.com/Arceliar/ironwood/types"
"github.com/Arceliar/phony"
//"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
)
// NodeInfoPayload represents a RequestNodeInfo response, in bytes.
type NodeInfoPayload []byte
type nodeinfo struct {
phony.Inbox
proto *protoHandler
myNodeInfo NodeInfoPayload
callbacks map[keyArray]nodeinfoCallback
}
type nodeinfoCallback struct {
call func(nodeinfo NodeInfoPayload)
created time.Time
}
// Initialises the nodeinfo cache/callback maps, and starts a goroutine to keep
// the cache/callback maps clean of stale entries
func (m *nodeinfo) init(proto *protoHandler) {
m.Act(nil, func() {
m._init(proto)
})
}
func (m *nodeinfo) _init(proto *protoHandler) {
m.proto = proto
m.callbacks = make(map[keyArray]nodeinfoCallback)
m._cleanup()
}
func (m *nodeinfo) _cleanup() {
for boxPubKey, callback := range m.callbacks {
if time.Since(callback.created) > time.Minute {
delete(m.callbacks, boxPubKey)
}
}
time.AfterFunc(time.Second*30, func() {
m.Act(nil, m._cleanup)
})
}
func (m *nodeinfo) _addCallback(sender keyArray, call func(nodeinfo NodeInfoPayload)) {
m.callbacks[sender] = nodeinfoCallback{
created: time.Now(),
call: call,
}
}
// Handles the callback, if there is one
func (m *nodeinfo) _callback(sender keyArray, nodeinfo NodeInfoPayload) {
if callback, ok := m.callbacks[sender]; ok {
callback.call(nodeinfo)
delete(m.callbacks, sender)
}
}
func (m *nodeinfo) _getNodeInfo() NodeInfoPayload {
return m.myNodeInfo
}
// Set the current node's nodeinfo
func (m *nodeinfo) setNodeInfo(given interface{}, privacy bool) (err error) {
phony.Block(m, func() {
err = m._setNodeInfo(given, privacy)
})
return
}
func (m *nodeinfo) _setNodeInfo(given interface{}, privacy bool) error {
defaults := map[string]interface{}{
"buildname": version.BuildName(),
"buildversion": version.BuildVersion(),
"buildplatform": runtime.GOOS,
"buildarch": runtime.GOARCH,
}
newnodeinfo := make(map[string]interface{})
if !privacy {
for k, v := range defaults {
newnodeinfo[k] = v
}
}
if nodeinfomap, ok := given.(map[string]interface{}); ok {
for key, value := range nodeinfomap {
if _, ok := defaults[key]; ok {
if strvalue, strok := value.(string); strok && strings.EqualFold(strvalue, "null") || value == nil {
delete(newnodeinfo, key)
}
continue
}
newnodeinfo[key] = value
}
}
newjson, err := json.Marshal(newnodeinfo)
if err == nil {
if len(newjson) > 16384 {
return errors.New("NodeInfo exceeds max length of 16384 bytes")
}
m.myNodeInfo = newjson
return nil
}
return err
}
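
In effect the NodeInfo section of the config is merged over the build defaults, and setting one of the default keys to "null" (or to a JSON null) removes it from the advertised payload. A small sketch of that merge with made-up values:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // What a config's NodeInfo section might decode to; keys and values here
    // are invented for illustration.
    given := map[string]interface{}{
        "name":      "example-node",
        "buildname": "null", // removes the buildname default from the payload
    }
    defaults := map[string]interface{}{
        "buildname":    "yggdrasil",
        "buildversion": "0.4.0",
    }
    merged := map[string]interface{}{}
    for k, v := range defaults {
        merged[k] = v
    }
    for k, v := range given {
        if _, isDefault := defaults[k]; isDefault {
            if s, ok := v.(string); ok && s == "null" || v == nil {
                delete(merged, k)
            }
            continue
        }
        merged[k] = v
    }
    out, _ := json.Marshal(merged)
    fmt.Println(string(out)) // {"buildversion":"0.4.0","name":"example-node"}
}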
func (m *nodeinfo) sendReq(from phony.Actor, key keyArray, callback func(nodeinfo NodeInfoPayload)) {
m.Act(from, func() {
m._sendReq(key, callback)
})
}
func (m *nodeinfo) _sendReq(key keyArray, callback func(nodeinfo NodeInfoPayload)) {
if callback != nil {
m._addCallback(key, callback)
}
_, _ = m.proto.core.pc.WriteTo([]byte{typeSessionProto, typeProtoNodeInfoRequest}, iwt.Addr(key[:]))
}
func (m *nodeinfo) handleReq(from phony.Actor, key keyArray) {
m.Act(from, func() {
m._sendRes(key)
})
}
func (m *nodeinfo) handleRes(from phony.Actor, key keyArray, info NodeInfoPayload) {
m.Act(from, func() {
m._callback(key, info)
})
}
func (m *nodeinfo) _sendRes(key keyArray) {
bs := append([]byte{typeSessionProto, typeProtoNodeInfoResponse}, m._getNodeInfo()...)
_, _ = m.proto.core.pc.WriteTo(bs, iwt.Addr(key[:]))
}
// Admin socket stuff
type GetNodeInfoRequest struct {
Key string `json:"key"`
}
type GetNodeInfoResponse map[string]interface{}
func (m *nodeinfo) nodeInfoAdminHandler(in json.RawMessage) (interface{}, error) {
var req GetNodeInfoRequest
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
var key keyArray
var kbs []byte
var err error
if kbs, err = hex.DecodeString(req.Key); err != nil {
return nil, err
}
copy(key[:], kbs)
ch := make(chan []byte, 1)
m.sendReq(nil, key, func(info NodeInfoPayload) {
ch <- info
})
timer := time.NewTimer(6 * time.Second)
defer timer.Stop()
select {
case <-timer.C:
return nil, errors.New("timeout")
case info := <-ch:
var msg json.RawMessage
if err := msg.UnmarshalJSON(info); err != nil {
return nil, err
}
ip := net.IP(address.AddrForKey(kbs)[:])
res := GetNodeInfoResponse{ip.String(): msg}
return res, nil
}
}

349
src/core/proto.go Normal file
View File

@ -0,0 +1,349 @@
package core
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net"
"time"
iwt "github.com/Arceliar/ironwood/types"
"github.com/Arceliar/phony"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
)
const (
typeDebugDummy = iota
typeDebugGetSelfRequest
typeDebugGetSelfResponse
typeDebugGetPeersRequest
typeDebugGetPeersResponse
typeDebugGetDHTRequest
typeDebugGetDHTResponse
)
type reqInfo struct {
callback func([]byte)
timer *time.Timer // time.AfterFunc cleanup
}
type protoHandler struct {
phony.Inbox
core *Core
nodeinfo nodeinfo
sreqs map[keyArray]*reqInfo
preqs map[keyArray]*reqInfo
dreqs map[keyArray]*reqInfo
}
func (p *protoHandler) init(core *Core) {
p.core = core
p.nodeinfo.init(p)
p.sreqs = make(map[keyArray]*reqInfo)
p.preqs = make(map[keyArray]*reqInfo)
p.dreqs = make(map[keyArray]*reqInfo)
}
func (p *protoHandler) handleProto(from phony.Actor, key keyArray, bs []byte) {
if len(bs) == 0 {
return
}
switch bs[0] {
case typeProtoDummy:
case typeProtoNodeInfoRequest:
p.nodeinfo.handleReq(p, key)
case typeProtoNodeInfoResponse:
p.nodeinfo.handleRes(p, key, bs[1:])
case typeProtoDebug:
p._handleDebug(key, bs[1:])
}
}
func (p *protoHandler) _handleDebug(key keyArray, bs []byte) {
if len(bs) == 0 {
return
}
switch bs[0] {
case typeDebugDummy:
case typeDebugGetSelfRequest:
p._handleGetSelfRequest(key)
case typeDebugGetSelfResponse:
p._handleGetSelfResponse(key, bs[1:])
case typeDebugGetPeersRequest:
p._handleGetPeersRequest(key)
case typeDebugGetPeersResponse:
p._handleGetPeersResponse(key, bs[1:])
case typeDebugGetDHTRequest:
p._handleGetDHTRequest(key)
case typeDebugGetDHTResponse:
p._handleGetDHTResponse(key, bs[1:])
}
}
func (p *protoHandler) sendGetSelfRequest(key keyArray, callback func([]byte)) {
p.Act(nil, func() {
if info := p.sreqs[key]; info != nil {
info.timer.Stop()
delete(p.sreqs, key)
}
info := new(reqInfo)
info.callback = callback
info.timer = time.AfterFunc(time.Minute, func() {
p.Act(nil, func() {
if p.sreqs[key] == info {
delete(p.sreqs, key)
}
})
})
p.sreqs[key] = info
p._sendDebug(key, typeDebugGetSelfRequest, nil)
})
}
func (p *protoHandler) _handleGetSelfRequest(key keyArray) {
self := p.core.GetSelf()
res := map[string]string{
"key": hex.EncodeToString(self.Key[:]),
"coords": fmt.Sprintf("%v", self.Coords),
}
bs, err := json.Marshal(res) // FIXME this puts keys in base64, not hex
if err != nil {
return
}
p._sendDebug(key, typeDebugGetSelfResponse, bs)
}
func (p *protoHandler) _handleGetSelfResponse(key keyArray, bs []byte) {
if info := p.sreqs[key]; info != nil {
info.timer.Stop()
info.callback(bs)
delete(p.sreqs, key)
}
}
func (p *protoHandler) sendGetPeersRequest(key keyArray, callback func([]byte)) {
p.Act(nil, func() {
if info := p.preqs[key]; info != nil {
info.timer.Stop()
delete(p.preqs, key)
}
info := new(reqInfo)
info.callback = callback
info.timer = time.AfterFunc(time.Minute, func() {
p.Act(nil, func() {
if p.preqs[key] == info {
delete(p.preqs, key)
}
})
})
p.preqs[key] = info
p._sendDebug(key, typeDebugGetPeersRequest, nil)
})
}
func (p *protoHandler) _handleGetPeersRequest(key keyArray) {
peers := p.core.GetPeers()
var bs []byte
for _, pinfo := range peers {
tmp := append(bs, pinfo.Key[:]...)
const responseOverhead = 2 // 1 debug type, 1 getpeers type
if uint64(len(tmp))+responseOverhead > p.core.store.maxSessionMTU() {
break
}
bs = tmp
}
p._sendDebug(key, typeDebugGetPeersResponse, bs)
}
func (p *protoHandler) _handleGetPeersResponse(key keyArray, bs []byte) {
if info := p.preqs[key]; info != nil {
info.timer.Stop()
info.callback(bs)
delete(p.preqs, key)
}
}
func (p *protoHandler) sendGetDHTRequest(key keyArray, callback func([]byte)) {
p.Act(nil, func() {
if info := p.dreqs[key]; info != nil {
info.timer.Stop()
delete(p.dreqs, key)
}
info := new(reqInfo)
info.callback = callback
info.timer = time.AfterFunc(time.Minute, func() {
p.Act(nil, func() {
if p.dreqs[key] == info {
delete(p.dreqs, key)
}
})
})
p.dreqs[key] = info
p._sendDebug(key, typeDebugGetDHTRequest, nil)
})
}
func (p *protoHandler) _handleGetDHTRequest(key keyArray) {
dinfos := p.core.GetDHT()
var bs []byte
for _, dinfo := range dinfos {
tmp := append(bs, dinfo.Key[:]...)
const responseOverhead = 2 // 1 debug type, 1 getdht type
if uint64(len(tmp))+responseOverhead > p.core.store.maxSessionMTU() {
break
}
bs = tmp
}
p._sendDebug(key, typeDebugGetDHTResponse, bs)
}
func (p *protoHandler) _handleGetDHTResponse(key keyArray, bs []byte) {
if info := p.dreqs[key]; info != nil {
info.timer.Stop()
info.callback(bs)
delete(p.dreqs, key)
}
}
func (p *protoHandler) _sendDebug(key keyArray, dType uint8, data []byte) {
bs := append([]byte{typeSessionProto, typeProtoDebug, dType}, data...)
_, _ = p.core.pc.WriteTo(bs, iwt.Addr(key[:]))
}
// Admin socket stuff
type DebugGetSelfRequest struct {
Key string `json:"key"`
}
type DebugGetSelfResponse map[string]interface{}
func (p *protoHandler) getSelfHandler(in json.RawMessage) (interface{}, error) {
var req DebugGetSelfRequest
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
var key keyArray
var kbs []byte
var err error
if kbs, err = hex.DecodeString(req.Key); err != nil {
return nil, err
}
copy(key[:], kbs)
ch := make(chan []byte, 1)
p.sendGetSelfRequest(key, func(info []byte) {
ch <- info
})
timer := time.NewTimer(6 * time.Second)
defer timer.Stop()
select {
case <-timer.C:
return nil, errors.New("timeout")
case info := <-ch:
var msg json.RawMessage
if err := msg.UnmarshalJSON(info); err != nil {
return nil, err
}
ip := net.IP(address.AddrForKey(kbs)[:])
res := DebugGetSelfResponse{ip.String(): msg}
return res, nil
}
}
type DebugGetPeersRequest struct {
Key string `json:"key"`
}
type DebugGetPeersResponse map[string]interface{}
func (p *protoHandler) getPeersHandler(in json.RawMessage) (interface{}, error) {
var req DebugGetPeersRequest
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
var key keyArray
var kbs []byte
var err error
if kbs, err = hex.DecodeString(req.Key); err != nil {
return nil, err
}
copy(key[:], kbs)
ch := make(chan []byte, 1)
p.sendGetPeersRequest(key, func(info []byte) {
ch <- info
})
timer := time.NewTimer(6 * time.Second)
defer timer.Stop()
select {
case <-timer.C:
return nil, errors.New("timeout")
case info := <-ch:
ks := make(map[string][]string)
bs := info
for len(bs) >= len(key) {
ks["keys"] = append(ks["keys"], hex.EncodeToString(bs[:len(key)]))
bs = bs[len(key):]
}
js, err := json.Marshal(ks)
if err != nil {
return nil, err
}
var msg json.RawMessage
if err := msg.UnmarshalJSON(js); err != nil {
return nil, err
}
ip := net.IP(address.AddrForKey(kbs)[:])
res := DebugGetPeersResponse{ip.String(): msg}
return res, nil
}
}
type DebugGetDHTRequest struct {
Key string `json:"key"`
}
type DebugGetDHTResponse map[string]interface{}
func (p *protoHandler) getDHTHandler(in json.RawMessage) (interface{}, error) {
var req DebugGetDHTRequest
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
var key keyArray
var kbs []byte
var err error
if kbs, err = hex.DecodeString(req.Key); err != nil {
return nil, err
}
copy(key[:], kbs)
ch := make(chan []byte, 1)
p.sendGetDHTRequest(key, func(info []byte) {
ch <- info
})
timer := time.NewTimer(6 * time.Second)
defer timer.Stop()
select {
case <-timer.C:
return nil, errors.New("timeout")
case info := <-ch:
ks := make(map[string][]string)
bs := info
for len(bs) >= len(key) {
ks["keys"] = append(ks["keys"], hex.EncodeToString(bs[:len(key)]))
bs = bs[len(key):]
}
js, err := json.Marshal(ks)
if err != nil {
return nil, err
}
var msg json.RawMessage
if err := msg.UnmarshalJSON(js); err != nil {
return nil, err
}
ip := net.IP(address.AddrForKey(kbs)[:])
res := DebugGetDHTResponse{ip.String(): msg}
return res, nil
}
}

View File

@ -1,4 +1,4 @@
package yggdrasil
package core
// This sends packets to peers using TCP as a transport
// It's generally better tested than the UDP implementation
@ -19,6 +19,7 @@ import (
"fmt"
"math/rand"
"net"
"net/url"
"strings"
"sync"
"time"
@ -26,11 +27,10 @@ import (
"golang.org/x/net/proxy"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
//"github.com/yggdrasil-network/yggdrasil-go/src/util"
)
const default_timeout = 6 * time.Second
const tcp_ping_interval = (default_timeout * 2 / 3)
// The TCP listener and information about active TCP connections, to avoid duplication.
type tcp struct {
@ -49,12 +49,12 @@ type tcp struct {
// multicast interfaces.
type TcpListener struct {
Listener net.Listener
upgrade *TcpUpgrade
opts tcpOptions
stop chan struct{}
}
type TcpUpgrade struct {
upgrade func(c net.Conn) (net.Conn, error)
upgrade func(c net.Conn, o *tcpOptions) (net.Conn, error)
name string
}
@ -67,7 +67,7 @@ type tcpOptions struct {
}
func (l *TcpListener) Stop() {
defer func() { recover() }()
defer func() { _ = recover() }()
close(l.stop)
}
@ -75,7 +75,7 @@ func (l *TcpListener) Stop() {
func (t *tcp) setExtraOptions(c net.Conn) {
switch sock := c.(type) {
case *net.TCPConn:
sock.SetNoDelay(true)
_ = sock.SetNoDelay(true)
// TODO something for socks5
default:
}
@ -104,20 +104,15 @@ func (t *tcp) init(l *links) error {
t.listeners = make(map[string]*TcpListener)
t.mutex.Unlock()
t.links.core.config.Mutex.RLock()
defer t.links.core.config.Mutex.RUnlock()
for _, listenaddr := range t.links.core.config.Current.Listen {
switch listenaddr[:6] {
case "tcp://":
if _, err := t.listen(listenaddr[6:], nil); err != nil {
return err
}
case "tls://":
if _, err := t.listen(listenaddr[6:], t.tls.forListener); err != nil {
return err
}
default:
t.links.core.log.Errorln("Failed to add listener: listener", listenaddr, "is not correctly formatted, ignoring")
t.links.core.config.RLock()
defer t.links.core.config.RUnlock()
for _, listenaddr := range t.links.core.config.Listen {
u, err := url.Parse(listenaddr)
if err != nil {
t.links.core.log.Errorln("Failed to parse listener: listener", listenaddr, "is not correctly formatted, ignoring")
}
if _, err := t.listenURL(u, ""); err != nil {
return err
}
}
@ -134,47 +129,31 @@ func (t *tcp) stop() error {
return nil
}
func (t *tcp) reconfigure() {
t.links.core.config.Mutex.RLock()
added := util.Difference(t.links.core.config.Current.Listen, t.links.core.config.Previous.Listen)
deleted := util.Difference(t.links.core.config.Previous.Listen, t.links.core.config.Current.Listen)
t.links.core.config.Mutex.RUnlock()
if len(added) > 0 || len(deleted) > 0 {
for _, a := range added {
switch a[:6] {
case "tcp://":
if _, err := t.listen(a[6:], nil); err != nil {
t.links.core.log.Errorln("Error adding TCP", a[6:], "listener:", err)
}
case "tls://":
if _, err := t.listen(a[6:], t.tls.forListener); err != nil {
t.links.core.log.Errorln("Error adding TLS", a[6:], "listener:", err)
}
default:
t.links.core.log.Errorln("Failed to add listener: listener", a, "is not correctly formatted, ignoring")
}
}
for _, d := range deleted {
if d[:6] != "tcp://" && d[:6] != "tls://" {
t.links.core.log.Errorln("Failed to delete listener: listener", d, "is not correctly formatted, ignoring")
continue
}
t.mutex.Lock()
if listener, ok := t.listeners[d[6:]]; ok {
t.mutex.Unlock()
listener.Stop()
t.links.core.log.Infoln("Stopped TCP listener:", d[6:])
} else {
t.mutex.Unlock()
}
func (t *tcp) listenURL(u *url.URL, sintf string) (*TcpListener, error) {
var listener *TcpListener
var err error
hostport := u.Host // Used for tcp and tls
if len(sintf) != 0 {
host, port, err := net.SplitHostPort(hostport)
if err == nil {
hostport = fmt.Sprintf("[%s%%%s]:%s", host, sintf, port)
}
}
switch u.Scheme {
case "tcp":
listener, err = t.listen(hostport, nil)
case "tls":
listener, err = t.listen(hostport, t.tls.forListener)
default:
t.links.core.log.Errorln("Failed to add listener: listener", u.String(), "is not correctly formatted, ignoring")
}
return listener, err
}
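
When a listener is bound to a specific interface, listenURL rewrites the host portion into a scoped literal such as [fe80::1%eth0]:9001. A small sketch of that rewrite; the address, port and interface name are placeholders:

package main

import (
    "fmt"
    "net"
)

func main() {
    hostport := "[fe80::1]:9001" // placeholder listener address
    sintf := "eth0"              // placeholder interface name
    if host, port, err := net.SplitHostPort(hostport); err == nil {
        hostport = fmt.Sprintf("[%s%%%s]:%s", host, sintf, port)
    }
    fmt.Println(hostport) // [fe80::1%eth0]:9001
}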
func (t *tcp) listen(listenaddr string, upgrade *TcpUpgrade) (*TcpListener, error) {
var err error
ctx := context.Background()
ctx := t.links.core.ctx
lc := net.ListenConfig{
Control: t.tcpContext,
}
@ -182,7 +161,7 @@ func (t *tcp) listen(listenaddr string, upgrade *TcpUpgrade) (*TcpListener, erro
if err == nil {
l := TcpListener{
Listener: listener,
upgrade: upgrade,
opts: tcpOptions{upgrade: upgrade},
stop: make(chan struct{}),
}
t.waitgroup.Add(1)
@ -206,17 +185,21 @@ func (t *tcp) listener(l *TcpListener, listenaddr string) {
l.Listener.Close()
return
}
callproto := "TCP"
if l.opts.upgrade != nil {
callproto = strings.ToUpper(l.opts.upgrade.name)
}
t.listeners[listenaddr] = l
t.mutex.Unlock()
// And here we go!
defer func() {
t.links.core.log.Infoln("Stopping TCP listener on:", l.Listener.Addr().String())
t.links.core.log.Infoln("Stopping", callproto, "listener on:", l.Listener.Addr().String())
l.Listener.Close()
t.mutex.Lock()
delete(t.listeners, listenaddr)
t.mutex.Unlock()
}()
t.links.core.log.Infoln("Listening for TCP on:", l.Listener.Addr().String())
t.links.core.log.Infoln("Listening for", callproto, "on:", l.Listener.Addr().String())
go func() {
<-l.stop
l.Listener.Close()
@ -235,9 +218,7 @@ func (t *tcp) listener(l *TcpListener, listenaddr string) {
continue
}
t.waitgroup.Add(1)
options := tcpOptions{
upgrade: l.upgrade,
}
options := l.opts
go t.handler(sock, true, options)
}
}
@ -293,12 +274,14 @@ func (t *tcp) call(saddr string, options tcpOptions, sintf string) {
if err != nil {
return
}
conn, err = dialer.Dial("tcp", saddr)
ctx, done := context.WithTimeout(t.links.core.ctx, default_timeout)
conn, err = dialer.(proxy.ContextDialer).DialContext(ctx, "tcp", saddr)
done()
if err != nil {
return
}
t.waitgroup.Add(1)
options.socksPeerAddr = conn.RemoteAddr().String()
options.socksPeerAddr = saddr
if ch := t.handler(conn, false, options); ch != nil {
<-ch
}
@ -315,7 +298,6 @@ func (t *tcp) call(saddr string, options tcpOptions, sintf string) {
}
dialer := net.Dialer{
Control: t.tcpContext,
Timeout: time.Second * 5,
}
if sintf != "" {
dialer.Control = t.getControl(sintf)
@ -361,7 +343,9 @@ func (t *tcp) call(saddr string, options tcpOptions, sintf string) {
}
}
}
conn, err = dialer.Dial("tcp", dst.String())
ctx, done := context.WithTimeout(t.links.core.ctx, default_timeout)
conn, err = dialer.DialContext(ctx, "tcp", dst.String())
done()
if err != nil {
t.links.core.log.Debugf("Failed to dial %s: %s", callproto, err)
return
@ -381,14 +365,12 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) chan str
var upgraded bool
if options.upgrade != nil {
var err error
if sock, err = options.upgrade.upgrade(sock); err != nil {
if sock, err = options.upgrade.upgrade(sock, &options); err != nil {
t.links.core.log.Errorln("TCP handler upgrade failed:", err)
return nil
}
upgraded = true
}
stream := stream{}
stream.init(sock)
var name, proto, local, remote string
if options.socksProxyAddr != "" {
name = "socks://" + sock.RemoteAddr().String() + "/" + options.socksPeerAddr
@ -423,7 +405,7 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) chan str
}
}
force := net.ParseIP(strings.Split(remote, "%")[0]).IsLinkLocalUnicast()
link, err := t.links.create(&stream, name, proto, local, remote, incoming, force, options.linkOptions)
link, err := t.links.create(sock, name, proto, local, remote, incoming, force, options.linkOptions)
if err != nil {
t.links.core.log.Println(err)
panic(err)

View File

@ -1,6 +1,6 @@
// +build darwin
package yggdrasil
package core
import (
"syscall"

View File

@ -1,6 +1,6 @@
// +build linux
package yggdrasil
package core
import (
"syscall"
@ -36,7 +36,7 @@ func (t *tcp) getControl(sintf string) func(string, string, syscall.RawConn) err
btd := func(fd uintptr) {
err = unix.BindToDevice(int(fd), sintf)
}
c.Control(btd)
_ = c.Control(btd)
if err != nil {
t.links.core.log.Debugln("Failed to set SO_BINDTODEVICE:", sintf)
}

View File

@ -1,6 +1,6 @@
// +build !darwin,!linux
package yggdrasil
package core
import (
"syscall"

View File

@ -1,4 +1,4 @@
package yggdrasil
package core
import (
"bytes"
@ -9,6 +9,7 @@ import (
"crypto/x509/pkix"
"encoding/hex"
"encoding/pem"
"errors"
"log"
"math/big"
"net"
@ -34,7 +35,7 @@ func (t *tcptls) init(tcp *tcp) {
}
edpriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
copy(edpriv[:], tcp.links.core.sigPriv[:])
copy(edpriv[:], tcp.links.core.secret[:])
certBuf := &bytes.Buffer{}
@ -42,7 +43,7 @@ func (t *tcptls) init(tcp *tcp) {
pubtemp := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: hex.EncodeToString(tcp.links.core.sigPub[:]),
CommonName: hex.EncodeToString(tcp.links.core.public[:]),
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour * 24 * 365),
@ -76,16 +77,47 @@ func (t *tcptls) init(tcp *tcp) {
}
}
func (t *tcptls) upgradeListener(c net.Conn) (net.Conn, error) {
conn := tls.Server(c, t.config)
func (t *tcptls) configForOptions(options *tcpOptions) *tls.Config {
config := *t.config
config.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
if len(rawCerts) != 1 {
return errors.New("tls not exactly 1 cert")
}
cert, err := x509.ParseCertificate(rawCerts[0])
if err != nil {
return errors.New("tls failed to parse cert")
}
if cert.PublicKeyAlgorithm != x509.Ed25519 {
return errors.New("tls wrong cert algorithm")
}
pk := cert.PublicKey.(ed25519.PublicKey)
var key keyArray
copy(key[:], pk)
// If options does not have a pinned key, then pin one now
if options.pinnedEd25519Keys == nil {
options.pinnedEd25519Keys = make(map[keyArray]struct{})
options.pinnedEd25519Keys[key] = struct{}{}
}
if _, isIn := options.pinnedEd25519Keys[key]; !isIn {
return errors.New("tls key does not match pinned key")
}
return nil
}
return &config
}
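
The TLS identity is simply a self-signed X.509 certificate wrapping the node's ed25519 key, which is what VerifyPeerCertificate above parses and compares against the pinned keys. A minimal sketch of generating and checking such a certificate (fields are trimmed relative to the real init further up):

package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/hex"
    "fmt"
    "math/big"
    "time"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }
    template := x509.Certificate{
        SerialNumber: big.NewInt(1),
        Subject:      pkix.Name{CommonName: hex.EncodeToString(pub)},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().Add(time.Hour * 24 * 365),
    }
    der, err := x509.CreateCertificate(rand.Reader, &template, &template, pub, priv)
    if err != nil {
        panic(err)
    }
    cert, err := x509.ParseCertificate(der)
    if err != nil {
        panic(err)
    }
    // This is the algorithm check performed in configForOptions before comparing keys.
    fmt.Println(cert.PublicKeyAlgorithm == x509.Ed25519) // true
}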
func (t *tcptls) upgradeListener(c net.Conn, options *tcpOptions) (net.Conn, error) {
config := t.configForOptions(options)
conn := tls.Server(c, config)
if err := conn.Handshake(); err != nil {
return c, err
}
return conn, nil
}
func (t *tcptls) upgradeDialer(c net.Conn) (net.Conn, error) {
conn := tls.Client(c, t.config)
func (t *tcptls) upgradeDialer(c net.Conn, options *tcpOptions) (net.Conn, error) {
config := t.configForOptions(options)
conn := tls.Client(c, config)
if err := conn.Handshake(); err != nil {
return c, err
}

23
src/core/types.go Normal file
View File

@ -0,0 +1,23 @@
package core
// Out-of-band packet types
const (
typeKeyDummy = iota // nolint:deadcode,varcheck
typeKeyLookup
typeKeyResponse
)
// In-band packet types
const (
typeSessionDummy = iota // nolint:deadcode,varcheck
typeSessionTraffic
typeSessionProto
)
// Protocol packet types
const (
typeProtoDummy = iota
typeProtoNodeInfoRequest
typeProtoNodeInfoResponse
typeProtoDebug = 255
)
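
Putting the layers together: the first byte of an in-band message selects traffic versus protocol handling, the second byte of a protocol message selects nodeinfo or debug, and debug messages add one more type byte. A sketch of the resulting prefixes, with constant values mirroring the iota order above and a placeholder payload:

package main

import "fmt"

// Values mirror the iota order of the constant blocks above.
const (
    typeSessionTraffic = 1
    typeSessionProto   = 2

    typeProtoNodeInfoRequest = 1
    typeProtoDebug           = 255

    typeDebugGetSelfRequest = 1
)

func main() {
    ipv6Packet := []byte{0x60} // ordinary traffic: just the raw IPv6 packet
    fmt.Println(append([]byte{typeSessionTraffic}, ipv6Packet...))

    // A nodeinfo request has no payload beyond its two type bytes.
    fmt.Println([]byte{typeSessionProto, typeProtoNodeInfoRequest})

    // Debug messages carry one more type byte, then optional data.
    fmt.Println([]byte{typeSessionProto, typeProtoDebug, typeDebugGetSelfRequest})
}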

View File

@ -1,22 +1,20 @@
package yggdrasil
package core
// This file contains the version metadata struct
// Used in the initial connection setup and key exchange
// Some of this could arguably go in wire.go instead
import "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
import "crypto/ed25519"
// This is the version-specific metadata exchanged at the start of a connection.
// It must always begin with the 4 bytes "meta" and a major version number.
// The current version also includes a minor version number and the ed25519 public key that is exchanged to open a connection.
type version_metadata struct {
meta [4]byte
ver uint64 // 1 byte in this version
ver uint8 // 1 byte in this version
// Everything after this point potentially depends on the version number, and is subject to change in future versions
minorVer uint64 // 1 byte in this version
box crypto.BoxPubKey
sig crypto.SigPubKey
link crypto.BoxPubKey
minorVer uint8 // 1 byte in this version
key ed25519.PublicKey
}
// Gets a base metadata with no keys set, but with the correct version numbers.
@ -24,18 +22,16 @@ func version_getBaseMetadata() version_metadata {
return version_metadata{
meta: [4]byte{'m', 'e', 't', 'a'},
ver: 0,
minorVer: 2,
minorVer: 4,
}
}
// Gets the length of the metadata for this version, used to know how many bytes to read from the start of a connection.
func version_getMetaLength() (mlen int) {
mlen += 4 // meta
mlen++ // ver, as long as it's < 127, which it is in this version
mlen++ // minorVer, as long as it's < 127, which it is in this version
mlen += crypto.BoxPubKeyLen // box
mlen += crypto.SigPubKeyLen // sig
mlen += crypto.BoxPubKeyLen // link
mlen += 4 // meta
mlen++ // ver, as long as it's < 127, which it is in this version
mlen++ // minorVer, as long as it's < 127, which it is in this version
mlen += ed25519.PublicKeySize // key
return
}
@ -43,11 +39,9 @@ func version_getMetaLength() (mlen int) {
func (m *version_metadata) encode() []byte {
bs := make([]byte, 0, version_getMetaLength())
bs = append(bs, m.meta[:]...)
bs = append(bs, wire_encode_uint64(m.ver)...)
bs = append(bs, wire_encode_uint64(m.minorVer)...)
bs = append(bs, m.box[:]...)
bs = append(bs, m.sig[:]...)
bs = append(bs, m.link[:]...)
bs = append(bs, m.ver)
bs = append(bs, m.minorVer)
bs = append(bs, m.key[:]...)
if len(bs) != version_getMetaLength() {
panic("Inconsistent metadata length")
}
@ -56,20 +50,14 @@ func (m *version_metadata) encode() []byte {
// Decodes version metadata from its wire format into the struct.
func (m *version_metadata) decode(bs []byte) bool {
switch {
case !wire_chop_slice(m.meta[:], &bs):
return false
case !wire_chop_uint64(&m.ver, &bs):
return false
case !wire_chop_uint64(&m.minorVer, &bs):
return false
case !wire_chop_slice(m.box[:], &bs):
return false
case !wire_chop_slice(m.sig[:], &bs):
return false
case !wire_chop_slice(m.link[:], &bs):
if len(bs) != version_getMetaLength() {
return false
}
offset := 0
offset += copy(m.meta[:], bs[offset:])
m.ver, offset = bs[offset], offset+1
m.minorVer, offset = bs[offset], offset+1
m.key = append([]byte(nil), bs[offset:]...)
return true
}
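
The handshake metadata is now fixed-size: the four bytes "meta", one major and one minor version byte, then the 32-byte ed25519 public key, 38 bytes in total. A standalone sketch of the same framing; the function names here are illustrative rather than the package's own:

package main

import (
    "bytes"
    "crypto/ed25519"
    "fmt"
)

const metaLen = 4 + 1 + 1 + ed25519.PublicKeySize // 38 bytes

func encodeMeta(ver, minor byte, key ed25519.PublicKey) []byte {
    bs := make([]byte, 0, metaLen)
    bs = append(bs, 'm', 'e', 't', 'a')
    bs = append(bs, ver, minor)
    bs = append(bs, key...)
    return bs
}

func decodeMeta(bs []byte) (ver, minor byte, key ed25519.PublicKey, ok bool) {
    if len(bs) != metaLen || !bytes.Equal(bs[:4], []byte("meta")) {
        return 0, 0, nil, false
    }
    return bs[4], bs[5], append(ed25519.PublicKey(nil), bs[6:]...), true
}

func main() {
    pub, _, _ := ed25519.GenerateKey(nil)
    ver, minor, key, ok := decodeMeta(encodeMeta(0, 4, pub))
    fmt.Println(ver, minor, ok, bytes.Equal(key, pub)) // 0 4 true true
}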

View File

@ -1,311 +0,0 @@
// Package crypto is a wrapper around packages under golang.org/x/crypto/, particularly curve25519, ed25519, and nacl/box.
// This is used to avoid explicitly importing and using these packages throughout yggdrasil.
// It also includes the all-important NodeID and TreeID types, which are used to identify nodes in the DHT and in the spanning tree's root selection algorithm, respectively.
package crypto
/*
This part of the package wraps crypto operations needed elsewhere
In particular, it exposes key generation for ed25519 and nacl box
It also defines NodeID and TreeID as hashes of keys, and wraps hash functions
*/
import (
"crypto/rand"
"crypto/sha512"
"encoding/hex"
"sync"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/nacl/box"
)
////////////////////////////////////////////////////////////////////////////////
// NodeID and TreeID
// NodeIDLen is the length (in bytes) of a NodeID.
const NodeIDLen = sha512.Size
// TreeIDLen is the length (in bytes) of a TreeID.
const TreeIDLen = sha512.Size
// handleLen is the length (in bytes) of a Handle.
const handleLen = 8
// NodeID is how a yggdrasil node is identified in the DHT, and is used to derive IPv6 addresses and subnets in the main executable. It is a sha512sum hash of the node's BoxPubKey
type NodeID [NodeIDLen]byte
// TreeID is how a yggdrasil node is identified in the root selection algorithm used to construct the spanning tree.
type TreeID [TreeIDLen]byte
type Handle [handleLen]byte
func (n *NodeID) String() string {
return hex.EncodeToString(n[:])
}
// Network returns "nodeid" nearly always right now.
func (n *NodeID) Network() string {
return "nodeid"
}
// PrefixLength returns the number of bits set in a masked NodeID.
func (n *NodeID) PrefixLength() int {
var len int
for i, v := range *n {
_, _ = i, v
if v == 0xff {
len += 8
continue
}
for v&0x80 != 0 {
len++
v <<= 1
}
if v != 0 {
return -1
}
for i++; i < NodeIDLen; i++ {
if n[i] != 0 {
return -1
}
}
break
}
return len
}
// GetNodeID returns the NodeID associated with a BoxPubKey.
func GetNodeID(pub *BoxPubKey) *NodeID {
h := sha512.Sum512(pub[:])
return (*NodeID)(&h)
}
// GetTreeID returns the TreeID associated with a BoxPubKey
func GetTreeID(pub *SigPubKey) *TreeID {
h := sha512.Sum512(pub[:])
return (*TreeID)(&h)
}
// NewHandle returns a new (cryptographically random) Handle, used by the session code to identify which session an incoming packet is associated with.
func NewHandle() *Handle {
var h Handle
_, err := rand.Read(h[:])
if err != nil {
panic(err)
}
return &h
}
////////////////////////////////////////////////////////////////////////////////
// Signatures
// SigPubKeyLen is the length of a SigPubKey in bytes.
const SigPubKeyLen = ed25519.PublicKeySize
// SigPrivKeyLen is the length of a SigPrivKey in bytes.
const SigPrivKeyLen = ed25519.PrivateKeySize
// SigLen is the length of SigBytes.
const SigLen = ed25519.SignatureSize
// SigPubKey is a public ed25519 signing key.
type SigPubKey [SigPubKeyLen]byte
// SigPrivKey is a private ed25519 signing key.
type SigPrivKey [SigPrivKeyLen]byte
// SigBytes is an ed25519 signature.
type SigBytes [SigLen]byte
// NewSigKeys generates a public/private ed25519 key pair.
func NewSigKeys() (*SigPubKey, *SigPrivKey) {
var pub SigPubKey
var priv SigPrivKey
pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
copy(pub[:], pubSlice)
copy(priv[:], privSlice)
return &pub, &priv
}
// Sign returns the SigBytes signing a message.
func Sign(priv *SigPrivKey, msg []byte) *SigBytes {
var sig SigBytes
sigSlice := ed25519.Sign(priv[:], msg)
copy(sig[:], sigSlice)
return &sig
}
// Verify returns true if the provided signature matches the key and message.
func Verify(pub *SigPubKey, msg []byte, sig *SigBytes) bool {
// Should sig be an array instead of a slice?...
// It's fixed size, but
return ed25519.Verify(pub[:], msg, sig[:])
}
// Public returns the SigPubKey associated with this SigPrivKey.
func (p SigPrivKey) Public() SigPubKey {
priv := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
copy(priv[:], p[:])
pub := priv.Public().(ed25519.PublicKey)
var sigPub SigPubKey
copy(sigPub[:], pub[:])
return sigPub
}
////////////////////////////////////////////////////////////////////////////////
// NaCl-like crypto "box" (curve25519+xsalsa20+poly1305)
// BoxPubKeyLen is the length of a BoxPubKey in bytes.
const BoxPubKeyLen = 32
// BoxPrivKeyLen is the length of a BoxPrivKey in bytes.
const BoxPrivKeyLen = 32
// BoxSharedKeyLen is the length of a BoxSharedKey in bytes.
const BoxSharedKeyLen = 32
// BoxNonceLen is the length of a BoxNonce in bytes.
const BoxNonceLen = 24
// BoxOverhead is the length of the overhead from boxing something.
const BoxOverhead = box.Overhead
// BoxPubKey is a NaCl-like "box" public key (curve25519+xsalsa20+poly1305).
type BoxPubKey [BoxPubKeyLen]byte
// BoxPrivKey is a NaCl-like "box" private key (curve25519+xsalsa20+poly1305).
type BoxPrivKey [BoxPrivKeyLen]byte
// BoxSharedKey is a NaCl-like "box" shared key (curve25519+xsalsa20+poly1305).
type BoxSharedKey [BoxSharedKeyLen]byte
// BoxNonce is the nonce used in NaCl-like crypto "box" operations (curve25519+xsalsa20+poly1305), and must not be reused for different messages encrypted using the same BoxSharedKey.
type BoxNonce [BoxNonceLen]byte
// String returns a string representation of the "box" key.
func (k BoxPubKey) String() string {
return hex.EncodeToString(k[:])
}
// Network returns "curve25519" for "box" keys.
func (n BoxPubKey) Network() string {
return "curve25519"
}
// NewBoxKeys generates a new pair of public/private crypto box keys.
func NewBoxKeys() (*BoxPubKey, *BoxPrivKey) {
pubBytes, privBytes, err := box.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
pub := (*BoxPubKey)(pubBytes)
priv := (*BoxPrivKey)(privBytes)
return pub, priv
}
// GetSharedKey returns the shared key derived from your private key and the destination's public key.
func GetSharedKey(myPrivKey *BoxPrivKey,
othersPubKey *BoxPubKey) *BoxSharedKey {
var shared [BoxSharedKeyLen]byte
priv := (*[BoxPrivKeyLen]byte)(myPrivKey)
pub := (*[BoxPubKeyLen]byte)(othersPubKey)
box.Precompute(&shared, pub, priv)
return (*BoxSharedKey)(&shared)
}
// pool is used internally by BoxOpen and BoxSeal to avoid allocating temporary space
var pool = sync.Pool{New: func() interface{} { return []byte(nil) }}
// BoxOpen returns a message and true if it successfully opens a crypto box using the provided shared key and nonce.
// The boxed input slice's backing array is reused for the unboxed output when possible.
func BoxOpen(shared *BoxSharedKey,
boxed []byte,
nonce *BoxNonce) ([]byte, bool) {
s := (*[BoxSharedKeyLen]byte)(shared)
n := (*[BoxNonceLen]byte)(nonce)
temp := append(pool.Get().([]byte), boxed...)
unboxed, success := box.OpenAfterPrecomputation(boxed[:0], temp, n, s)
pool.Put(temp[:0])
return unboxed, success
}
// BoxSeal seals a crypto box using the provided shared key, returning the box and the nonce needed to decrypt it.
// If nonce is nil, a random BoxNonce will be used and returned.
// If nonce is non-nil, then nonce.Increment() will be called before using it, and the incremented BoxNonce is what is returned.
// The unboxed input slice's backing array is reused for the boxed output when possible.
func BoxSeal(shared *BoxSharedKey, unboxed []byte, nonce *BoxNonce) ([]byte, *BoxNonce) {
if nonce == nil {
nonce = NewBoxNonce()
}
nonce.Increment()
s := (*[BoxSharedKeyLen]byte)(shared)
n := (*[BoxNonceLen]byte)(nonce)
temp := append(pool.Get().([]byte), unboxed...)
boxed := box.SealAfterPrecomputation(unboxed[:0], temp, n, s)
pool.Put(temp[:0])
return boxed, nonce
}
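A minimal round-trip sketch of the box helpers, assuming the crypto package above; both sides derive the same shared key, so a message sealed by one can be opened by the other:

package main

import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

func main() {
	// Two hypothetical peers generate box keypairs.
	alicePub, alicePriv := crypto.NewBoxKeys()
	bobPub, bobPriv := crypto.NewBoxKeys()

	// Curve25519 key agreement is symmetric, so both shared keys match.
	aliceShared := crypto.GetSharedKey(alicePriv, bobPub)
	bobShared := crypto.GetSharedKey(bobPriv, alicePub)

	// Seal with a fresh random nonce (nil), then open on the other side.
	boxed, nonce := crypto.BoxSeal(aliceShared, []byte("hello"), nil)
	plain, ok := crypto.BoxOpen(bobShared, boxed, nonce)
	fmt.Println(ok, string(plain)) // true hello
}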
// NewBoxNonce generates a (cryptographically) random BoxNonce.
func NewBoxNonce() *BoxNonce {
var nonce BoxNonce
_, err := rand.Read(nonce[:])
for ; err == nil && nonce[0] == 0xff; _, err = rand.Read(nonce[:]) {
// Make sure nonce isn't too high
// This is just to make rollover unlikely to happen
// Rollover is fine, but it may kill the session and force it to reopen
}
if err != nil {
panic(err)
}
return &nonce
}
// Increment adds 2 to a BoxNonce, which is useful if one node intends to send only with odd BoxNonce values, and the other only with even BoxNonce values.
func (n *BoxNonce) Increment() {
oldNonce := *n
n[len(n)-1] += 2
for i := len(n) - 2; i >= 0; i-- {
if n[i+1] < oldNonce[i+1] {
n[i]++
}
}
}
// Public returns the BoxPubKey associated with this BoxPrivKey.
func (p BoxPrivKey) Public() BoxPubKey {
var boxPub [BoxPubKeyLen]byte
var boxPriv [BoxPrivKeyLen]byte
copy(boxPriv[:BoxPrivKeyLen], p[:BoxPrivKeyLen])
curve25519.ScalarBaseMult(&boxPub, &boxPriv)
return boxPub
}
// Minus is the result of subtracting the provided BoxNonce from this BoxNonce, bounded at ±64.
// It's primarily used to determine if a new BoxNonce is higher than the last known BoxNonce from a crypto session, and by how much.
// This is used in the machinery that makes sure replayed packets can't keep a session open indefinitely or stuck using old/bad information about a node.
func (n *BoxNonce) Minus(m *BoxNonce) int64 {
diff := int64(0)
for idx := range n {
diff *= 256
diff += int64(n[idx]) - int64(m[idx])
if diff > 64 {
diff = 64
}
if diff < -64 {
diff = -64
}
}
return diff
}
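The following sketch (illustrative, same crypto package assumption) shows Increment and Minus together, roughly as the session code would use them to compare nonces:

package main

import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

func main() {
	older := crypto.NewBoxNonce()
	newer := *older
	newer.Increment() // +2
	newer.Increment() // +2 again

	// Minus reports how far ahead one nonce is, clamped to the range [-64, 64].
	fmt.Println(newer.Minus(older))  // 4
	fmt.Println(older.Minus(&newer)) // -4
}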

View File

@ -1,6 +1,8 @@
package defaults
import "github.com/yggdrasil-network/yggdrasil-go/src/types"
import "github.com/yggdrasil-network/yggdrasil-go/src/config"
type MulticastInterfaceConfig = config.MulticastInterfaceConfig
// Defines which parameters are expected by default for configuration on a
// specific platform. These values are populated in the relevant defaults_*.go
@ -13,10 +15,30 @@ type platformDefaultParameters struct {
DefaultConfigFile string
// Multicast interfaces
DefaultMulticastInterfaces []string
DefaultMulticastInterfaces []MulticastInterfaceConfig
// TUN/TAP
MaximumIfMTU types.MTU
DefaultIfMTU types.MTU
MaximumIfMTU uint64
DefaultIfMTU uint64
DefaultIfName string
}
// Generates default configuration and returns a pointer to the resulting
// NodeConfig. This is used when outputting the -genconf parameter and also when
// using -autoconf.
func GenerateConfig() *config.NodeConfig {
// Create a node configuration and populate it.
cfg := new(config.NodeConfig)
cfg.NewKeys()
cfg.Listen = []string{}
cfg.AdminListen = GetDefaults().DefaultAdminListen
cfg.Peers = []string{}
cfg.InterfacePeers = map[string][]string{}
cfg.AllowedPublicKeys = []string{}
cfg.MulticastInterfaces = GetDefaults().DefaultMulticastInterfaces
cfg.IfName = GetDefaults().DefaultIfName
cfg.IfMTU = GetDefaults().DefaultIfMTU
cfg.NodeInfoPrivacy = false
return cfg
}
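A small usage sketch of GenerateConfig, roughly what -genconf does before printing the result; this example assumes plain JSON output, whereas real builds may format the configuration differently:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
)

func main() {
	// Build a fresh NodeConfig with new keys and platform defaults.
	cfg := defaults.GenerateConfig()
	bs, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bs))
}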

View File

@ -13,9 +13,9 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "/etc/yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
"en.*",
"bridge.*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: "en.*", Beacon: true, Listen: true},
{Regex: "bridge.*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -13,8 +13,8 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "/usr/local/etc/yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
".*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: ".*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -13,8 +13,8 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "/etc/yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
".*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: ".*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -13,8 +13,8 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "/etc/yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
".*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: ".*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -13,8 +13,8 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "/etc/yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
".*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: ".*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -13,8 +13,8 @@ func GetDefaults() platformDefaultParameters {
DefaultConfigFile: "C:\\Program Files\\Yggdrasil\\yggdrasil.conf",
// Multicast interfaces
DefaultMulticastInterfaces: []string{
".*",
DefaultMulticastInterfaces: []MulticastInterfaceConfig{
{Regex: ".*", Beacon: true, Listen: true},
},
// TUN/TAP

View File

@ -1,20 +0,0 @@
package module
import (
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)
// Module is an interface that defines which functions must be supported by a
// given Yggdrasil module.
type Module interface {
Init(core *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) error
Start() error
Stop() error
UpdateConfig(config *config.NodeConfig)
SetupAdminHandlers(a *admin.AdminSocket)
IsStarted() bool
}

View File

@ -1,13 +1,34 @@
package multicast
import "github.com/yggdrasil-network/yggdrasil-go/src/admin"
import (
"encoding/json"
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
)
type GetMulticastInterfacesRequest struct{}
type GetMulticastInterfacesResponse struct {
Interfaces []string `json:"multicast_interfaces"`
}
func (m *Multicast) getMulticastInterfacesHandler(req *GetMulticastInterfacesRequest, res *GetMulticastInterfacesResponse) error {
res.Interfaces = []string{}
for _, v := range m.Interfaces() {
res.Interfaces = append(res.Interfaces, v.Name)
}
return nil
}
func (m *Multicast) SetupAdminHandlers(a *admin.AdminSocket) {
a.AddHandler("getMulticastInterfaces", []string{}, func(in admin.Info) (admin.Info, error) {
var intfs []string
for _, v := range m.Interfaces() {
intfs = append(intfs, v.Name)
_ = a.AddHandler("getMulticastInterfaces", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetMulticastInterfacesRequest{}
res := &GetMulticastInterfacesResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
return admin.Info{"multicast_interfaces": intfs}, nil
if err := m.getMulticastInterfacesHandler(req, res); err != nil {
return nil, err
}
return res, nil
})
}

View File

@ -1,9 +1,14 @@
package multicast
import (
"bytes"
"context"
"crypto/ed25519"
"encoding/binary"
"encoding/hex"
"fmt"
"net"
"net/url"
"regexp"
"time"
@ -11,7 +16,7 @@ import (
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
"github.com/yggdrasil-network/yggdrasil-go/src/core"
"golang.org/x/net/ipv6"
)
@ -21,37 +26,38 @@ import (
// automatically.
type Multicast struct {
phony.Inbox
core *yggdrasil.Core
config *config.NodeState
core *core.Core
config *config.NodeConfig
log *log.Logger
sock *ipv6.PacketConn
groupAddr string
listeners map[string]*listenerInfo
listenPort uint16
isOpen bool
_interfaces map[string]interfaceInfo
}
type interfaceInfo struct {
iface net.Interface
addrs []net.Addr
iface net.Interface
addrs []net.Addr
beacon bool
listen bool
port uint16
}
type listenerInfo struct {
listener *yggdrasil.TcpListener
listener *core.TcpListener
time time.Time
interval time.Duration
port uint16
}
// Init prepares the multicast interface for use.
func (m *Multicast) Init(core *yggdrasil.Core, state *config.NodeState, log *log.Logger, options interface{}) error {
func (m *Multicast) Init(core *core.Core, nc *config.NodeConfig, log *log.Logger, options interface{}) error {
m.core = core
m.config = state
m.config = nc
m.log = log
m.listeners = make(map[string]*listenerInfo)
m._interfaces = make(map[string]interfaceInfo)
current := m.config.GetCurrent()
m.listenPort = current.LinkLocalTCPPort
m.groupAddr = "[ff02::114]:9001"
return nil
}
@ -72,7 +78,9 @@ func (m *Multicast) _start() error {
if m.isOpen {
return fmt.Errorf("multicast module is already started")
}
if len(m.config.GetCurrent().MulticastInterfaces) == 0 {
m.config.RLock()
defer m.config.RUnlock()
if len(m.config.MulticastInterfaces) == 0 {
return nil
}
m.log.Infoln("Starting multicast module")
@ -89,7 +97,7 @@ func (m *Multicast) _start() error {
return err
}
m.sock = ipv6.NewPacketConn(conn)
if err = m.sock.SetControlMessage(ipv6.FlagDst, true); err != nil {
if err = m.sock.SetControlMessage(ipv6.FlagDst, true); err != nil { // nolint:staticcheck
// Windows can't set this flag, so we need to handle it in other ways
}
@ -129,45 +137,17 @@ func (m *Multicast) _stop() error {
return nil
}
// UpdateConfig updates the multicast module with the provided config.NodeConfig
// and then signals the various module goroutines to reconfigure themselves if
// needed.
func (m *Multicast) UpdateConfig(config *config.NodeConfig) {
m.Act(nil, func() { m._updateConfig(config) })
}
func (m *Multicast) _updateConfig(config *config.NodeConfig) {
m.log.Infoln("Reloading multicast configuration...")
if m.isOpen {
if len(config.MulticastInterfaces) == 0 || config.LinkLocalTCPPort != m.listenPort {
if err := m._stop(); err != nil {
m.log.Errorln("Error stopping multicast module:", err)
}
}
}
m.config.Replace(*config)
m.listenPort = config.LinkLocalTCPPort
if !m.isOpen && len(config.MulticastInterfaces) > 0 {
if err := m._start(); err != nil {
m.log.Errorln("Error starting multicast module:", err)
}
}
m.log.Debugln("Reloaded multicast configuration successfully")
}
func (m *Multicast) _updateInterfaces() {
interfaces := make(map[string]interfaceInfo)
intfs := m.getAllowedInterfaces()
for _, intf := range intfs {
addrs, err := intf.Addrs()
interfaces := m.getAllowedInterfaces()
for name, info := range interfaces {
addrs, err := info.iface.Addrs()
if err != nil {
m.log.Warnf("Failed up get addresses for interface %s: %s", intf.Name, err)
m.log.Warnf("Failed up get addresses for interface %s: %s", name, err)
delete(interfaces, name)
continue
}
interfaces[intf.Name] = interfaceInfo{
iface: intf,
addrs: addrs,
}
info.addrs = addrs
interfaces[name] = info
}
m._interfaces = interfaces
}
@ -183,11 +163,10 @@ func (m *Multicast) Interfaces() map[string]net.Interface {
}
// getAllowedInterfaces returns the currently known/enabled multicast interfaces.
func (m *Multicast) getAllowedInterfaces() map[string]net.Interface {
interfaces := make(map[string]net.Interface)
func (m *Multicast) getAllowedInterfaces() map[string]interfaceInfo {
interfaces := make(map[string]interfaceInfo)
// Get interface expressions from config
current := m.config.GetCurrent()
exprs := current.MulticastInterfaces
ifcfgs := m.config.MulticastInterfaces
// Ask the system for network interfaces
allifaces, err := net.Interfaces()
if err != nil {
@ -209,15 +188,24 @@ func (m *Multicast) getAllowedInterfaces() map[string]net.Interface {
// Ignore point-to-point interfaces
continue
}
for _, expr := range exprs {
for _, ifcfg := range ifcfgs {
// Compile each regular expression
e, err := regexp.Compile(expr)
e, err := regexp.Compile(ifcfg.Regex)
if err != nil {
panic(err)
}
// Does the interface match the regular expression? Store it if so
if e.MatchString(iface.Name) {
interfaces[iface.Name] = iface
if ifcfg.Beacon || ifcfg.Listen {
info := interfaceInfo{
iface: iface,
beacon: ifcfg.Beacon,
listen: ifcfg.Listen,
port: ifcfg.Port,
}
interfaces[iface.Name] = info
}
break
}
}
}
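To make the matching logic above concrete, here is a standalone sketch using a local stand-in for the multicast interface config (the struct below is hypothetical and only mirrors the Regex/Beacon/Listen/Port fields used in this diff):

package main

import (
	"fmt"
	"regexp"
)

// ifaceConfig is a local stand-in, not the real config type.
type ifaceConfig struct {
	Regex  string
	Beacon bool
	Listen bool
	Port   uint16
}

func main() {
	cfgs := []ifaceConfig{
		{Regex: "en.*", Beacon: true, Listen: true},
		{Regex: "bridge.*", Beacon: false, Listen: true},
	}
	for _, name := range []string{"en0", "bridge0", "lo0"} {
		for _, cfg := range cfgs {
			e, err := regexp.Compile(cfg.Regex)
			if err != nil {
				panic(err)
			}
			// Only interfaces matching the regex with beacon or listen enabled are used.
			if e.MatchString(name) && (cfg.Beacon || cfg.Listen) {
				fmt.Printf("%s: beacon=%v listen=%v port=%d\n", name, cfg.Beacon, cfg.Listen, cfg.Port)
				break
			}
		}
	}
}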
@ -293,42 +281,55 @@ func (m *Multicast) _announce() {
if !addrIP.IsLinkLocalUnicast() {
continue
}
// Join the multicast group
m.sock.JoinGroup(&iface, groupAddr)
if info.listen {
// Join the multicast group, so we can listen for beacons
_ = m.sock.JoinGroup(&iface, groupAddr)
}
if !info.beacon {
break // Don't send multicast beacons or accept incoming connections
}
// Try and see if we already have a TCP listener for this interface
var info *listenerInfo
var linfo *listenerInfo
if nfo, ok := m.listeners[iface.Name]; !ok || nfo.listener.Listener == nil {
// No listener was found - let's create one
listenaddr := fmt.Sprintf("[%s%%%s]:%d", addrIP, iface.Name, m.listenPort)
if li, err := m.core.ListenTCP(listenaddr); err == nil {
urlString := fmt.Sprintf("tls://[%s]:%d", addrIP, info.port)
u, err := url.Parse(urlString)
if err != nil {
panic(err)
}
if li, err := m.core.Listen(u, iface.Name); err == nil {
m.log.Debugln("Started multicasting on", iface.Name)
// Store the listener so that we can stop it later if needed
info = &listenerInfo{listener: li, time: time.Now()}
m.listeners[iface.Name] = info
linfo = &listenerInfo{listener: li, time: time.Now()}
m.listeners[iface.Name] = linfo
} else {
m.log.Warnln("Not multicasting on", iface.Name, "due to error:", err)
}
} else {
// An existing listener was found
info = m.listeners[iface.Name]
linfo = m.listeners[iface.Name]
}
// Make sure nothing above failed for some reason
if info == nil {
if linfo == nil {
continue
}
if time.Since(info.time) < info.interval {
if time.Since(linfo.time) < linfo.interval {
continue
}
// Get the listener details and construct the multicast beacon
lladdr := info.listener.Listener.Addr().String()
lladdr := linfo.listener.Listener.Addr().String()
if a, err := net.ResolveTCPAddr("tcp6", lladdr); err == nil {
a.Zone = ""
destAddr.Zone = iface.Name
msg := []byte(a.String())
m.sock.WriteTo(msg, nil, destAddr)
msg := append([]byte(nil), m.core.GetSelf().Key...)
msg = append(msg, a.IP...)
pbs := make([]byte, 2)
binary.BigEndian.PutUint16(pbs, uint16(a.Port))
msg = append(msg, pbs...)
_, _ = m.sock.WriteTo(msg, nil, destAddr)
}
if info.interval.Seconds() < 15 {
info.interval += time.Second
if linfo.interval.Seconds() < 15 {
linfo.interval += time.Second
}
break
}
@ -363,22 +364,42 @@ func (m *Multicast) listen() {
continue
}
}
anAddr := string(bs[:nBytes])
addr, err := net.ResolveTCPAddr("tcp6", anAddr)
if nBytes < ed25519.PublicKeySize {
continue
}
var key ed25519.PublicKey
key = append(key, bs[:ed25519.PublicKeySize]...)
if bytes.Equal(key, m.core.GetSelf().Key) {
continue // don't bother trying to peer with self
}
begin := ed25519.PublicKeySize
end := nBytes - 2
if end <= begin {
continue // malformed address
}
ip := bs[begin:end]
port := binary.BigEndian.Uint16(bs[end:nBytes])
anAddr := net.TCPAddr{IP: ip, Port: int(port)}
addr, err := net.ResolveTCPAddr("tcp6", anAddr.String())
if err != nil {
continue
}
from := fromAddr.(*net.UDPAddr)
if addr.IP.String() != from.IP.String() {
if !from.IP.Equal(addr.IP) {
continue
}
var interfaces map[string]interfaceInfo
phony.Block(m, func() {
interfaces = m._interfaces
})
if _, ok := interfaces[from.Zone]; ok {
if info, ok := interfaces[from.Zone]; ok && info.listen {
addr.Zone = ""
if err := m.core.CallPeer("tcp://"+addr.String(), from.Zone); err != nil {
pin := fmt.Sprintf("/?key=%s", hex.EncodeToString(key))
u, err := url.Parse("tls://" + addr.String() + pin)
if err != nil {
m.log.Debugln("Call from multicast failed, parse error:", addr.String(), err)
}
if err := m.core.CallPeer(u, from.Zone); err != nil {
m.log.Debugln("Call from multicast failed:", err)
}
}
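The beacon built in _announce and parsed in listen is just the node's ed25519 public key, followed by the link-local IP bytes and a 2-byte big-endian port. Below is a self-contained sketch of that layout; the encode/decode helpers are hypothetical and not part of the multicast package:

package main

import (
	"crypto/ed25519"
	"encoding/binary"
	"fmt"
	"net"
)

// encodeBeacon lays out: public key || IP bytes || 2-byte big-endian port.
func encodeBeacon(key ed25519.PublicKey, ip net.IP, port uint16) []byte {
	msg := append([]byte(nil), key...)
	msg = append(msg, ip...)
	pbs := make([]byte, 2)
	binary.BigEndian.PutUint16(pbs, port)
	return append(msg, pbs...)
}

// decodeBeacon reverses the layout, rejecting anything too short to be valid.
func decodeBeacon(bs []byte) (ed25519.PublicKey, net.IP, uint16, bool) {
	if len(bs) <= ed25519.PublicKeySize+2 {
		return nil, nil, 0, false
	}
	key := ed25519.PublicKey(append([]byte(nil), bs[:ed25519.PublicKeySize]...))
	ip := net.IP(bs[ed25519.PublicKeySize : len(bs)-2])
	port := binary.BigEndian.Uint16(bs[len(bs)-2:])
	return key, ip, port, true
}

func main() {
	pub, _, err := ed25519.GenerateKey(nil) // nil reader falls back to crypto/rand
	if err != nil {
		panic(err)
	}
	beacon := encodeBeacon(pub, net.ParseIP("fe80::1"), 9001)
	key, ip, port, ok := decodeBeacon(beacon)
	fmt.Println(ok, key.Equal(pub), ip, port) // true true fe80::1 9001
}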

View File

@ -29,8 +29,6 @@ import (
"golang.org/x/sys/unix"
)
var awdlGoroutineStarted bool
func (m *Multicast) _multicastStarted() {
if !m.isOpen {
return

View File

@ -1,114 +1,37 @@
package tuntap
import (
"encoding/hex"
"errors"
"fmt"
"net"
"encoding/json"
"github.com/yggdrasil-network/yggdrasil-go/src/admin"
)
func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
a.AddHandler("getTunTap", []string{}, func(in admin.Info) (r admin.Info, e error) {
defer func() {
if err := recover(); err != nil {
r = admin.Info{"none": admin.Info{}}
e = nil
}
}()
type GetTUNRequest struct{}
type GetTUNResponse map[string]TUNEntry
return admin.Info{
t.Name(): admin.Info{
"mtu": t.mtu,
},
}, nil
})
/*
// TODO: rewrite this as I'm fairly sure it doesn't work right on many
// platforms anyway, but it may require changes to Water
a.AddHandler("setTunTap", []string{"name", "[tap_mode]", "[mtu]"}, func(in Info) (Info, error) {
// Set sane defaults
iftapmode := defaults.GetDefaults().DefaultIfTAPMode
ifmtu := defaults.GetDefaults().DefaultIfMTU
// Has TAP mode been specified?
if tap, ok := in["tap_mode"]; ok {
iftapmode = tap.(bool)
}
// Check we have enough params for MTU
if mtu, ok := in["mtu"]; ok {
if mtu.(float64) >= 1280 && ifmtu <= defaults.GetDefaults().MaximumIfMTU {
ifmtu = int(in["mtu"].(float64))
}
}
// Start the TUN adapter
if err := a.startTunWithMTU(in["name"].(string), iftapmode, ifmtu); err != nil {
return Info{}, errors.New("Failed to configure adapter")
} else {
return Info{
a.core.router.tun.iface.Name(): Info{
"tap_mode": a.core.router.tun.iface.IsTAP(),
"mtu": ifmtu,
},
}, nil
}
})
*/
a.AddHandler("getTunnelRouting", []string{}, func(in admin.Info) (admin.Info, error) {
return admin.Info{"enabled": t.ckr.isEnabled()}, nil
})
a.AddHandler("setTunnelRouting", []string{"enabled"}, func(in admin.Info) (admin.Info, error) {
enabled := false
if e, ok := in["enabled"].(bool); ok {
enabled = e
type TUNEntry struct {
MTU uint64 `json:"mtu"`
}
func (t *TunAdapter) getTUNHandler(req *GetTUNRequest, res *GetTUNResponse) error {
*res = GetTUNResponse{
t.Name(): TUNEntry{
MTU: t.MTU(),
},
}
return nil
}
func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
_ = a.AddHandler("getTunTap", []string{}, func(in json.RawMessage) (interface{}, error) {
req := &GetTUNRequest{}
res := &GetTUNResponse{}
if err := json.Unmarshal(in, &req); err != nil {
return nil, err
}
t.ckr.setEnabled(enabled)
return admin.Info{"enabled": enabled}, nil
})
a.AddHandler("addLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
if err := t.ckr.addLocalSubnet(in["subnet"].(string)); err == nil {
return admin.Info{"added": []string{in["subnet"].(string)}}, nil
if err := t.getTUNHandler(req, res); err != nil {
return nil, err
}
return admin.Info{"not_added": []string{in["subnet"].(string)}}, errors.New("Failed to add source subnet")
})
a.AddHandler("addRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
if err := t.ckr.addRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
return admin.Info{"added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
}
return admin.Info{"not_added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to add route")
})
a.AddHandler("getSourceSubnets", []string{}, func(in admin.Info) (admin.Info, error) {
var subnets []string
getSourceSubnets := func(snets []net.IPNet) {
for _, subnet := range snets {
subnets = append(subnets, subnet.String())
}
}
getSourceSubnets(t.ckr.ipv4locals)
getSourceSubnets(t.ckr.ipv6locals)
return admin.Info{"source_subnets": subnets}, nil
})
a.AddHandler("getRoutes", []string{}, func(in admin.Info) (admin.Info, error) {
routes := make(admin.Info)
getRoutes := func(ckrs []cryptokey_route) {
for _, ckr := range ckrs {
routes[ckr.subnet.String()] = hex.EncodeToString(ckr.destination[:])
}
}
getRoutes(t.ckr.ipv4remotes)
getRoutes(t.ckr.ipv6remotes)
return admin.Info{"routes": routes}, nil
})
a.AddHandler("removeLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
if err := t.ckr.removeLocalSubnet(in["subnet"].(string)); err == nil {
return admin.Info{"removed": []string{in["subnet"].(string)}}, nil
}
return admin.Info{"not_removed": []string{in["subnet"].(string)}}, errors.New("Failed to remove source subnet")
})
a.AddHandler("removeRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
if err := t.ckr.removeRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
return admin.Info{"removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
}
return admin.Info{"not_removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to remove route")
return res, nil
})
}
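For reference, a hedged sketch of the JSON shape that the getTunTap handler above produces; the TUNEntry/GetTUNResponse definitions here are local copies of the types shown in this diff, and the interface name and MTU are example values:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the response types, used only to illustrate the wire format.
type TUNEntry struct {
	MTU uint64 `json:"mtu"`
}

type GetTUNResponse map[string]TUNEntry

func main() {
	res := GetTUNResponse{
		"tun0": {MTU: 65535}, // example interface name and MTU
	}
	bs, err := json.Marshal(res)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bs)) // {"tun0":{"mtu":65535}}
}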

View File

@ -1,430 +0,0 @@
package tuntap
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"net"
"sort"
"sync"
"sync/atomic"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
// This module implements crypto-key routing, similar to Wireguard, where we
// allow traffic for non-Yggdrasil ranges to be routed over Yggdrasil.
type cryptokey struct {
tun *TunAdapter
enabled atomic.Value // bool
ipv4remotes []cryptokey_route
ipv6remotes []cryptokey_route
ipv4cache map[address.Address]cryptokey_route
ipv6cache map[address.Address]cryptokey_route
ipv4locals []net.IPNet
ipv6locals []net.IPNet
mutexremotes sync.RWMutex
mutexcaches sync.RWMutex
mutexlocals sync.RWMutex
}
type cryptokey_route struct {
subnet net.IPNet
destination crypto.BoxPubKey
}
// Initialise crypto-key routing. This must be done before any other CKR calls.
func (c *cryptokey) init(tun *TunAdapter) {
c.tun = tun
c.configure()
}
// Configure the CKR routes. This should only ever be run by the TUN/TAP actor.
func (c *cryptokey) configure() {
current := c.tun.config.GetCurrent()
// Set enabled/disabled state
c.setEnabled(current.TunnelRouting.Enable)
// Clear out existing routes
c.mutexremotes.Lock()
c.ipv6remotes = make([]cryptokey_route, 0)
c.ipv4remotes = make([]cryptokey_route, 0)
c.mutexremotes.Unlock()
// Add IPv6 routes
for ipv6, pubkey := range current.TunnelRouting.IPv6RemoteSubnets {
if err := c.addRemoteSubnet(ipv6, pubkey); err != nil {
c.tun.log.Errorln("Error adding CKR IPv6 remote subnet:", err)
}
}
// Add IPv4 routes
for ipv4, pubkey := range current.TunnelRouting.IPv4RemoteSubnets {
if err := c.addRemoteSubnet(ipv4, pubkey); err != nil {
c.tun.log.Errorln("Error adding CKR IPv4 remote subnet:", err)
}
}
// Clear out existing sources
c.mutexlocals.Lock()
c.ipv6locals = make([]net.IPNet, 0)
c.ipv4locals = make([]net.IPNet, 0)
c.mutexlocals.Unlock()
// Add IPv6 sources
c.ipv6locals = make([]net.IPNet, 0)
for _, source := range current.TunnelRouting.IPv6LocalSubnets {
if err := c.addLocalSubnet(source); err != nil {
c.tun.log.Errorln("Error adding CKR IPv6 local subnet:", err)
}
}
// Add IPv4 sources
c.ipv4locals = make([]net.IPNet, 0)
for _, source := range current.TunnelRouting.IPv4LocalSubnets {
if err := c.addLocalSubnet(source); err != nil {
c.tun.log.Errorln("Error adding CKR IPv4 local subnet:", err)
}
}
// Wipe the caches
c.mutexcaches.Lock()
c.ipv4cache = make(map[address.Address]cryptokey_route, 0)
c.ipv6cache = make(map[address.Address]cryptokey_route, 0)
c.mutexcaches.Unlock()
}
// Enable or disable crypto-key routing.
func (c *cryptokey) setEnabled(enabled bool) {
c.enabled.Store(enabled)
}
// Check if crypto-key routing is enabled.
func (c *cryptokey) isEnabled() bool {
enabled, ok := c.enabled.Load().(bool)
return ok && enabled
}
// Check whether the given address (with the address length specified in bytes)
// matches either the current node's address, the node's routed subnet or the
// list of subnets specified in ipv4locals/ipv6locals.
func (c *cryptokey) isValidLocalAddress(addr address.Address, addrlen int) bool {
c.mutexlocals.RLock()
defer c.mutexlocals.RUnlock()
// Does it match a configured CKR source?
if c.isEnabled() {
ip := net.IP(addr[:addrlen])
// Build our references to the routing sources
var routingsources *[]net.IPNet
// Check if the prefix is IPv4 or IPv6
if addrlen == net.IPv6len {
routingsources = &c.ipv6locals
} else if addrlen == net.IPv4len {
routingsources = &c.ipv4locals
} else {
return false
}
for _, subnet := range *routingsources {
if subnet.Contains(ip) {
return true
}
}
}
// Doesn't match any of the above
return false
}
// Adds a source subnet, which allows traffic with these source addresses to
// be tunnelled using crypto-key routing.
func (c *cryptokey) addLocalSubnet(cidr string) error {
c.mutexlocals.Lock()
defer c.mutexlocals.Unlock()
// Is the CIDR we've been given valid?
_, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
// Get the prefix length and size
_, prefixsize := ipnet.Mask.Size()
// Build our references to the routing sources
var routingsources *[]net.IPNet
// Check if the prefix is IPv4 or IPv6
if prefixsize == net.IPv6len*8 {
routingsources = &c.ipv6locals
} else if prefixsize == net.IPv4len*8 {
routingsources = &c.ipv4locals
} else {
return errors.New("unexpected prefix size")
}
// Check if we already have this CIDR
for _, subnet := range *routingsources {
if subnet.String() == ipnet.String() {
return errors.New("local subnet already configured")
}
}
// Add the source subnet
*routingsources = append(*routingsources, *ipnet)
c.tun.log.Infoln("Added CKR local subnet", cidr)
return nil
}
// Adds a destination route for the given CIDR to be tunnelled to the node
// with the given BoxPubKey.
func (c *cryptokey) addRemoteSubnet(cidr string, dest string) error {
c.mutexremotes.Lock()
c.mutexcaches.Lock()
defer c.mutexremotes.Unlock()
defer c.mutexcaches.Unlock()
// Is the CIDR we've been given valid?
ipaddr, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
// Get the prefix length and size
_, prefixsize := ipnet.Mask.Size()
// Build our references to the routing table and cache
var routingtable *[]cryptokey_route
var routingcache *map[address.Address]cryptokey_route
// Check if the prefix is IPv4 or IPv6
if prefixsize == net.IPv6len*8 {
routingtable = &c.ipv6remotes
routingcache = &c.ipv6cache
} else if prefixsize == net.IPv4len*8 {
routingtable = &c.ipv4remotes
routingcache = &c.ipv4cache
} else {
return errors.New("unexpected prefix size")
}
// Is the route a Yggdrasil destination?
var addr address.Address
var snet address.Subnet
copy(addr[:], ipaddr)
copy(snet[:], ipnet.IP)
if addr.IsValid() || snet.IsValid() {
return errors.New("can't specify Yggdrasil destination as crypto-key route")
}
// Do we already have a route for this subnet?
for _, route := range *routingtable {
if route.subnet.String() == ipnet.String() {
return fmt.Errorf("remote subnet already exists for %s", cidr)
}
}
// Decode the public key
if bpk, err := hex.DecodeString(dest); err != nil {
return err
} else if len(bpk) != crypto.BoxPubKeyLen {
return fmt.Errorf("incorrect key length for %s", dest)
} else {
// Add the new crypto-key route
var key crypto.BoxPubKey
copy(key[:], bpk)
*routingtable = append(*routingtable, cryptokey_route{
subnet: *ipnet,
destination: key,
})
// Sort so most specific routes are first
sort.Slice(*routingtable, func(i, j int) bool {
im, _ := (*routingtable)[i].subnet.Mask.Size()
jm, _ := (*routingtable)[j].subnet.Mask.Size()
return im > jm
})
// Clear the cache as this route might change future routing
// Setting an empty slice keeps the memory whereas nil invokes GC
for k := range *routingcache {
delete(*routingcache, k)
}
c.tun.log.Infoln("Added CKR remote subnet", cidr)
return nil
}
}
// Looks up the most specific route for the given address (with the address
// length specified in bytes) from the crypto-key routing table. An error is
// returned if the address is not suitable or no route was found.
func (c *cryptokey) getPublicKeyForAddress(addr address.Address, addrlen int) (crypto.BoxPubKey, error) {
// Check if the address is a valid Yggdrasil address - if so it
// is exempt from all CKR checking
if addr.IsValid() {
return crypto.BoxPubKey{}, errors.New("cannot look up CKR for Yggdrasil addresses")
}
// Build our references to the routing table and cache
var routingtable *[]cryptokey_route
var routingcache *map[address.Address]cryptokey_route
// Check if the prefix is IPv4 or IPv6
if addrlen == net.IPv6len {
routingcache = &c.ipv6cache
} else if addrlen == net.IPv4len {
routingcache = &c.ipv4cache
} else {
return crypto.BoxPubKey{}, errors.New("unexpected prefix size")
}
// Check if there's a cache entry for this addr
c.mutexcaches.RLock()
if route, ok := (*routingcache)[addr]; ok {
c.mutexcaches.RUnlock()
return route.destination, nil
}
c.mutexcaches.RUnlock()
c.mutexremotes.RLock()
defer c.mutexremotes.RUnlock()
// Check if the prefix is IPv4 or IPv6
if addrlen == net.IPv6len {
routingtable = &c.ipv6remotes
} else if addrlen == net.IPv4len {
routingtable = &c.ipv4remotes
} else {
return crypto.BoxPubKey{}, errors.New("unexpected prefix size")
}
// No cache was found - start by converting the address into a net.IP
ip := make(net.IP, addrlen)
copy(ip[:addrlen], addr[:])
// Check if we have a route. At this point c.ipv6remotes should be
// pre-sorted so that the most specific routes are first
for _, route := range *routingtable {
// Does this subnet match the given IP?
if route.subnet.Contains(ip) {
c.mutexcaches.Lock()
defer c.mutexcaches.Unlock()
// Check if the routing cache is above a certain size, if it is evict
// a random entry so we can make room for this one. We take advantage
// of the fact that the iteration order is random here
for k := range *routingcache {
if len(*routingcache) < 1024 {
break
}
delete(*routingcache, k)
}
// Cache the entry for future packets to get a faster lookup
(*routingcache)[addr] = route
// Return the boxPubKey
return route.destination, nil
}
}
// No route was found if we got to this point
return crypto.BoxPubKey{}, fmt.Errorf("no route to %s", ip.String())
}
// Removes a source subnet, which allows traffic with these source addresses to
// be tunnelled using crypto-key routing.
func (c *cryptokey) removeLocalSubnet(cidr string) error {
c.mutexlocals.Lock()
defer c.mutexlocals.Unlock()
// Is the CIDR we've been given valid?
_, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
// Get the prefix length and size
_, prefixsize := ipnet.Mask.Size()
// Build our references to the routing sources
var routingsources *[]net.IPNet
// Check if the prefix is IPv4 or IPv6
if prefixsize == net.IPv6len*8 {
routingsources = &c.ipv6locals
} else if prefixsize == net.IPv4len*8 {
routingsources = &c.ipv4locals
} else {
return errors.New("unexpected prefix size")
}
// Check if we already have this CIDR
for idx, subnet := range *routingsources {
if subnet.String() == ipnet.String() {
*routingsources = append((*routingsources)[:idx], (*routingsources)[idx+1:]...)
c.tun.log.Infoln("Removed CKR local subnet", cidr)
return nil
}
}
return errors.New("local subnet not found")
}
// Removes a destination route for the given CIDR to be tunnelled to the node
// with the given BoxPubKey.
func (c *cryptokey) removeRemoteSubnet(cidr string, dest string) error {
c.mutexremotes.Lock()
c.mutexcaches.Lock()
defer c.mutexremotes.Unlock()
defer c.mutexcaches.Unlock()
// Is the CIDR we've been given valid?
_, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return err
}
// Get the prefix length and size
_, prefixsize := ipnet.Mask.Size()
// Build our references to the routing table and cache
var routingtable *[]cryptokey_route
var routingcache *map[address.Address]cryptokey_route
// Check if the prefix is IPv4 or IPv6
if prefixsize == net.IPv6len*8 {
routingtable = &c.ipv6remotes
routingcache = &c.ipv6cache
} else if prefixsize == net.IPv4len*8 {
routingtable = &c.ipv4remotes
routingcache = &c.ipv4cache
} else {
return errors.New("unexpected prefix size")
}
// Decode the public key
bpk, err := hex.DecodeString(dest)
if err != nil {
return err
} else if len(bpk) != crypto.BoxPubKeyLen {
return fmt.Errorf("incorrect key length for %s", dest)
}
netStr := ipnet.String()
for idx, route := range *routingtable {
if bytes.Equal(route.destination[:], bpk) && route.subnet.String() == netStr {
*routingtable = append((*routingtable)[:idx], (*routingtable)[idx+1:]...)
for k := range *routingcache {
delete(*routingcache, k)
}
c.tun.log.Infof("Removed CKR remote subnet %s via %s\n", cidr, dest)
return nil
}
}
return fmt.Errorf("route does not exists for %s", cidr)
}

View File

@ -1,227 +0,0 @@
package tuntap
import (
"bytes"
"errors"
"time"
"github.com/Arceliar/phony"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv6"
)
const tunConnTimeout = 2 * time.Minute
type tunConn struct {
phony.Inbox
tun *TunAdapter
conn *yggdrasil.Conn
addr address.Address
snet address.Subnet
stop chan struct{}
alive *time.Timer // From calling time.AfterFunc
}
func (s *tunConn) close() {
s.tun.Act(s, s._close_from_tun)
}
func (s *tunConn) _close_from_tun() {
go s.conn.Close() // Just in case it blocks on actor operations
delete(s.tun.addrToConn, s.addr)
delete(s.tun.subnetToConn, s.snet)
func() {
defer func() { recover() }()
close(s.stop) // Closes reader/writer goroutines
}()
}
func (s *tunConn) _read(bs []byte) (err error) {
select {
case <-s.stop:
err = errors.New("session was already closed")
return
default:
}
if len(bs) == 0 {
err = errors.New("read packet with 0 size")
return
}
ipv4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
ipv6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
isCGA := true
// Check source addresses
switch {
case ipv6 && bs[8] == 0x02 && bytes.Equal(s.addr[:16], bs[8:24]): // source
case ipv6 && bs[8] == 0x03 && bytes.Equal(s.snet[:8], bs[8:16]): // source
default:
isCGA = false
}
// Check destination addresses
switch {
case ipv6 && bs[24] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[24:40]): // destination
case ipv6 && bs[24] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[24:32]): // destination
default:
isCGA = false
}
// Decide how to handle the packet
var skip bool
switch {
case isCGA: // Allowed
case s.tun.ckr.isEnabled() && (ipv4 || ipv6):
var srcAddr address.Address
var dstAddr address.Address
var addrlen int
if ipv4 {
copy(srcAddr[:], bs[12:16])
copy(dstAddr[:], bs[16:20])
addrlen = 4
}
if ipv6 {
copy(srcAddr[:], bs[8:24])
copy(dstAddr[:], bs[24:40])
addrlen = 16
}
if !s.tun.ckr.isValidLocalAddress(dstAddr, addrlen) {
// The destination address isn't in our CKR allowed range
skip = true
} else if key, err := s.tun.ckr.getPublicKeyForAddress(srcAddr, addrlen); err == nil {
if *s.conn.RemoteAddr().(*crypto.BoxPubKey) == key {
// This is the one allowed CKR case, where source and destination addresses are both good
} else {
// The CKR key associated with this address doesn't match the sender's NodeID
skip = true
}
} else {
// We have no CKR route for this source address
skip = true
}
default:
skip = true
}
if skip {
err = errors.New("address not allowed")
return
}
s.tun.writer.writeFrom(s, bs)
s.stillAlive()
return
}
func (s *tunConn) writeFrom(from phony.Actor, bs []byte) {
s.Act(from, func() {
s._write(bs)
})
}
func (s *tunConn) _write(bs []byte) (err error) {
select {
case <-s.stop:
err = errors.New("session was already closed")
return
default:
}
v4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
v6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
isCGA := true
// Check source addresses
switch {
case v6 && bs[8] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[8:24]): // source
case v6 && bs[8] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[8:16]): // source
default:
isCGA = false
}
// Check destination addresses
switch {
case v6 && bs[24] == 0x02 && bytes.Equal(s.addr[:16], bs[24:40]): // destination
case v6 && bs[24] == 0x03 && bytes.Equal(s.snet[:8], bs[24:32]): // destination
default:
isCGA = false
}
// Decide how to handle the packet
var skip bool
switch {
case isCGA: // Allowed
case s.tun.ckr.isEnabled() && (v4 || v6):
var srcAddr address.Address
var dstAddr address.Address
var addrlen int
if v4 {
copy(srcAddr[:], bs[12:16])
copy(dstAddr[:], bs[16:20])
addrlen = 4
}
if v6 {
copy(srcAddr[:], bs[8:24])
copy(dstAddr[:], bs[24:40])
addrlen = 16
}
if !s.tun.ckr.isValidLocalAddress(srcAddr, addrlen) {
// The source address isn't in our CKR allowed range
skip = true
} else if key, err := s.tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
if *s.conn.RemoteAddr().(*crypto.BoxPubKey) == key {
// This is the one allowed CKR case, where source and destination addresses are both good
} else {
// The CKR key associated with this address doesn't match the sender's NodeID
skip = true
}
} else {
// We have no CKR route for this destination address... why do we have the packet in the first place?
skip = true
}
default:
skip = true
}
if skip {
err = errors.New("address not allowed")
return
}
msg := yggdrasil.FlowKeyMessage{
FlowKey: util.GetFlowKey(bs),
Message: bs,
}
s.conn.WriteFrom(s, msg, func(err error) {
if err == nil {
// No point in wasting resources to send back an error if there was none
return
}
s.Act(s.conn, func() {
if e, eok := err.(yggdrasil.ConnError); !eok {
if e.Closed() {
s.tun.log.Debugln(s.conn.String(), "TUN/TAP generic write debug:", err)
} else {
s.tun.log.Errorln(s.conn.String(), "TUN/TAP generic write error:", err)
}
} else if e.PacketTooBig() {
// TODO: This currently isn't aware of IPv4 for CKR
ptb := &icmp.PacketTooBig{
MTU: int(e.PacketMaximumSize()),
Data: bs[:900],
}
if packet, err := CreateICMPv6(bs[8:24], bs[24:40], ipv6.ICMPTypePacketTooBig, 0, ptb); err == nil {
s.tun.writer.writeFrom(s, packet)
}
} else {
if e.Closed() {
s.tun.log.Debugln(s.conn.String(), "TUN/TAP conn write debug:", err)
} else {
s.tun.log.Errorln(s.conn.String(), "TUN/TAP conn write error:", err)
}
}
})
})
s.stillAlive()
return
}
func (s *tunConn) stillAlive() {
if s.alive != nil {
s.alive.Stop()
}
s.alive = time.AfterFunc(tunConnTimeout, s.close)
}

View File

@ -1,220 +1,47 @@
package tuntap
import (
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv6"
"github.com/Arceliar/phony"
)
const TUN_OFFSET_BYTES = 4
type tunWriter struct {
phony.Inbox
tun *TunAdapter
buf [TUN_OFFSET_BYTES + 65536]byte
}
func (w *tunWriter) writeFrom(from phony.Actor, b []byte) {
w.Act(from, func() {
w._write(b)
})
}
// write is pretty loose with the memory safety rules, e.g. it assumes it can
// read w.tun.iface.IsTap() safely
func (w *tunWriter) _write(b []byte) {
var written int
var err error
n := len(b)
if n == 0 {
return
}
temp := append(w.buf[:TUN_OFFSET_BYTES], b...)
written, err = w.tun.iface.Write(temp, TUN_OFFSET_BYTES)
if err != nil {
w.tun.Act(w, func() {
if !w.tun.isOpen {
w.tun.log.Errorln("TUN iface write error:", err)
func (tun *TunAdapter) read() {
var buf [TUN_OFFSET_BYTES + 65535]byte
for {
n, err := tun.iface.Read(buf[:], TUN_OFFSET_BYTES)
if n <= TUN_OFFSET_BYTES || err != nil {
tun.log.Errorln("Error reading TUN:", err)
ferr := tun.iface.Flush()
if ferr != nil {
tun.log.Errorln("Unable to flush packets:", ferr)
}
})
}
if written != n+TUN_OFFSET_BYTES {
// FIXME some platforms return the wrong number of bytes written, causing error spam
//w.tun.log.Errorln("TUN iface write mismatch:", written, "bytes written vs", n+TUN_OFFSET_BYTES, "bytes given")
return
}
begin := TUN_OFFSET_BYTES
end := begin + n
bs := buf[begin:end]
if _, err := tun.core.Write(bs); err != nil {
tun.log.Debugln("Unable to send packet:", err)
}
}
}
type tunReader struct {
phony.Inbox
tun *TunAdapter
buf [TUN_OFFSET_BYTES + 65536]byte
}
func (r *tunReader) _read() {
// Get a slice to store the packet in
// Wait for a packet to be delivered to us through the TUN adapter
n, err := r.tun.iface.Read(r.buf[:], TUN_OFFSET_BYTES)
if n <= TUN_OFFSET_BYTES || err != nil {
r.tun.log.Errorln("Error reading TUN:", err)
ferr := r.tun.iface.Flush()
if ferr != nil {
r.tun.log.Errorln("Unable to flush packets:", ferr)
}
} else {
bs := make([]byte, n, n+crypto.BoxOverhead) // extra capacity for later...
copy(bs, r.buf[TUN_OFFSET_BYTES:n+TUN_OFFSET_BYTES])
r.tun.handlePacketFrom(r, bs, err)
}
if err == nil {
// Now read again
r.Act(nil, r._read)
}
}
func (tun *TunAdapter) handlePacketFrom(from phony.Actor, packet []byte, err error) {
tun.Act(from, func() {
tun._handlePacket(packet, err)
})
}
// does the work of reading a packet and sending it to the correct tunConn
func (tun *TunAdapter) _handlePacket(recvd []byte, err error) {
if err != nil {
tun.log.Errorln("TUN iface read error:", err)
return
}
// Offset the buffer from now on so that we can ignore ethernet frames if
// they are present
bs := recvd[:]
// Check if the packet is long enough to detect if it's an ICMP packet or not
if len(bs) < 7 {
tun.log.Traceln("TUN iface read undersized unknown packet, length:", len(bs))
return
}
// From the IP header, work out what our source and destination addresses
// and node IDs are. We will need these in order to work out where to send
// the packet
var dstAddr address.Address
var dstSnet address.Subnet
var addrlen int
n := len(bs)
// Check the IP protocol - if it doesn't match then we drop the packet and
// do nothing with it
if bs[0]&0xf0 == 0x60 {
// Check if we have a fully-sized IPv6 header
if len(bs) < 40 {
tun.log.Traceln("TUN iface read undersized ipv6 packet, length:", len(bs))
func (tun *TunAdapter) write() {
var buf [TUN_OFFSET_BYTES + 65535]byte
for {
bs := buf[TUN_OFFSET_BYTES:]
n, err := tun.core.Read(bs)
if err != nil {
tun.log.Errorln("Exiting tun writer due to core read error:", err)
return
}
// Check the packet size
if n-tun_IPv6_HEADER_LENGTH != 256*int(bs[4])+int(bs[5]) {
return
if !tun.isEnabled {
continue // Nothing to do, the tun isn't enabled
}
// IPv6 address
addrlen = 16
copy(dstAddr[:addrlen], bs[24:])
copy(dstSnet[:addrlen/2], bs[24:])
} else if bs[0]&0xf0 == 0x40 {
// Check if we have a fully-sized IPv4 header
if len(bs) < 20 {
tun.log.Traceln("TUN iface read undersized ipv4 packet, length:", len(bs))
return
bs = buf[:TUN_OFFSET_BYTES+n]
if _, err = tun.iface.Write(bs, TUN_OFFSET_BYTES); err != nil {
tun.Act(nil, func() {
if !tun.isOpen {
tun.log.Errorln("TUN iface write error:", err)
}
})
}
// Check the packet size
if n != 256*int(bs[2])+int(bs[3]) {
return
}
// IPv4 address
addrlen = 4
copy(dstAddr[:addrlen], bs[16:])
} else {
// Unknown address length or protocol, so drop the packet and ignore it
tun.log.Traceln("Unknown packet type, dropping")
return
}
if tun.ckr.isEnabled() {
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
if key, err := tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
// A public key was found, get the node ID for the search
dstNodeID := crypto.GetNodeID(&key)
dstAddr = *address.AddrForNodeID(dstNodeID)
dstSnet = *address.SubnetForNodeID(dstNodeID)
addrlen = 16
}
}
}
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
// Couldn't find this node's ygg IP
dlen := len(bs)
if dlen > 900 {
dlen = 900
}
ptb := &icmp.DstUnreach{
Data: bs[:dlen],
}
if packet, err := CreateICMPv6(bs[8:24], bs[24:40], ipv6.ICMPTypeDestinationUnreachable, 0, ptb); err == nil {
tun.writer.writeFrom(nil, packet)
}
return
}
// Do we have an active connection for this node address?
var dstString string
session, isIn := tun.addrToConn[dstAddr]
if !isIn || session == nil {
session, isIn = tun.subnetToConn[dstSnet]
if !isIn || session == nil {
// Neither an address nor a subnet mapping matched, therefore populate
// the node ID and mask to commence a search
if dstAddr.IsValid() {
dstString = dstAddr.GetNodeIDLengthString()
} else {
dstString = dstSnet.GetNodeIDLengthString()
}
}
}
// If we don't have a connection then we should open one
if !isIn || session == nil {
// Check we haven't been given empty node ID, really this shouldn't ever
// happen but just to be sure...
if dstString == "" {
panic("Given empty dstString - this shouldn't happen")
}
_, known := tun.dials[dstString]
tun.dials[dstString] = append(tun.dials[dstString], bs)
for len(tun.dials[dstString]) > 32 {
tun.dials[dstString] = tun.dials[dstString][1:]
}
if !known {
go func() {
conn, err := tun.dialer.Dial("nodeid", dstString)
tun.Act(nil, func() {
packets := tun.dials[dstString]
delete(tun.dials, dstString)
if err != nil {
return
}
// We've been given a connection so prepare the session wrapper
var tc *tunConn
if tc, err = tun._wrap(conn.(*yggdrasil.Conn)); err != nil {
// Something went wrong when storing the connection, typically that
// something already exists for this address or subnet
tun.log.Debugln("TUN iface wrap:", err)
return
}
for _, packet := range packets {
tc.writeFrom(nil, packet)
}
})
}()
}
}
// If we have a connection now, try writing to it
if isIn && session != nil {
session.writeFrom(tun, bs)
}
}

View File

@ -9,7 +9,6 @@ package tuntap
// TODO: Don't block in reader on writes that are pending searches
import (
"encoding/hex"
"errors"
"fmt"
"net"
@ -22,51 +21,33 @@ import (
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/core"
"github.com/yggdrasil-network/yggdrasil-go/src/defaults"
"github.com/yggdrasil-network/yggdrasil-go/src/types"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)
type MTU = types.MTU
const tun_IPv6_HEADER_LENGTH = 40
type MTU uint16
// TunAdapter represents a running TUN interface and extends the
// yggdrasil.Adapter type. In order to use the TUN adapter with Yggdrasil, you
// should pass this object to the yggdrasil.SetRouterAdapter() function before
// calling yggdrasil.Start().
type TunAdapter struct {
core *yggdrasil.Core
writer tunWriter
reader tunReader
config *config.NodeState
core *core.Core
config *config.NodeConfig
log *log.Logger
reconfigure chan chan error
listener *yggdrasil.Listener
dialer *yggdrasil.Dialer
addr address.Address
subnet address.Subnet
ckr cryptokey
icmpv6 ICMPv6
mtu MTU
mtu uint64
iface tun.Device
phony.Inbox // Currently only used for _handlePacket from the reader, TODO: all the stuff that currently needs a mutex below
//mutex sync.RWMutex // Protects the below
addrToConn map[address.Address]*tunConn
subnetToConn map[address.Subnet]*tunConn
dials map[string][][]byte // Buffer of packets to send after dialing finishes
isOpen bool
}
type TunOptions struct {
Listener *yggdrasil.Listener
Dialer *yggdrasil.Dialer
isOpen bool
isEnabled bool // Used by the writer to drop sessionTraffic if not enabled
}
// Gets the maximum supported MTU for the platform based on the defaults in
// defaults.GetDefaults().
func getSupportedMTU(mtu MTU) MTU {
func getSupportedMTU(mtu uint64) uint64 {
if mtu < 1280 {
return 1280
}
@ -88,7 +69,7 @@ func (tun *TunAdapter) Name() string {
// MTU gets the adapter's MTU. This can range between 1280 and 65535, although
// the maximum value is determined by your platform. The returned value will
// never exceed that of MaximumMTU().
func (tun *TunAdapter) MTU() MTU {
func (tun *TunAdapter) MTU() uint64 {
return getSupportedMTU(tun.mtu)
}
@ -99,34 +80,23 @@ func DefaultName() string {
// DefaultMTU gets the default TUN interface MTU for your platform. This can
// be as high as MaximumMTU(), depending on platform, but is never lower than 1280.
func DefaultMTU() MTU {
func DefaultMTU() uint64 {
return defaults.GetDefaults().DefaultIfMTU
}
// MaximumMTU returns the maximum supported TUN interface MTU for your
// platform. This can be as high as 65535, depending on platform, but is never
// lower than 1280.
func MaximumMTU() MTU {
func MaximumMTU() uint64 {
return defaults.GetDefaults().MaximumIfMTU
}
// Init initialises the TUN module. You must have acquired a Listener from
// the Yggdrasil core before this point and it must not be in use elsewhere.
func (tun *TunAdapter) Init(core *yggdrasil.Core, config *config.NodeState, log *log.Logger, options interface{}) error {
tunoptions, ok := options.(TunOptions)
if !ok {
return fmt.Errorf("invalid options supplied to TunAdapter module")
}
func (tun *TunAdapter) Init(core *core.Core, config *config.NodeConfig, log *log.Logger, options interface{}) error {
tun.core = core
tun.config = config
tun.log = log
tun.listener = tunoptions.Listener
tun.dialer = tunoptions.Dialer
tun.addrToConn = make(map[address.Address]*tunConn)
tun.subnetToConn = make(map[address.Subnet]*tunConn)
tun.dials = make(map[string][][]byte)
tun.writer.tun = tun
tun.reader.tun = tun
return nil
}
@ -144,35 +114,36 @@ func (tun *TunAdapter) _start() error {
if tun.isOpen {
return errors.New("TUN module is already started")
}
current := tun.config.GetCurrent()
if tun.config == nil || tun.listener == nil || tun.dialer == nil {
tun.config.RLock()
defer tun.config.RUnlock()
if tun.config == nil {
return errors.New("no configuration available to TUN")
}
var boxPub crypto.BoxPubKey
boxPubHex, err := hex.DecodeString(current.EncryptionPublicKey)
if err != nil {
return err
}
copy(boxPub[:], boxPubHex)
nodeID := crypto.GetNodeID(&boxPub)
tun.addr = *address.AddrForNodeID(nodeID)
tun.subnet = *address.SubnetForNodeID(nodeID)
pk := tun.core.PublicKey()
tun.addr = *address.AddrForKey(pk)
tun.subnet = *address.SubnetForKey(pk)
addr := fmt.Sprintf("%s/%d", net.IP(tun.addr[:]).String(), 8*len(address.GetPrefix())-1)
if current.IfName == "none" || current.IfName == "dummy" {
if tun.config.IfName == "none" || tun.config.IfName == "dummy" {
tun.log.Debugln("Not starting TUN as ifname is none or dummy")
tun.isEnabled = false
go tun.write()
return nil
}
if err := tun.setup(current.IfName, addr, current.IfMTU); err != nil {
mtu := tun.config.IfMTU
if tun.core.MaxMTU() < mtu {
mtu = tun.core.MaxMTU()
}
if err := tun.setup(tun.config.IfName, addr, mtu); err != nil {
return err
}
if tun.MTU() != current.IfMTU {
tun.log.Warnf("Warning: Interface MTU %d automatically adjusted to %d (supported range is 1280-%d)", current.IfMTU, tun.MTU(), MaximumMTU())
if tun.MTU() != mtu {
tun.log.Warnf("Warning: Interface MTU %d automatically adjusted to %d (supported range is 1280-%d)", tun.config.IfMTU, tun.MTU(), MaximumMTU())
}
tun.core.SetMaximumSessionMTU(tun.MTU())
tun.core.SetMTU(tun.MTU())
tun.isOpen = true
go tun.handler()
tun.reader.Act(nil, tun.reader._read) // Start the reader
tun.ckr.init(tun)
tun.isEnabled = true
go tun.read()
go tun.write()
return nil
}
@ -204,80 +175,3 @@ func (tun *TunAdapter) _stop() error {
}
return nil
}
// UpdateConfig updates the TUN module with the provided config.NodeConfig
// and then signals the various module goroutines to reconfigure themselves if
// needed.
func (tun *TunAdapter) UpdateConfig(config *config.NodeConfig) {
tun.log.Debugln("Reloading TUN configuration...")
// Replace the active configuration with the supplied one
tun.config.Replace(*config)
// If the MTU has changed in the TUN module then this is where we would
// tell the router so that updated session pings can be sent. However, we
// don't currently update the MTU of the adapter once it has been created so
// this doesn't actually happen in the real world yet.
// tun.core.SetMaximumSessionMTU(...)
// Notify children about the configuration change
tun.Act(nil, tun.ckr.configure)
}
func (tun *TunAdapter) handler() error {
for {
// Accept the incoming connection
conn, err := tun.listener.Accept()
if err != nil {
tun.log.Errorln("TUN connection accept error:", err)
return err
}
phony.Block(tun, func() {
if _, err := tun._wrap(conn.(*yggdrasil.Conn)); err != nil {
// Something went wrong when storing the connection, typically that
// something already exists for this address or subnet
tun.log.Debugln("TUN handler wrap:", err)
}
})
}
}
func (tun *TunAdapter) _wrap(conn *yggdrasil.Conn) (c *tunConn, err error) {
// Prepare a session wrapper for the given connection
s := tunConn{
tun: tun,
conn: conn,
stop: make(chan struct{}),
}
c = &s
// Get the remote address and subnet of the other side
remotePubKey := conn.RemoteAddr().(*crypto.BoxPubKey)
remoteNodeID := crypto.GetNodeID(remotePubKey)
s.addr = *address.AddrForNodeID(remoteNodeID)
s.snet = *address.SubnetForNodeID(remoteNodeID)
// Work out if this is already a destination we already know about
atc, aok := tun.addrToConn[s.addr]
stc, sok := tun.subnetToConn[s.snet]
// If we know about a connection for this destination already then assume it
// is no longer valid and close it
if aok {
atc._close_from_tun()
err = errors.New("replaced connection for address")
} else if sok {
stc._close_from_tun()
err = errors.New("replaced connection for subnet")
}
// Save the session wrapper so that we can look it up quickly next time
// we receive a packet through the interface for this address
tun.addrToConn[s.addr] = &s
tun.subnetToConn[s.snet] = &s
// Set the read callback and start the timeout
conn.SetReadCallback(func(bs []byte) {
s.Act(conn, func() {
s._read(bs)
})
})
s.Act(nil, s.stillAlive)
// Return
return c, err
}

View File

@ -73,14 +73,14 @@ type in6_ifreq_lifetime struct {
}
// Configures the TUN adapter with the correct IPv6 address and MTU.
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
iface, err := wgtun.CreateTUN(ifname, int(mtu))
if err != nil {
panic(err)
}
tun.iface = iface
if mtu, err := iface.MTU(); err == nil {
tun.mtu = getSupportedMTU(MTU(mtu))
tun.mtu = getSupportedMTU(uint64(mtu))
} else {
tun.mtu = 0
}

View File

@ -16,7 +16,7 @@ import (
)
// Configures the "utun" adapter with the correct IPv6 address and MTU.
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
if ifname == "auto" {
ifname = "utun"
}
@ -25,8 +25,8 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
panic(err)
}
tun.iface = iface
if mtu, err := iface.MTU(); err == nil {
tun.mtu = getSupportedMTU(MTU(mtu))
if m, err := iface.MTU(); err == nil {
tun.mtu = getSupportedMTU(uint64(m))
} else {
tun.mtu = 0
}
@ -40,26 +40,29 @@ const (
darwin_ND6_INFINITE_LIFETIME = 0xFFFFFFFF // netinet6/nd6.h
)
// nolint:structcheck
type in6_addrlifetime struct {
ia6t_expire float64
ia6t_preferred float64
ia6t_expire float64 // nolint:unused
ia6t_preferred float64 // nolint:unused
ia6t_vltime uint32
ia6t_pltime uint32
}
// nolint:structcheck
type sockaddr_in6 struct {
sin6_len uint8
sin6_family uint8
sin6_port uint8
sin6_flowinfo uint32
sin6_port uint8 // nolint:unused
sin6_flowinfo uint32 // nolint:unused
sin6_addr [8]uint16
sin6_scope_id uint32
sin6_scope_id uint32 // nolint:unused
}
// nolint:structcheck
type in6_aliasreq struct {
ifra_name [16]byte
ifra_addr sockaddr_in6
ifra_dstaddr sockaddr_in6
ifra_dstaddr sockaddr_in6 // nolint:unused
ifra_prefixmask sockaddr_in6
ifra_flags uint32
ifra_lifetime in6_addrlifetime
@ -87,7 +90,7 @@ func (tun *TunAdapter) setupAddress(addr string) error {
ar.ifra_prefixmask.sin6_len = uint8(unsafe.Sizeof(ar.ifra_prefixmask))
b := make([]byte, 16)
binary.LittleEndian.PutUint16(b, uint16(0xFE00))
ar.ifra_prefixmask.sin6_addr[0] = uint16(binary.BigEndian.Uint16(b))
ar.ifra_prefixmask.sin6_addr[0] = binary.BigEndian.Uint16(b)
ar.ifra_addr.sin6_len = uint8(unsafe.Sizeof(ar.ifra_addr))
ar.ifra_addr.sin6_family = unix.AF_INET6
@ -96,7 +99,7 @@ func (tun *TunAdapter) setupAddress(addr string) error {
addr, _ := strconv.ParseUint(parts[i], 16, 16)
b := make([]byte, 16)
binary.LittleEndian.PutUint16(b, uint16(addr))
ar.ifra_addr.sin6_addr[i] = uint16(binary.BigEndian.Uint16(b))
ar.ifra_addr.sin6_addr[i] = binary.BigEndian.Uint16(b)
}
ar.ifra_flags |= darwin_IN6_IFF_NODAD

View File

@ -10,7 +10,7 @@ import (
)
// Configures the TUN adapter with the correct IPv6 address and MTU.
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
if ifname == "auto" {
ifname = "\000"
}
@ -20,7 +20,7 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
}
tun.iface = iface
if mtu, err := iface.MTU(); err == nil {
tun.mtu = getSupportedMTU(MTU(mtu))
tun.mtu = getSupportedMTU(uint64(mtu))
} else {
tun.mtu = 0
}

View File

@ -10,14 +10,14 @@ import (
)
// Configures the TUN adapter with the correct IPv6 address and MTU.
func (tun *TunAdapter) setup(ifname string, addr string, mtu int) error {
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
iface, err := wgtun.CreateTUN(ifname, mtu)
if err != nil {
panic(err)
}
tun.iface = iface
if mtu, err := iface.MTU(); err == nil {
tun.mtu = getSupportedMTU(mtu)
tun.mtu = getSupportedMTU(uint64(mtu))
} else {
tun.mtu = 0
}

View File

@ -19,7 +19,7 @@ import (
// This is to catch Windows platforms
// Configures the TUN adapter with the correct IPv6 address and MTU.
func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
func (tun *TunAdapter) setup(ifname string, addr string, mtu uint64) error {
if ifname == "auto" {
ifname = defaults.GetDefaults().DefaultIfName
}
@ -43,14 +43,14 @@ func (tun *TunAdapter) setup(ifname string, addr string, mtu MTU) error {
return err
}
if mtu, err := iface.MTU(); err == nil {
tun.mtu = MTU(mtu)
tun.mtu = uint64(mtu)
}
return nil
})
}
// Sets the MTU of the TAP adapter.
func (tun *TunAdapter) setupMTU(mtu MTU) error {
func (tun *TunAdapter) setupMTU(mtu uint64) error {
if tun.iface == nil || tun.Name() == "" {
return errors.New("Can't configure MTU as TUN adapter is not present")
}

View File

@ -1,3 +0,0 @@
package types
type MTU uint16

View File

@ -1,103 +0,0 @@
package util
import (
"errors"
"runtime"
"sync"
"time"
)
// Cancellation is used to signal when things should shut down, such as signaling anything associated with a Conn to exit.
// It is similar to a context, but with an error to specify the reason for the cancellation.
type Cancellation interface {
Finished() <-chan struct{} // Finished returns a channel which will be closed when Cancellation.Cancel is first called.
Cancel(error) error // Cancel closes the channel returned by Finished and sets the error returned by Error, or else returns the existing error if the Cancellation has already run.
Error() error // Error returns the error provided to Cancel, or nil if no error has been provided.
}
// CancellationFinalized is an error returned if a cancellation object was garbage collected and the finalizer was run.
// If you ever see this, then you're probably doing something wrong with your code.
var CancellationFinalized = errors.New("finalizer called")
// CancellationTimeoutError is used when a CancellationWithTimeout or CancellationWithDeadline is cancelled due to said timeout.
var CancellationTimeoutError = errors.New("timeout")
// CancellationFinalizer is set as a finalizer when creating a new cancellation with NewCancellation(), and generally shouldn't be needed by the user, but is included in case other implementations of the same interface want to make use of it.
func CancellationFinalizer(c Cancellation) {
c.Cancel(CancellationFinalized)
}
type cancellation struct {
cancel chan struct{}
mutex sync.RWMutex
err error
done bool
}
// NewCancellation returns a pointer to a struct satisfying the Cancellation interface.
func NewCancellation() Cancellation {
c := cancellation{
cancel: make(chan struct{}),
}
runtime.SetFinalizer(&c, CancellationFinalizer)
return &c
}
// Finished returns a channel which will be closed when Cancellation.Cancel is first called.
func (c *cancellation) Finished() <-chan struct{} {
return c.cancel
}
// Cancel closes the channel returned by Finished and sets the error returned by Error, or else returns the existing error if the Cancellation has already run.
func (c *cancellation) Cancel(err error) error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.done {
return c.err
}
c.err = err
c.done = true
close(c.cancel)
return nil
}
// Error returns the error provided to Cancel, or nil if no error has been provided.
func (c *cancellation) Error() error {
c.mutex.RLock()
err := c.err
c.mutex.RUnlock()
return err
}
// CancellationChild returns a new Cancellation which can be Cancelled independently of the parent, but which will also be Cancelled if the parent is Cancelled first.
func CancellationChild(parent Cancellation) Cancellation {
child := NewCancellation()
go func() {
select {
case <-child.Finished():
case <-parent.Finished():
child.Cancel(parent.Error())
}
}()
return child
}
// CancellationWithTimeout returns a child Cancellation that will automatically be Cancelled with a CancellationTimeoutError after the timeout.
func CancellationWithTimeout(parent Cancellation, timeout time.Duration) Cancellation {
child := CancellationChild(parent)
go func() {
timer := time.NewTimer(timeout)
defer TimerStop(timer)
select {
case <-child.Finished():
case <-timer.C:
child.Cancel(CancellationTimeoutError)
}
}()
return child
}
// CancellationWithDeadline returns a child Cancellation that will automatically be Cancelled with a CancellationTimeoutError at the specified deadline.
func CancellationWithDeadline(parent Cancellation, deadline time.Time) Cancellation {
return CancellationWithTimeout(parent, deadline.Sub(time.Now()))
}
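// A hedged usage sketch of the Cancellation API removed above, written against
// the pre-0.4 src/util package; the timeout and the "shutting down" error are
// arbitrary values for illustration.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	parent := util.NewCancellation()
	// The child is cancelled automatically if the parent is cancelled first,
	// or with CancellationTimeoutError once the timeout expires.
	child := util.CancellationWithTimeout(parent, time.Second)

	go func() {
		// Simulate some other component shutting down early.
		time.Sleep(100 * time.Millisecond)
		parent.Cancel(errors.New("shutting down"))
	}()

	<-child.Finished()
	fmt.Println("cancelled:", child.Error())
}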

View File

@ -5,35 +5,9 @@ package util
// These are misc. utility functions that didn't really fit anywhere else
import (
"runtime"
"strconv"
"strings"
"time"
)
// Yield just executes runtime.Gosched(), and is included so we don't need to explicitly import runtime elsewhere.
func Yield() {
runtime.Gosched()
}
// LockThread executes runtime.LockOSThread(), and is included so we don't need to explicitly import runtime elsewhere.
func LockThread() {
runtime.LockOSThread()
}
// UnlockThread executes runtime.UnlockOSThread(), and is included so we don't need to explicitly import runtime elsewhere.
func UnlockThread() {
runtime.UnlockOSThread()
}
// ResizeBytes returns a slice of the specified length. If the provided slice has sufficient capacity, it will be resized and returned rather than allocating a new slice.
func ResizeBytes(bs []byte, length int) []byte {
if cap(bs) >= length {
return bs[:length]
}
return make([]byte, length)
}
// TimerStop stops a timer and makes sure the channel is drained; it returns true if the timer was stopped before firing.
func TimerStop(t *time.Timer) bool {
stopped := t.Stop()
@ -46,7 +20,7 @@ func TimerStop(t *time.Timer) bool {
// FuncTimeout runs the provided function in a separate goroutine, and returns true if the function finishes executing before the timeout passes, or false if the timeout passes.
// It includes no mechanism to stop the function if the timeout fires, so the user is expected to do so on their own (such as with a Cancellation or a context).
func FuncTimeout(f func(), timeout time.Duration) bool {
func FuncTimeout(timeout time.Duration, f func()) bool {
success := make(chan struct{})
go func() {
defer close(success)
@ -61,70 +35,3 @@ func FuncTimeout(f func(), timeout time.Duration) bool {
return false
}
}
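// A minimal sketch of calling FuncTimeout with the reordered parameters shown
// above (timeout first, then the function); the import path matches the other
// files in this diff and the durations are arbitrary.
package main

import (
	"fmt"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	ok := util.FuncTimeout(time.Second, func() {
		time.Sleep(10 * time.Millisecond) // Stand-in for real work
	})
	fmt.Println("finished before timeout:", ok) // Expected to print true
}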
// Difference loops over two string slices and returns the elements of a which do not appear in b.
// This is somewhat useful when needing to determine which elements of a configuration file have changed.
func Difference(a, b []string) []string {
ab := []string{}
mb := map[string]bool{}
for _, x := range b {
mb[x] = true
}
for _, x := range a {
if !mb[x] {
ab = append(ab, x)
}
}
return ab
}
// DecodeCoordString decodes a string representing coordinates in [1 2 3] format
// and returns a []uint64.
func DecodeCoordString(in string) (out []uint64) {
s := strings.Trim(in, "[]")
t := strings.Split(s, " ")
for _, a := range t {
if u, err := strconv.ParseUint(a, 0, 64); err == nil {
out = append(out, u)
}
}
return out
}
// GetFlowKey takes an IP packet as an argument and returns some information about the traffic flow.
// For IPv4 packets, this is derived from the source and destination protocol and port numbers.
// For IPv6 packets, this is derived from the FlowLabel field of the packet if this was set, otherwise it's handled like IPv4.
// The FlowKey is then used internally by Yggdrasil for congestion control.
func GetFlowKey(bs []byte) uint64 {
// Work out the flowkey - this is used to determine which switch queue
// traffic will be pushed to in the event of congestion
var flowkey uint64
// Get the IP protocol version from the packet
switch bs[0] & 0xf0 {
case 0x40: // IPv4 packet
ihl := (bs[0] & 0x0f) * 4 // whole IPv4 header length (min 20)
// 8 is minimum UDP packet length
if ihl >= 20 && len(bs)-int(ihl) >= 8 {
switch bs[9] /* protocol */ {
case 0x06 /* TCP */, 0x11 /* UDP */, 0x84 /* SCTP */ :
flowkey = uint64(bs[9])<<32 /* proto */ |
uint64(bs[ihl+0])<<24 | uint64(bs[ihl+1])<<16 /* sport */ |
uint64(bs[ihl+2])<<8 | uint64(bs[ihl+3]) /* dport */
}
}
case 0x60: // IPv6 packet
// Check if the flowlabel was specified in the packet header
flowkey = uint64(bs[1]&0x0f)<<16 | uint64(bs[2])<<8 | uint64(bs[3])
// If the flowlabel isn't present, make the flowkey from proto | sport | dport
// if the packet meets the minimum UDP packet length
if flowkey == 0 && len(bs) >= 48 {
switch bs[9] /* protocol */ {
case 0x06 /* TCP */, 0x11 /* UDP */, 0x84 /* SCTP */ :
flowkey = uint64(bs[6])<<32 /* proto */ |
uint64(bs[40])<<24 | uint64(bs[41])<<16 /* sport */ |
uint64(bs[42])<<8 | uint64(bs[43]) /* dport */
}
}
}
return flowkey
}
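// A hedged sketch of the flow key calculation that the removed GetFlowKey above
// performed for IPv4/TCP, repeated inline because the helper no longer exists
// after this change; the header bytes are fabricated purely for illustration.
package main

import "fmt"

func main() {
	// A minimal 20-byte IPv4 header followed by the first four bytes of a TCP
	// header (source port 80, destination port 12345).
	pkt := make([]byte, 24)
	pkt[0] = 0x45                 // Version 4, IHL 5 (20-byte header)
	pkt[9] = 0x06                 // Protocol: TCP
	pkt[20], pkt[21] = 0x00, 0x50 // Source port 80
	pkt[22], pkt[23] = 0x30, 0x39 // Destination port 12345

	ihl := (pkt[0] & 0x0f) * 4 // Header length in bytes
	flowkey := uint64(pkt[9])<<32 | // protocol
		uint64(pkt[ihl+0])<<24 | uint64(pkt[ihl+1])<<16 | // source port
		uint64(pkt[ihl+2])<<8 | uint64(pkt[ihl+3]) // destination port
	fmt.Printf("flowkey: %#x\n", flowkey)
}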

View File

@ -1,29 +0,0 @@
package util
import "runtime"
var workerPool chan func()
func init() {
maxProcs := runtime.GOMAXPROCS(0)
if maxProcs < 1 {
maxProcs = 1
}
workerPool = make(chan func(), maxProcs)
for idx := 0; idx < maxProcs; idx++ {
go func() {
for f := range workerPool {
f()
}
}()
}
}
// WorkerGo submits a job to a pool of GOMAXPROCS worker goroutines.
// This is meant for short non-blocking functions f() where you could just go f(),
// but you want some kind of backpressure to prevent spawning endless goroutines.
// WorkerGo returns as soon as the function is queued to run, not when it finishes.
// In Yggdrasil, these workers are used for certain cryptographic operations.
func WorkerGo(f func()) {
workerPool <- f
}
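// A hedged sketch of using the WorkerGo pool removed above: short CPU-bound
// jobs are queued to GOMAXPROCS workers so callers get some backpressure. This
// is written against the pre-0.4 src/util package where WorkerGo still exists;
// the hashing is an arbitrary stand-in for the cryptographic work mentioned in
// the comment.
package main

import (
	"crypto/sha256"
	"fmt"
	"sync"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		data := []byte{byte(i)}
		util.WorkerGo(func() {
			defer wg.Done()
			_ = sha256.Sum256(data) // Short, non-blocking work
		})
	}
	wg.Wait()
	fmt.Println("all jobs finished")
}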

View File

@ -1,562 +0,0 @@
package yggdrasil
import (
"encoding/hex"
"errors"
"fmt"
"net"
"sort"
"time"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/Arceliar/phony"
)
// Peer represents a single peer object. This contains information from the
// preferred switch port for this peer, although there may be more than one
// active switch port connection to the peer in reality.
//
// This struct is informational only - you cannot manipulate peer connections
// using instances of this struct. You should use the AddPeer or RemovePeer
// functions instead.
type Peer struct {
PublicKey crypto.BoxPubKey // The public key of the remote node
Endpoint string // The connection string used to connect to the peer
BytesSent uint64 // Number of bytes sent to this peer
BytesRecvd uint64 // Number of bytes received from this peer
Protocol string // The transport protocol that this peer is connected with, typically "tcp"
Port uint64 // Switch port number for this peer connection
Uptime time.Duration // How long this peering has been active for
}
// SwitchPeer represents a switch connection to a peer. Note that there may be
// multiple switch peers per actual peer, e.g. if there are multiple connections
// to a given node.
//
// This struct is informational only - you cannot manipulate switch peer
// connections using instances of this struct. You should use the AddPeer or
// RemovePeer functions instead.
type SwitchPeer struct {
PublicKey crypto.BoxPubKey // The public key of the remote node
Coords []uint64 // The coordinates of the remote node
BytesSent uint64 // Number of bytes sent via this switch port
BytesRecvd uint64 // Number of bytes received via this switch port
Port uint64 // Switch port number for this switch peer
Protocol string // The transport protocol that this switch port is connected with, typically "tcp"
Endpoint string // The connection string used to connect to the switch peer
}
// DHTEntry represents a single DHT entry that has been learned or cached from
// DHT searches.
type DHTEntry struct {
PublicKey crypto.BoxPubKey
Coords []uint64
LastSeen time.Duration
}
// DHTRes represents a DHT response, as returned by DHTPing.
type DHTRes struct {
PublicKey crypto.BoxPubKey // key of the sender
Coords []uint64 // coords of the sender
Dest crypto.NodeID // the destination node ID
Infos []DHTEntry // response
}
// NodeInfoPayload represents a RequestNodeInfo response, in bytes.
type NodeInfoPayload []byte
// SwitchQueues represents information from the switch related to link
// congestion and a list of switch queues created in response to congestion on a
// given link.
type SwitchQueues struct {
Queues []SwitchQueue // An array of SwitchQueue objects containing information about individual queues
Count uint64 // The current number of active switch queues
Size uint64 // The current total size of active switch queues
HighestCount uint64 // The highest recorded number of switch queues so far
HighestSize uint64 // The highest recorded total size of switch queues so far
MaximumSize uint64 // The maximum allowed total size of switch queues, as specified by config
}
// SwitchQueue represents a single switch queue. Switch queues are only created
// in response to congestion on a given link and represent how much data has
// been temporarily cached for sending once the congestion has cleared.
type SwitchQueue struct {
ID string // The ID of the switch queue
Size uint64 // The total size, in bytes, of the queue
Packets uint64 // The number of packets in the queue
Port uint64 // The switch port to which the queue applies
}
// Session represents an open session with another node. Sessions are opened in
// response to traffic being exchanged between two nodes using Conn objects.
// Note that sessions will automatically be closed by Yggdrasil if no traffic is
// exchanged for around two minutes.
type Session struct {
PublicKey crypto.BoxPubKey // The public key of the remote node
Coords []uint64 // The coordinates of the remote node
BytesSent uint64 // Bytes sent to the session
BytesRecvd uint64 // Bytes received from the session
MTU MTU // The maximum supported message size of the session
Uptime time.Duration // How long this session has been active for
WasMTUFixed bool // This field is no longer used
}
// GetPeers returns one or more Peer objects containing information about active
// peerings with other Yggdrasil nodes, where one of the responses always
// includes information about the current node (with a port number of 0). If
// there is exactly one entry then this node is not connected to any other nodes
// and is therefore isolated.
func (c *Core) GetPeers() []Peer {
var ports map[switchPort]*peer
phony.Block(&c.peers, func() { ports = c.peers.ports })
var peers []Peer
var ps []switchPort
for port := range ports {
ps = append(ps, port)
}
sort.Slice(ps, func(i, j int) bool { return ps[i] < ps[j] })
for _, port := range ps {
p := ports[port]
var info Peer
phony.Block(p, func() {
info = Peer{
Endpoint: p.intf.name(),
BytesSent: p.bytesSent,
BytesRecvd: p.bytesRecvd,
Protocol: p.intf.interfaceType(),
Port: uint64(port),
Uptime: time.Since(p.firstSeen),
}
copy(info.PublicKey[:], p.box[:])
})
peers = append(peers, info)
}
return peers
}
// GetSwitchPeers returns zero or more SwitchPeer objects containing information
// about switch port connections with other Yggdrasil nodes. Note that, unlike
// GetPeers, GetSwitchPeers does not include information about the current node,
// therefore it is possible for this to return zero elements if the node is
// isolated or not connected to any peers.
func (c *Core) GetSwitchPeers() []SwitchPeer {
var switchpeers []SwitchPeer
var table *lookupTable
var ports map[switchPort]*peer
phony.Block(&c.peers, func() {
table = c.peers.table
ports = c.peers.ports
})
for _, elem := range table.elems {
peer, isIn := ports[elem.port]
if !isIn {
continue
}
coords := elem.locator.getCoords()
var info SwitchPeer
phony.Block(peer, func() {
info = SwitchPeer{
Coords: append([]uint64{}, wire_coordsBytestoUint64s(coords)...),
BytesSent: peer.bytesSent,
BytesRecvd: peer.bytesRecvd,
Port: uint64(elem.port),
Protocol: peer.intf.interfaceType(),
Endpoint: peer.intf.remote(),
}
copy(info.PublicKey[:], peer.box[:])
})
switchpeers = append(switchpeers, info)
}
return switchpeers
}
// GetDHT returns zero or more entries as stored in the DHT, cached primarily
// from searches that have already taken place.
func (c *Core) GetDHT() []DHTEntry {
var dhtentries []DHTEntry
getDHT := func() {
now := time.Now()
var dhtentry []*dhtInfo
for _, v := range c.router.dht.table {
dhtentry = append(dhtentry, v)
}
sort.SliceStable(dhtentry, func(i, j int) bool {
return dht_ordered(&c.router.dht.nodeID, dhtentry[i].getNodeID(), dhtentry[j].getNodeID())
})
for _, v := range dhtentry {
info := DHTEntry{
Coords: append([]uint64{}, wire_coordsBytestoUint64s(v.coords)...),
LastSeen: now.Sub(v.recv),
}
copy(info.PublicKey[:], v.key[:])
dhtentries = append(dhtentries, info)
}
}
phony.Block(&c.router, getDHT)
return dhtentries
}
// GetSessions returns a list of open sessions from this node to other nodes.
func (c *Core) GetSessions() []Session {
var sessions []Session
getSessions := func() {
for _, sinfo := range c.router.sessions.sinfos {
var session Session
workerFunc := func() {
session = Session{
Coords: append([]uint64{}, wire_coordsBytestoUint64s(sinfo.coords)...),
MTU: sinfo._getMTU(),
BytesSent: sinfo.bytesSent,
BytesRecvd: sinfo.bytesRecvd,
Uptime: time.Now().Sub(sinfo.timeOpened),
WasMTUFixed: sinfo.wasMTUFixed,
}
copy(session.PublicKey[:], sinfo.theirPermPub[:])
}
phony.Block(sinfo, workerFunc)
// TODO? skipped known but timed out sessions?
sessions = append(sessions, session)
}
}
phony.Block(&c.router, getSessions)
return sessions
}
// ConnListen returns a listener for Yggdrasil session connections. You can only
// call this function once as each Yggdrasil node can only have a single
// ConnListener. Make sure to keep the reference to this for as long as it is
// needed.
func (c *Core) ConnListen() (*Listener, error) {
c.router.sessions.listenerMutex.Lock()
defer c.router.sessions.listenerMutex.Unlock()
if c.router.sessions.listener != nil {
return nil, errors.New("a listener already exists")
}
c.router.sessions.listener = &Listener{
core: c,
conn: make(chan *Conn),
close: make(chan interface{}),
}
return c.router.sessions.listener, nil
}
// ConnDialer returns a dialer for Yggdrasil session connections. Since
// ConnDialers are stateless, you can request as many dialers as you like,
// although ideally you should request only one and keep the reference to it for
// as long as it is needed.
func (c *Core) ConnDialer() (*Dialer, error) {
return &Dialer{
core: c,
}, nil
}
// ListenTCP starts a new TCP listener. The input URI should match that of the
// "Listen" configuration item, e.g.
// tcp://a.b.c.d:e
func (c *Core) ListenTCP(uri string) (*TcpListener, error) {
return c.links.tcp.listen(uri, nil)
}
// ListenTLS starts a new TLS listener. The input URI should match that of the
// "Listen" configuration item, e.g.
// tls://a.b.c.d:e
func (c *Core) ListenTLS(uri string) (*TcpListener, error) {
return c.links.tcp.listen(uri, c.links.tcp.tls.forListener)
}
// NodeID gets the node ID. This is derived from your router encryption keys.
// Remote nodes wanting to open connections to your node will need to know your
// node ID.
func (c *Core) NodeID() *crypto.NodeID {
return crypto.GetNodeID(&c.boxPub)
}
// TreeID gets the tree ID. This is derived from your switch signing keys. There
// is typically no need to share this key.
func (c *Core) TreeID() *crypto.TreeID {
return crypto.GetTreeID(&c.sigPub)
}
// SigningPublicKey gets the node's signing public key, as used by the switch.
func (c *Core) SigningPublicKey() string {
return hex.EncodeToString(c.sigPub[:])
}
// EncryptionPublicKey gets the node's encryption public key, as used by the
// router.
func (c *Core) EncryptionPublicKey() string {
return hex.EncodeToString(c.boxPub[:])
}
// Coords returns the current coordinates of the node. Note that these can
// change at any time for a number of reasons, not limited to but including
// changes to peerings (either yours or a parent node's) or changes to the network
// root.
//
// This function may return an empty array - this is normal behaviour if either
// you are the root of the network that you are connected to, or you are not
// connected to any other nodes (effectively making you the root of a
// single-node network).
func (c *Core) Coords() []uint64 {
var coords []byte
phony.Block(&c.router, func() {
coords = c.router.table.self.getCoords()
})
return wire_coordsBytestoUint64s(coords)
}
// Address gets the IPv6 address of the Yggdrasil node. This is always a /128
// address. The IPv6 address is only relevant when the node is operating as an
// IP router and often is meaningless when embedded into an application, unless
// that application also implements either VPN functionality or deals with IP
// packets specifically.
func (c *Core) Address() net.IP {
address := net.IP(address.AddrForNodeID(c.NodeID())[:])
return address
}
// Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
// /64 subnet. The IPv6 subnet is only relevant when the node is operating as an
// IP router and often is meaningless when embedded into an application, unless
// that application also implements either VPN functionality or deals with IP
// packets specifically.
func (c *Core) Subnet() net.IPNet {
subnet := address.SubnetForNodeID(c.NodeID())[:]
subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
}
// MyNodeInfo gets the currently configured nodeinfo. NodeInfo is typically
// specified through the "NodeInfo" option in the node configuration or using
// the SetNodeInfo function, although it may also contain other built-in values
// such as "buildname", "buildversion" etc.
func (c *Core) MyNodeInfo() NodeInfoPayload {
return c.router.nodeinfo.getNodeInfo()
}
// SetNodeInfo sets the local nodeinfo. Note that nodeinfo can be any value or
// struct, it will be serialised into JSON automatically.
func (c *Core) SetNodeInfo(nodeinfo interface{}, nodeinfoprivacy bool) {
c.router.nodeinfo.setNodeInfo(nodeinfo, nodeinfoprivacy)
}
// GetMaximumSessionMTU returns the maximum allowed session MTU size.
func (c *Core) GetMaximumSessionMTU() MTU {
var mtu MTU
phony.Block(&c.router, func() {
mtu = c.router.sessions.myMaximumMTU
})
return mtu
}
// SetMaximumSessionMTU sets the maximum allowed session MTU size. The default
// value is 65535 bytes. Session pings will be sent to update all open sessions
// if the MTU has changed.
func (c *Core) SetMaximumSessionMTU(mtu MTU) {
phony.Block(&c.router, func() {
if c.router.sessions.myMaximumMTU != mtu {
c.router.sessions.myMaximumMTU = mtu
c.router.sessions.reconfigure()
}
})
}
// GetNodeInfo requests nodeinfo from a remote node, as identified by the given
// public key and coordinates. The third parameter specifies whether a cached
// result is acceptable - using the cache generates less traffic than requesting
// fresh nodeinfo each time, which is useful when, e.g. crawling the network.
func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool) (NodeInfoPayload, error) {
response := make(chan *NodeInfoPayload, 1)
c.router.nodeinfo.addCallback(key, func(nodeinfo *NodeInfoPayload) {
defer func() { recover() }()
select {
case response <- nodeinfo:
default:
}
})
c.router.nodeinfo.sendNodeInfo(key, wire_coordsUint64stoBytes(coords), false)
phony.Block(&c.router.nodeinfo, func() {}) // Wait for sendNodeInfo before starting timer
timer := time.AfterFunc(6*time.Second, func() { close(response) })
defer timer.Stop()
for res := range response {
return *res, nil
}
return NodeInfoPayload{}, fmt.Errorf("getNodeInfo timeout: %s", hex.EncodeToString(key[:]))
}
// SetSessionGatekeeper allows you to configure a handler function for deciding
// whether a session should be allowed or not. The default session firewall is
// implemented in this way. The function receives the public key of the remote
// side and a boolean which is true if we initiated the session or false if we
// received an incoming session request. The function should return true to
// allow the session or false to reject it.
func (c *Core) SetSessionGatekeeper(f func(pubkey *crypto.BoxPubKey, initiator bool) bool) {
c.router.sessions.isAllowedMutex.Lock()
defer c.router.sessions.isAllowedMutex.Unlock()
c.router.sessions.isAllowedHandler = f
}
// SetLogger sets the output logger of the Yggdrasil node after startup. This
// may be useful if you want to redirect the output later. Note that this
// expects a Logger from the github.com/gologme/log package and not from Go's
// built-in log package.
func (c *Core) SetLogger(log *log.Logger) {
c.log = log
}
// AddPeer adds a peer. This should be specified in the peer URI format, e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This adds the peer to the peer list, so that they will be called again if the
// connection drops.
func (c *Core) AddPeer(addr string, sintf string) error {
if err := c.CallPeer(addr, sintf); err != nil {
// TODO: We maybe want this to write the peer to the persistent
// configuration even if a connection attempt fails, but first we'll need to
// move the code to check the peer URI so that we don't deliberately save a
// peer with a known bad URI. Loading peers from config should really do the
// same thing too but I don't think that happens today
return err
}
c.config.Mutex.Lock()
defer c.config.Mutex.Unlock()
if sintf == "" {
for _, peer := range c.config.Current.Peers {
if peer == addr {
return errors.New("peer already added")
}
}
c.config.Current.Peers = append(c.config.Current.Peers, addr)
} else {
if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
for _, peer := range c.config.Current.InterfacePeers[sintf] {
if peer == addr {
return errors.New("peer already added")
}
}
}
if _, ok := c.config.Current.InterfacePeers[sintf]; !ok {
c.config.Current.InterfacePeers[sintf] = []string{addr}
} else {
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf], addr)
}
}
return nil
}
func (c *Core) RemovePeer(addr string, sintf string) error {
if sintf == "" {
for i, peer := range c.config.Current.Peers {
if peer == addr {
c.config.Current.Peers = append(c.config.Current.Peers[:i], c.config.Current.Peers[i+1:]...)
break
}
}
} else if _, ok := c.config.Current.InterfacePeers[sintf]; ok {
for i, peer := range c.config.Current.InterfacePeers[sintf] {
if peer == addr {
c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf][:i], c.config.Current.InterfacePeers[sintf][i+1:]...)
break
}
}
}
c.peers.Act(nil, func() {
ports := c.peers.ports
for _, peer := range ports {
if addr == peer.intf.name() {
c.peers._removePeer(peer)
}
}
})
return nil
}
// CallPeer calls a peer once. This should be specified in the peer URI format,
// e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This does not add the peer to the peer list, so if the connection drops, the
// peer will not be called again automatically.
func (c *Core) CallPeer(addr string, sintf string) error {
return c.links.call(addr, sintf)
}
// DisconnectPeer disconnects a peer once. This should be specified as a port
// number.
func (c *Core) DisconnectPeer(port uint64) error {
c.peers.Act(nil, func() {
if p, isIn := c.peers.ports[switchPort(port)]; isIn {
p.Act(&c.peers, p._removeSelf)
}
})
return nil
}
// GetAllowedEncryptionPublicKeys returns the public keys permitted for incoming
// peer connections. If this list is empty then all incoming peer connections
// are accepted by default.
func (c *Core) GetAllowedEncryptionPublicKeys() []string {
return c.peers.getAllowedEncryptionPublicKeys()
}
// AddAllowedEncryptionPublicKey whitelists a key for incoming peer connections.
// By default all incoming peer connections are accepted, but adding public keys
// to the whitelist using this function enables strict checking from that point
// forward. Once the whitelist is enabled, only peer connections from
// whitelisted public keys will be accepted.
func (c *Core) AddAllowedEncryptionPublicKey(bstr string) (err error) {
c.peers.addAllowedEncryptionPublicKey(bstr)
return nil
}
// RemoveAllowedEncryptionPublicKey removes a key from the whitelist for
// incoming peer connections. If none are set, an empty list permits all
// incoming connections.
func (c *Core) RemoveAllowedEncryptionPublicKey(bstr string) (err error) {
c.peers.removeAllowedEncryptionPublicKey(bstr)
return nil
}
// DHTPing sends a DHT ping to the node with the provided key and coords,
// optionally looking up the specified target NodeID.
func (c *Core) DHTPing(key crypto.BoxPubKey, coords []uint64, target *crypto.NodeID) (DHTRes, error) {
resCh := make(chan *dhtRes, 1)
info := dhtInfo{
key: key,
coords: wire_coordsUint64stoBytes(coords),
}
if target == nil {
target = info.getNodeID()
}
rq := dhtReqKey{info.key, *target}
sendPing := func() {
c.router.dht.addCallback(&rq, func(res *dhtRes) {
resCh <- res
})
c.router.dht.ping(&info, &rq.dest)
}
phony.Block(&c.router, sendPing)
// TODO: do something better than the below...
res := <-resCh
if res != nil {
r := DHTRes{
Coords: append([]uint64{}, wire_coordsBytestoUint64s(res.Coords)...),
}
copy(r.PublicKey[:], res.Key[:])
for _, i := range res.Infos {
e := DHTEntry{
Coords: append([]uint64{}, wire_coordsBytestoUint64s(i.coords)...),
}
copy(e.PublicKey[:], i.key[:])
r.Infos = append(r.Infos, e)
}
return r, nil
}
return DHTRes{}, fmt.Errorf("DHT ping timeout: %s", hex.EncodeToString(key[:]))
}
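// A hedged sketch of the pre-0.4 Core API removed above. It assumes a node that
// was configured and started elsewhere, uses the src/yggdrasil import path from
// before this change, and only calls methods listed in this file; printStatus
// and the peer URI are hypothetical.
package status

import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)

// printStatus dumps some basic node state and then adds a persistent peer.
func printStatus(core *yggdrasil.Core) error {
	subnet := core.Subnet()
	fmt.Println("address:", core.Address(), "subnet:", subnet.String())
	for _, p := range core.GetPeers() {
		// Port 0 is the local node itself; everything else is a real peering.
		fmt.Printf("port %d via %s, sent %d bytes\n", p.Port, p.Protocol, p.BytesSent)
	}
	// The URI below is a documentation address, not a real public peer.
	return core.AddPeer("tcp://192.0.2.1:12345", "")
}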

View File

@ -1,397 +0,0 @@
package yggdrasil
import (
"errors"
"fmt"
"net"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/types"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"github.com/Arceliar/phony"
)
type MTU = types.MTU
// ConnError implements the net.Error interface
type ConnError struct {
error
timeout bool
temporary bool
closed bool
maxsize int
}
// Timeout returns true if the error relates to a timeout condition on the
// connection.
func (e *ConnError) Timeout() bool {
return e.timeout
}
// Temporary returns true if the error is temporary or false if it is a permanent
// error condition.
func (e *ConnError) Temporary() bool {
return e.temporary
}
// PacketTooBig returns true if the error was caused by sending a packet that
// was too large; if so, PacketMaximumSize reports the maximum supported packet
// size that should be used for the connection.
func (e *ConnError) PacketTooBig() bool {
return e.maxsize > 0
}
// PacketMaximumSize returns the maximum supported packet size. This will only
// return a non-zero value if ConnError.PacketTooBig() returns true.
func (e *ConnError) PacketMaximumSize() int {
if !e.PacketTooBig() {
return 0
}
return e.maxsize
}
// Closed returns true if the session is already closed and is now unusable.
func (e *ConnError) Closed() bool {
return e.closed
}
// The Conn struct is a reference to an active connection session between the
// local node and a remote node. Conn implements the io.ReadWriteCloser
// interface and is used to send and receive traffic with a remote node.
type Conn struct {
phony.Inbox
core *Core
readDeadline *time.Time
writeDeadline *time.Time
nodeID *crypto.NodeID
nodeMask *crypto.NodeID
session *sessionInfo
mtu MTU
readCallback func([]byte)
readBuffer chan []byte
}
// TODO func NewConn() that initializes additional fields as needed
func newConn(core *Core, nodeID *crypto.NodeID, nodeMask *crypto.NodeID, session *sessionInfo) *Conn {
conn := Conn{
core: core,
nodeID: nodeID,
nodeMask: nodeMask,
session: session,
readBuffer: make(chan []byte, 1024),
}
return &conn
}
// String returns a string that uniquely identifies a connection. Currently this
// takes a form similar to "conn=0x0000000", which contains a memory reference
// to the Conn object. While this value should always be unique for each Conn
// object, the format of this is not strictly defined and may change in the
// future.
func (c *Conn) String() string {
var s string
phony.Block(c, func() { s = fmt.Sprintf("conn=%p", c) })
return s
}
func (c *Conn) setMTU(from phony.Actor, mtu MTU) {
c.Act(from, func() { c.mtu = mtu })
}
// This should never be called from an actor, used in the dial functions
func (c *Conn) search() error {
var err error
done := make(chan struct{})
phony.Block(&c.core.router, func() {
_, isIn := c.core.router.searches.searches[*c.nodeID]
if !isIn {
searchCompleted := func(sinfo *sessionInfo, e error) {
select {
case <-done:
// Somehow this was called multiple times, TODO don't let that happen
if sinfo != nil {
// Need to clean up to avoid a session leak
sinfo.cancel.Cancel(nil)
sinfo.sessions.removeSession(sinfo)
}
default:
if sinfo != nil {
// Finish initializing the session
c.session = sinfo
c.session.setConn(nil, c)
c.nodeID = crypto.GetNodeID(&c.session.theirPermPub)
for i := range c.nodeMask {
c.nodeMask[i] = 0xFF
}
}
err = e
close(done)
}
}
sinfo := c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
sinfo.startSearch()
} else {
err = errors.New("search already exists")
close(done)
}
})
<-done
if c.session == nil && err == nil {
panic("search failed but returned no error")
}
return err
}
// Used in session keep-alive traffic
func (c *Conn) _doSearch() {
s := fmt.Sprintf("conn=%p", c)
routerWork := func() {
// Check to see if there is a search already matching the destination
sinfo, isIn := c.core.router.searches.searches[*c.nodeID]
if !isIn {
// Nothing was found, so create a new search
searchCompleted := func(sinfo *sessionInfo, e error) {}
sinfo = c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
c.core.log.Debugf("%s DHT search started: %p", s, sinfo)
// Start the search
sinfo.startSearch()
}
}
c.core.router.Act(c.session, routerWork)
}
func (c *Conn) _getDeadlineCancellation(t *time.Time) (util.Cancellation, bool) {
if t != nil {
// A deadline is set, so return a Cancellation that uses it
c := util.CancellationWithDeadline(c.session.cancel, *t)
return c, true
}
// No deadline was set, so just return the existing cancellation and a dummy value
return c.session.cancel, false
}
// SetReadCallback allows you to specify a function that will be called whenever
// a packet is received. This should be used if you wish to implement
// asynchronous patterns for receiving data from the remote node.
//
// Note that if a read callback has been supplied, you should no longer attempt
// to use the synchronous Read function.
func (c *Conn) SetReadCallback(callback func([]byte)) {
c.Act(nil, func() {
c.readCallback = callback
c._drainReadBuffer()
})
}
func (c *Conn) _drainReadBuffer() {
if c.readCallback == nil {
return
}
select {
case bs := <-c.readBuffer:
c.readCallback(bs)
c.Act(nil, c._drainReadBuffer) // In case there's more
default:
}
}
// Called by the session to pass a new message to the Conn
func (c *Conn) recvMsg(from phony.Actor, msg []byte) {
c.Act(from, func() {
if c.readCallback != nil {
c.readCallback(msg)
} else {
select {
case c.readBuffer <- msg:
default:
}
}
})
}
// Used internally by Read; the caller is responsible for util.PutBytes when they're done.
func (c *Conn) readNoCopy() ([]byte, error) {
var cancel util.Cancellation
var doCancel bool
phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.readDeadline) })
if doCancel {
defer cancel.Cancel(nil)
}
// Wait for some traffic to come through from the session
select {
case <-cancel.Finished():
if cancel.Error() == util.CancellationTimeoutError {
return nil, ConnError{errors.New("read timeout"), true, false, false, 0}
}
return nil, ConnError{errors.New("session closed"), false, false, true, 0}
case bs := <-c.readBuffer:
return bs, nil
}
}
// Read allows you to read from the connection in a synchronous fashion. The
// function will block up until the point that either new data is available, the
// connection has been closed or the read deadline has been reached. If the
// function succeeds, the number of bytes read from the connection will be
// returned. Otherwise, an error condition will be returned.
//
// Note that you can also implement asynchronous reads by using SetReadCallback.
// If you do that, you should no longer attempt to use the Read function.
func (c *Conn) Read(b []byte) (int, error) {
bs, err := c.readNoCopy()
if err != nil {
return 0, err
}
n := len(bs)
if len(bs) > len(b) {
n = len(b)
err = ConnError{errors.New("read buffer too small for entire packet"), false, true, false, 0}
}
// Copy results to the output slice and clean up
copy(b, bs)
// Return the number of bytes copied to the slice, along with any error
return n, err
}
func (c *Conn) _write(msg FlowKeyMessage) error {
if len(msg.Message) > int(c.mtu) {
return ConnError{errors.New("packet too big"), true, false, false, int(c.mtu)}
}
c.session.Act(c, func() {
// Send the packet
c.session._send(msg)
// Session keep-alive, while we wait for the crypto workers from send
switch {
case time.Since(c.session.time) > 6*time.Second:
if c.session.time.Before(c.session.pingTime) && time.Since(c.session.pingTime) > 6*time.Second {
// TODO double check that the above condition is correct
c._doSearch()
} else {
c.session.ping(c.session) // TODO send from self if this becomes an actor
}
case c.session.reset && c.session.pingTime.Before(c.session.time):
c.session.ping(c.session) // TODO send from self if this becomes an actor
default: // Don't do anything, to keep traffic throttled
}
})
return nil
}
// WriteFrom should be called by a phony.Actor, and tells the Conn to send a
// message. This is used internally by Write. If the callback is called with a
// non-nil value, then it is safe to reuse the argument FlowKeyMessage.
func (c *Conn) WriteFrom(from phony.Actor, msg FlowKeyMessage, callback func(error)) {
c.Act(from, func() {
callback(c._write(msg))
})
}
// writeNoCopy is used internally by Write and makes use of WriteFrom under the hood.
// The caller must not reuse the argument FlowKeyMessage when a nil error is returned.
func (c *Conn) writeNoCopy(msg FlowKeyMessage) error {
var cancel util.Cancellation
var doCancel bool
phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.writeDeadline) })
if doCancel {
defer cancel.Cancel(nil)
}
var err error
select {
case <-cancel.Finished():
if cancel.Error() == util.CancellationTimeoutError {
err = ConnError{errors.New("write timeout"), true, false, false, 0}
} else {
err = ConnError{errors.New("session closed"), false, false, true, 0}
}
default:
done := make(chan struct{})
callback := func(e error) { err = e; close(done) }
c.WriteFrom(nil, msg, callback)
<-done
}
return err
}
// Write allows you to write to the connection in a synchronous fashion. This
// function may block until either the write has completed, the connection has
// been closed or the write deadline has been reached. If the function succeeds,
// the number of written bytes is returned. Otherwise, an error condition is
// returned.
func (c *Conn) Write(b []byte) (int, error) {
written := len(b)
bs := make([]byte, 0, len(b)+crypto.BoxOverhead)
bs = append(bs, b...)
msg := FlowKeyMessage{Message: bs}
err := c.writeNoCopy(msg)
if err != nil {
written = 0
}
return written, err
}
// Close will close an open connection and any blocking operations on the
// connection will unblock and return. From this point forward, the connection
// can no longer be used and you should no longer attempt to Read or Write to
// the connection.
func (c *Conn) Close() (err error) {
phony.Block(c, func() {
if c.session != nil {
// Close the session, if it hasn't been closed already
if e := c.session.cancel.Cancel(errors.New("connection closed")); e != nil {
err = ConnError{errors.New("close failed, session already closed"), false, false, true, 0}
} else {
c.session.doRemove()
}
}
})
return
}
// LocalAddr returns the complete public key of the local side of the
// connection. This is always going to return your own node's public key.
func (c *Conn) LocalAddr() net.Addr {
return &c.core.boxPub
}
// RemoteAddr returns the complete public key of the remote side of the
// connection.
func (c *Conn) RemoteAddr() net.Addr {
if c.session != nil {
return &c.session.theirPermPub
}
return nil
}
// SetDeadline is equivalent to calling both SetReadDeadline and
// SetWriteDeadline with the same value, configuring the maximum amount of time
// that synchronous Read and Write operations can block for. If no deadline is
// configured, Read and Write operations can potentially block indefinitely.
func (c *Conn) SetDeadline(t time.Time) error {
c.SetReadDeadline(t)
c.SetWriteDeadline(t)
return nil
}
// SetReadDeadline configures the maximum amount of time that a synchronous Read
// operation can block for. A Read operation will unblock at the point that the
// read deadline is reached if no other condition (such as data arrival or
// connection closure) happens first. If no deadline is configured, Read
// operations can potentially block indefinitely.
func (c *Conn) SetReadDeadline(t time.Time) error {
// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
phony.Block(c, func() { c.readDeadline = &t })
return nil
}
// SetWriteDeadline configures the maximum amount of time that a synchronous
// Write operation can block for. A Write operation will unblock at the point
// that the write deadline is reached if no other condition (such as data sending
// or connection closure) happens first. If no deadline is configured, Write
// operations can potentially block indefinitely.
func (c *Conn) SetWriteDeadline(t time.Time) error {
// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
phony.Block(c, func() { c.writeDeadline = &t })
return nil
}
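// A hedged sketch of driving the Conn API above: an asynchronous read callback,
// a write with a deadline, and a check for the PacketTooBig condition through
// ConnError. It assumes a *yggdrasil.Conn dialled elsewhere and the pre-0.4
// src/yggdrasil import path; sendPing is a hypothetical helper.
package echo

import (
	"fmt"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)

func sendPing(conn *yggdrasil.Conn, payload []byte) error {
	// Receive asynchronously instead of blocking in Read.
	conn.SetReadCallback(func(bs []byte) {
		fmt.Println("received", len(bs), "bytes from", conn.RemoteAddr())
	})
	if err := conn.SetWriteDeadline(time.Now().Add(5 * time.Second)); err != nil {
		return err
	}
	if _, err := conn.Write(payload); err != nil {
		// ConnError reports whether the packet simply exceeded the session MTU.
		if ce, ok := err.(yggdrasil.ConnError); ok && ce.PacketTooBig() {
			return fmt.Errorf("payload too big, maximum is %d bytes", ce.PacketMaximumSize())
		}
		return err
	}
	return nil
}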

View File

@ -1,447 +0,0 @@
package yggdrasil
// A chord-like Distributed Hash Table (DHT).
// Used to look up coords given a NodeID and bitmask (taken from an IPv6 address).
// Keeps track of immediate successor, predecessor, and all peers.
// Also keeps track of other nodes if they're closer in tree space than all other known nodes encountered when heading in either direction to that point. The hypothesis is that, for the kinds of networks we care about, this should probabilistically include the node needed to keep lookups to near O(log n) steps.
import (
"sort"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
const (
dht_lookup_size = 16
dht_timeout = 6 * time.Minute
dht_max_delay = 5 * time.Minute
dht_max_delay_dirty = 30 * time.Second
)
// dhtInfo represents everything we know about a node in the DHT.
// This includes its key, a cache of its NodeID, coords, and timing/ping related info for deciding who/when to ping nodes for maintenance.
type dhtInfo struct {
nodeID_hidden *crypto.NodeID
key crypto.BoxPubKey
coords []byte
recv time.Time // When we last received a message
pings int // Time out if at least 3 consecutive maintenance pings drop
throttle time.Duration
dirty bool // Set to true if we've used this node in ping responses (for queries about someone other than the person doing the asking, i.e. real searches) since the last time we heard from the node
}
// Returns the *NodeID associated with dhtInfo.key, calculating it on the fly the first time or from a cache all subsequent times.
func (info *dhtInfo) getNodeID() *crypto.NodeID {
if info.nodeID_hidden == nil {
info.nodeID_hidden = crypto.GetNodeID(&info.key)
}
return info.nodeID_hidden
}
// Request for a node to do a lookup.
// Includes our key and coords so they can send a response back, and the destination NodeID we want to ask about.
type dhtReq struct {
Key crypto.BoxPubKey // Key of whoever asked
Coords []byte // Coords of whoever asked
Dest crypto.NodeID // NodeID they're asking about
}
// Response to a DHT lookup.
// Includes the key and coords of the node that's responding, and the destination they were asked about.
// The main part is Infos []*dhtInfo, the lookup response.
type dhtRes struct {
Key crypto.BoxPubKey // key of the sender
Coords []byte // coords of the sender
Dest crypto.NodeID
Infos []*dhtInfo // response
}
// Parts of a DHT req usable as a key in a map.
type dhtReqKey struct {
key crypto.BoxPubKey
dest crypto.NodeID
}
// The main DHT struct.
type dht struct {
router *router
nodeID crypto.NodeID
reqs map[dhtReqKey]time.Time // Keeps track of recent outstanding requests
callbacks map[dhtReqKey][]dht_callbackInfo // Search and admin lookup callbacks
// These next two could be replaced by a single linked list or similar...
table map[crypto.NodeID]*dhtInfo
imp []*dhtInfo
}
// Initializes the DHT.
func (t *dht) init(r *router) {
t.router = r
t.nodeID = *t.router.core.NodeID()
t.callbacks = make(map[dhtReqKey][]dht_callbackInfo)
t.reset()
}
func (t *dht) reconfigure() {
// This is where reconfiguration would go, if we had anything to do
}
// Resets the DHT in response to coord changes.
// This empties all info from the DHT and drops outstanding requests.
func (t *dht) reset() {
for _, info := range t.table {
if t.isImportant(info) {
t.ping(info, nil)
}
}
t.reqs = make(map[dhtReqKey]time.Time)
t.table = make(map[crypto.NodeID]*dhtInfo)
t.imp = nil
}
// Does a DHT lookup and returns up to dht_lookup_size results.
func (t *dht) lookup(nodeID *crypto.NodeID, everything bool) []*dhtInfo {
results := make([]*dhtInfo, 0, len(t.table))
for _, info := range t.table {
results = append(results, info)
}
if len(results) > dht_lookup_size {
// Drop the middle part, so we keep some nodes before and after.
// This should help to bootstrap / recover more quickly.
sort.SliceStable(results, func(i, j int) bool {
return dht_ordered(nodeID, results[i].getNodeID(), results[j].getNodeID())
})
newRes := make([]*dhtInfo, 0, len(results))
newRes = append(newRes, results[len(results)-dht_lookup_size/2:]...)
newRes = append(newRes, results[:len(results)-dht_lookup_size/2]...)
results = newRes
results = results[:dht_lookup_size]
}
return results
}
// Insert into table, preserving the time we last sent a packet if the node was already in the table, otherwise setting that time to now.
func (t *dht) insert(info *dhtInfo) {
if *info.getNodeID() == t.nodeID {
// This shouldn't happen, but don't add it if it does
return
}
info.recv = time.Now()
if oldInfo, isIn := t.table[*info.getNodeID()]; isIn {
sameCoords := true
if len(info.coords) != len(oldInfo.coords) {
sameCoords = false
} else {
for idx := 0; idx < len(info.coords); idx++ {
if info.coords[idx] != oldInfo.coords[idx] {
sameCoords = false
break
}
}
}
if sameCoords {
info.throttle = oldInfo.throttle
}
}
t.imp = nil // It needs to update to get a pointer to the new info
t.table[*info.getNodeID()] = info
}
// Insert a peer into the table if it hasn't been pinged lately, to keep peers from dropping
func (t *dht) insertPeer(info *dhtInfo) {
t.insert(info) // FIXME this resets timers / ping counts / etc, so it seems kind of dangerous
t.ping(info, nil) // This is a quick fix to the above, ping them immediately...
}
// Return true if first/second/third are (partially) ordered correctly.
func dht_ordered(first, second, third *crypto.NodeID) bool {
lessOrEqual := func(first, second *crypto.NodeID) bool {
for idx := 0; idx < crypto.NodeIDLen; idx++ {
if first[idx] > second[idx] {
return false
}
if first[idx] < second[idx] {
return true
}
}
return true
}
firstLessThanSecond := lessOrEqual(first, second)
secondLessThanThird := lessOrEqual(second, third)
thirdLessThanFirst := lessOrEqual(third, first)
switch {
case firstLessThanSecond && secondLessThanThird:
// Nothing wrapped around 0, the easy case
return true
case thirdLessThanFirst && firstLessThanSecond:
// Third wrapped around 0
return true
case secondLessThanThird && thirdLessThanFirst:
// Second (and third) wrapped around 0
return true
}
return false
}
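// A hedged illustration of the ring ordering that dht_ordered above checks,
// shrunk to single-byte IDs for readability: the three values are "ordered" if
// second lies on the arc from first to third when moving in increasing order,
// allowing a wrap through zero. This mirrors the three cases above but is not
// the real NodeID comparison.
package main

import "fmt"

func orderedOnRing(first, second, third byte) bool {
	a, b, c := first <= second, second <= third, third <= first
	return (a && b) || (c && a) || (b && c)
}

func main() {
	fmt.Println(orderedOnRing(10, 20, 30))  // true: nothing wraps around 0
	fmt.Println(orderedOnRing(200, 250, 5)) // true: third wraps around 0
	fmt.Println(orderedOnRing(30, 20, 10))  // false: runs the wrong way round
}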
// Reads a request, performs a lookup, and responds.
// Update info about the node that sent the request.
func (t *dht) handleReq(req *dhtReq) {
// Send them what they asked for
res := dhtRes{
Key: t.router.core.boxPub,
Coords: t.router.table.self.getCoords(),
Dest: req.Dest,
Infos: t.lookup(&req.Dest, false),
}
t.sendRes(&res, req)
// Also add them to our DHT
info := dhtInfo{
key: req.Key,
coords: req.Coords,
}
if _, isIn := t.table[*info.getNodeID()]; !isIn && t.isImportant(&info) {
t.ping(&info, nil)
}
// Maybe mark nodes from lookup as dirty
if req.Dest != *info.getNodeID() {
// This node asked about someone other than themself, so this wasn't just idle traffic.
for _, info := range res.Infos {
// Mark nodes dirty so we're sure to check up on them again later
info.dirty = true
}
}
}
// Sends a lookup response to the specified node.
func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
// Send a reply for a dhtReq
bs := res.encode()
shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &req.Key)
payload, nonce := crypto.BoxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
Coords: req.Coords,
ToKey: req.Key,
FromKey: t.router.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
t.router.out(packet)
}
type dht_callbackInfo struct {
f func(*dhtRes)
time time.Time
}
// Adds a callback and removes it after some timeout.
func (t *dht) addCallback(rq *dhtReqKey, callback func(*dhtRes)) {
info := dht_callbackInfo{callback, time.Now().Add(6 * time.Second)}
t.callbacks[*rq] = append(t.callbacks[*rq], info)
}
// Reads a lookup response, checks that we had sent a matching request, and processes the response info.
// This mainly consists of updating the node we asked in our DHT (they responded, so we know they're still alive), and deciding if we want to do anything with their responses
func (t *dht) handleRes(res *dhtRes) {
rq := dhtReqKey{res.Key, res.Dest}
if callbacks, isIn := t.callbacks[rq]; isIn {
for _, callback := range callbacks {
callback.f(res)
}
delete(t.callbacks, rq)
}
_, isIn := t.reqs[rq]
if !isIn {
return
}
delete(t.reqs, rq)
rinfo := dhtInfo{
key: res.Key,
coords: res.Coords,
}
if t.isImportant(&rinfo) {
t.insert(&rinfo)
}
for _, info := range res.Infos {
if *info.getNodeID() == t.nodeID {
continue
} // Skip self
if _, isIn := t.table[*info.getNodeID()]; isIn {
// TODO? don't skip if coords are different?
continue
}
if t.isImportant(info) {
t.ping(info, nil)
}
}
}
// Sends a lookup request to the specified node.
func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
// Send a dhtReq to the node in dhtInfo
bs := req.encode()
shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &dest.key)
payload, nonce := crypto.BoxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
Coords: dest.coords,
ToKey: dest.key,
FromKey: t.router.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
t.router.out(packet)
rq := dhtReqKey{dest.key, req.Dest}
t.reqs[rq] = time.Now()
}
// Sends a lookup to this info, looking for the target.
func (t *dht) ping(info *dhtInfo, target *crypto.NodeID) {
// Creates a req for the node at dhtInfo, asking them about the target (if one is given) or themself (if no target is given)
if target == nil {
target = &t.nodeID
}
req := dhtReq{
Key: t.router.core.boxPub,
Coords: t.router.table.self.getCoords(),
Dest: *target,
}
t.sendReq(&req, info)
}
// Periodic maintenance work to keep important DHT nodes alive.
func (t *dht) doMaintenance() {
now := time.Now()
newReqs := make(map[dhtReqKey]time.Time, len(t.reqs))
for key, start := range t.reqs {
if now.Sub(start) < 6*time.Second {
newReqs[key] = start
}
}
t.reqs = newReqs
newCallbacks := make(map[dhtReqKey][]dht_callbackInfo, len(t.callbacks))
for key, cs := range t.callbacks {
for _, c := range cs {
if now.Before(c.time) {
newCallbacks[key] = append(newCallbacks[key], c)
} else {
// Signal failure
c.f(nil)
}
}
}
t.callbacks = newCallbacks
for infoID, info := range t.table {
switch {
case info.pings > 6:
// It failed to respond to too many pings
fallthrough
case now.Sub(info.recv) > dht_timeout:
// It's too old
fallthrough
case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty && !t.isImportant(info):
// We won't ping it to refresh it, so just drop it
delete(t.table, infoID)
t.imp = nil
}
}
for _, info := range t.getImportant() {
switch {
case now.Sub(info.recv) > info.throttle:
info.throttle *= 2
if info.throttle < time.Second {
info.throttle = time.Second
} else if info.throttle > dht_max_delay {
info.throttle = dht_max_delay
}
fallthrough
case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty:
t.ping(info, nil)
info.pings++
}
}
}
// Gets a list of important nodes, used by isImportant.
func (t *dht) getImportant() []*dhtInfo {
if t.imp == nil {
// Get a list of all known nodes
infos := make([]*dhtInfo, 0, len(t.table))
for _, info := range t.table {
infos = append(infos, info)
}
// Sort them by increasing order in distance along the ring
sort.SliceStable(infos, func(i, j int) bool {
// Sort in order of predecessors (!), reverse from chord normal, because it plays nicer with zero bits for unknown parts of target addresses
return dht_ordered(infos[j].getNodeID(), infos[i].getNodeID(), &t.nodeID)
})
// Keep the ones that are no further than the closest seen so far
minDist := ^uint64(0)
loc := t.router.table.self
important := infos[:0]
for _, info := range infos {
dist := uint64(loc.dist(info.coords))
if dist < minDist {
minDist = dist
important = append(important, info)
} else if len(important) < 2 {
important = append(important, info)
}
}
var temp []*dhtInfo
minDist = ^uint64(0)
for idx := len(infos) - 1; idx >= 0; idx-- {
info := infos[idx]
dist := uint64(loc.dist(info.coords))
if dist < minDist {
minDist = dist
temp = append(temp, info)
} else if len(temp) < 2 {
temp = append(temp, info)
}
}
for idx := len(temp) - 1; idx >= 0; idx-- {
important = append(important, temp[idx])
}
t.imp = important
}
return t.imp
}
// Returns true if this is a node we need to keep track of for the DHT to work.
func (t *dht) isImportant(ninfo *dhtInfo) bool {
if ninfo.key == t.router.core.boxPub {
return false
}
important := t.getImportant()
// Check if ninfo is of equal or greater importance to what we already know
loc := t.router.table.self
ndist := uint64(loc.dist(ninfo.coords))
minDist := ^uint64(0)
for _, info := range important {
if (*info.getNodeID() == *ninfo.getNodeID()) ||
(ndist < minDist && dht_ordered(info.getNodeID(), ninfo.getNodeID(), &t.nodeID)) {
// Either the same node, or a better one
return true
}
dist := uint64(loc.dist(info.coords))
if dist < minDist {
minDist = dist
}
}
minDist = ^uint64(0)
for idx := len(important) - 1; idx >= 0; idx-- {
info := important[idx]
if (*info.getNodeID() == *ninfo.getNodeID()) ||
(ndist < minDist && dht_ordered(&t.nodeID, ninfo.getNodeID(), info.getNodeID())) {
// Either the same node, or a better one
return true
}
dist := uint64(loc.dist(info.coords))
if dist < minDist {
minDist = dist
}
}
// We didn't find any important node that ninfo is better than
return false
}

View File

@ -1,120 +0,0 @@
package yggdrasil
import (
"context"
"encoding/hex"
"errors"
"net"
"strconv"
"strings"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
// Dialer represents an Yggdrasil connection dialer.
type Dialer struct {
core *Core
}
// Dial opens a session to the given node. The first parameter should be
// "curve25519" or "nodeid" and the second parameter should contain a
// hexadecimal representation of the target. It uses DialContext internally.
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
return d.DialContext(nil, network, address)
}
// DialContext is used internally by Dial, and should only be used with a
// context that includes a timeout. It uses DialByNodeIDandMask internally when
// the network is "nodeid", or DialByPublicKey when the network is "curve25519".
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
var nodeID crypto.NodeID
var nodeMask crypto.NodeID
// Process
switch network {
case "curve25519":
dest, err := hex.DecodeString(address)
if err != nil {
return nil, err
}
if len(dest) != crypto.BoxPubKeyLen {
return nil, errors.New("invalid key length supplied")
}
var pubKey crypto.BoxPubKey
copy(pubKey[:], dest)
return d.DialByPublicKey(ctx, &pubKey)
case "nodeid":
// A node ID was provided - we don't need to do anything special with it
if tokens := strings.Split(address, "/"); len(tokens) == 2 {
l, err := strconv.Atoi(tokens[1])
if err != nil {
return nil, err
}
dest, err := hex.DecodeString(tokens[0])
if err != nil {
return nil, err
}
copy(nodeID[:], dest)
for idx := 0; idx < l; idx++ {
nodeMask[idx/8] |= 0x80 >> byte(idx%8)
}
} else {
dest, err := hex.DecodeString(tokens[0])
if err != nil {
return nil, err
}
copy(nodeID[:], dest)
for i := range nodeMask {
nodeMask[i] = 0xFF
}
}
return d.DialByNodeIDandMask(ctx, &nodeID, &nodeMask)
default:
// An unexpected address type was given, so give up
return nil, errors.New("unexpected address type")
}
}
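// For illustration only (not part of the original file), the two address forms
// accepted above might be used as follows, assuming d is a *Dialer obtained
// from Core.ConnDialer; the node ID string is a placeholder:
//
// conn, err := d.Dial("curve25519", "55071be281f50d0abbda63aadc59755624280c44b2f1f47684317aa4e0325604")
//
// conn, err := d.Dial("nodeid", "<hex-encoded node ID>/64") // only the first 64 bits of the node ID need to match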
// DialByNodeIDandMask opens a session to the given node based on raw NodeID
// parameters. If ctx is nil or has no timeout, then a default timeout of 6
// seconds will apply, beginning *after* the search finishes.
func (d *Dialer) DialByNodeIDandMask(ctx context.Context, nodeID, nodeMask *crypto.NodeID) (net.Conn, error) {
startDial := time.Now()
conn := newConn(d.core, nodeID, nodeMask, nil)
if err := conn.search(); err != nil {
// TODO: make searches take a context, so they can be cancelled early
conn.Close()
return nil, err
}
endSearch := time.Now()
d.core.log.Debugln("Dial searched for:", nodeID, "in time:", endSearch.Sub(startDial))
conn.session.setConn(nil, conn)
var cancel context.CancelFunc
if ctx == nil {
ctx = context.Background()
}
ctx, cancel = context.WithTimeout(ctx, 6*time.Second)
defer cancel()
select {
case <-conn.session.init:
endInit := time.Now()
d.core.log.Debugln("Dial initialized session for:", nodeID, "in time:", endInit.Sub(endSearch))
d.core.log.Debugln("Finished dial for:", nodeID, "in time:", endInit.Sub(startDial))
return conn, nil
case <-ctx.Done():
conn.Close()
return nil, errors.New("session handshake timeout")
}
}
// DialByPublicKey opens a session to the given node based on the public key. If
// ctx is nil or has no timeout, then a default timeout of 6 seconds will apply,
// beginning *after* the search finishes.
func (d *Dialer) DialByPublicKey(ctx context.Context, pubKey *crypto.BoxPubKey) (net.Conn, error) {
nodeID := crypto.GetNodeID(pubKey)
var nodeMask crypto.NodeID
for i := range nodeMask {
nodeMask[i] = 0xFF
}
return d.DialByNodeIDandMask(ctx, nodeID, &nodeMask)
}


@ -1,176 +0,0 @@
/*
Package yggdrasil implements the core functionality of the Yggdrasil Network.
Introduction
Yggdrasil is a proof-of-concept mesh network which provides end-to-end encrypted
communication between nodes in a decentralised fashion. The network is arranged
using a globally-agreed spanning tree which provides each node with a locator
(coordinates relative to the root) and a distributed hash table (DHT) mechanism
for finding other nodes.
Each node also implements a router, which is responsible for encryption of
traffic, searches and connections, and a switch, which is responsible ultimately
for forwarding traffic across the network.
While many Yggdrasil nodes in existence today are IP nodes - that is, they are
transporting IPv6 packets, like a kind of mesh VPN - it is also possible to
integrate Yggdrasil into your own applications and use it as a generic data
transport, similar to UDP.
This library is what you need to integrate and use Yggdrasil in your own
application.
Basics
To start an Yggdrasil node, you should first generate node configuration, which,
amongst other things, includes the encryption keypairs used to generate the
node's identity, and supply a logger to which Yggdrasil's output will be
written.
This may look something like this:
import (
"os"
"github.com/gologme/log"
"github.com/yggdrasil-network/yggdrasil-go/src/config"
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
)
type node struct {
core yggdrasil.Core
config *config.NodeConfig
log *log.Logger
}
You then can supply node configuration and a logger:
n := node{}
n.log = log.New(os.Stdout, "", log.Flags())
n.config = config.GenerateConfig()
In the above example, we ask the config package to supply new configuration each
time, which results in fresh encryption keys and therefore a new identity. It is
normally preferable to persist node configuration on the filesystem or in some
configuration store so that the node's identity does not change each time the
program starts. Note that Yggdrasil will automatically fill in any missing
configuration items with sane defaults.
Once you have supplied a logger and some node configuration, you can then start
the node:
n.core.Start(n.config, n.log)
Add some peers to connect to the network:
n.core.AddPeer("tcp://some-host.net:54321", "")
n.core.AddPeer("tcp://[2001::1:2:3]:54321", "")
n.core.AddPeer("tcp://1.2.3.4:54321", "")
You can also ask the API for information about our node:
n.log.Println("My node ID is", n.core.NodeID())
n.log.Println("My public key is", n.core.EncryptionPublicKey())
n.log.Println("My coords are", n.core.Coords())
Incoming Connections
Once your node is started, you can then listen for connections from other nodes
by asking the API for a Listener:
listener, err := n.core.ConnListen()
if err != nil {
// ...
}
The Listener has a blocking Accept function which will wait for incoming
connections from remote nodes. It will return a Conn when a connection is
received. If the node never receives any incoming connections then this function
can block forever, so be prepared for that, perhaps by listening in a separate
goroutine.
Assuming that you have defined a myConnectionHandler function to deal with
incoming connections:
for {
conn, err := listener.Accept()
if err != nil {
// ...
}
// We've got a new connection
go myConnectionHandler(conn)
}
Outgoing Connections
If you know the node ID of the remote node that you want to talk to, you can
dial an outbound connection to it. To do this, you should first ask the API for
a Dialer:
dialer, err := n.core.ConnDialer()
if err != nil {
// ...
}
You can then dial using the node's public key in hexadecimal format, for example:
conn, err := dialer.Dial("curve25519", "55071be281f50d0abbda63aadc59755624280c44b2f1f47684317aa4e0325604")
if err != nil {
// ...
}
Using Connections
Conn objects are implementations of io.ReadWriteCloser, and as such, you can
Read, Write and Close them as necessary.
Each Read or Write operation can deal with a buffer with a maximum size of 65535
bytes - any bigger than this and the operation will return an error.
For example, to write to the Conn from the supplied buffer:
buf := []byte{1, 2, 3, 4, 5}
w, err := conn.Write(buf)
if err != nil {
// ...
} else {
// written w bytes
}
Reading from the Conn into the supplied buffer:
buf := make([]byte, 65535)
r, err := conn.Read(buf)
if err != nil {
// ...
} else {
// read r bytes
}
When you are happy that a connection is no longer required, you can discard it:
err := conn.Close()
if err != nil {
// ...
}
Limitations
You should be aware of the following limitations when working with the Yggdrasil
library:
Individual messages written through Yggdrasil connections can not exceed 65535
bytes in size. Yggdrasil has no concept of fragmentation, so if you try to send
a message that exceeds 65535 bytes in size, it will be dropped altogether and
an error will be returned.
Yggdrasil connections are unreliable by nature. Messages are delivered on a
best-effort basis, and Yggdrasil employs congestion control where appropriate to
ensure that congestion does not affect message transport, but it will not
retransmit any messages that have been lost. If reliable delivery is important
then you should manually implement acknowledgement and retransmission of
messages.
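As a minimal illustration (assuming conn is an established Conn and data is an
arbitrarily large byte slice), a caller could split its writes so that each one
stays within the 65535 byte limit:
const maxMsgSize = 65535
for len(data) > 0 {
n := len(data)
if n > maxMsgSize {
n = maxMsgSize
}
if _, err := conn.Write(data[:n]); err != nil {
// ...
}
data = data[n:]
}
Note that each chunk is still delivered as an independent, unreliable message,
so reassembly and any retransmission remain the application's responsibility.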
*/
package yggdrasil


@ -1,540 +0,0 @@
package yggdrasil
import (
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"net/url"
"strings"
"sync"
//"sync/atomic"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"golang.org/x/net/proxy"
"github.com/Arceliar/phony"
)
type links struct {
core *Core
mutex sync.RWMutex // protects links below
links map[linkInfo]*link
tcp tcp // TCP interface support
stopped chan struct{}
// TODO timeout (to remove from switch), read from config.ReadTimeout
}
type linkInfo struct {
box crypto.BoxPubKey // Their encryption key
sig crypto.SigPubKey // Their signing key
linkType string // Type of link, e.g. TCP, AWDL
local string // Local name or address
remote string // Remote name or address
}
type linkMsgIO interface {
readMsg() ([]byte, error)
writeMsgs([][]byte) (int, error)
close() error
// These are temporary workarounds to stream semantics
_sendMetaBytes([]byte) error
_recvMetaBytes() ([]byte, error)
}
type link struct {
lname string
links *links
peer *peer
options linkOptions
msgIO linkMsgIO
info linkInfo
incoming bool
force bool
closed chan struct{}
reader linkReader // Reads packets, notifies this link, passes packets to switch
writer linkWriter // Writes packets, notifies this link
phony.Inbox // Protects the below
sendTimer *time.Timer // Fires to signal that sending is blocked
keepAliveTimer *time.Timer // Fires to send keep-alive traffic
stallTimer *time.Timer // Fires to signal that no incoming traffic (including keep-alive) has been seen
closeTimer *time.Timer // Fires when the link has been idle so long we need to close it
readUnblocked bool // True if we've sent a read message unblocking this peer in the switch
writeUnblocked bool // True if we've sent a write message unblocking this peer in the switch
shutdown bool // True if we're shutting down, avoids sending some messages that could race with new peers being created on the same port
}
type linkOptions struct {
pinnedCurve25519Keys map[crypto.BoxPubKey]struct{}
pinnedEd25519Keys map[crypto.SigPubKey]struct{}
}
func (l *links) init(c *Core) error {
l.core = c
l.mutex.Lock()
l.links = make(map[linkInfo]*link)
l.mutex.Unlock()
l.stopped = make(chan struct{})
if err := l.tcp.init(l); err != nil {
c.log.Errorln("Failed to start TCP interface")
return err
}
return nil
}
func (l *links) reconfigure() {
l.tcp.reconfigure()
}
func (l *links) call(uri string, sintf string) error {
u, err := url.Parse(uri)
if err != nil {
return fmt.Errorf("peer %s is not correctly formatted (%s)", uri, err)
}
pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
tcpOpts := tcpOptions{}
if pubkeys, ok := u.Query()["curve25519"]; ok && len(pubkeys) > 0 {
tcpOpts.pinnedCurve25519Keys = make(map[crypto.BoxPubKey]struct{})
for _, pubkey := range pubkeys {
if boxPub, err := hex.DecodeString(pubkey); err == nil {
var boxPubKey crypto.BoxPubKey
copy(boxPubKey[:], boxPub)
tcpOpts.pinnedCurve25519Keys[boxPubKey] = struct{}{}
}
}
}
if pubkeys, ok := u.Query()["ed25519"]; ok && len(pubkeys) > 0 {
tcpOpts.pinnedEd25519Keys = make(map[crypto.SigPubKey]struct{})
for _, pubkey := range pubkeys {
if sigPub, err := hex.DecodeString(pubkey); err == nil {
var sigPubKey crypto.SigPubKey
copy(sigPubKey[:], sigPub)
tcpOpts.pinnedEd25519Keys[sigPubKey] = struct{}{}
}
}
}
switch u.Scheme {
case "tcp":
l.tcp.call(u.Host, tcpOpts, sintf)
case "socks":
tcpOpts.socksProxyAddr = u.Host
if u.User != nil {
tcpOpts.socksProxyAuth = &proxy.Auth{}
tcpOpts.socksProxyAuth.User = u.User.Username()
tcpOpts.socksProxyAuth.Password, _ = u.User.Password()
}
l.tcp.call(pathtokens[0], tcpOpts, sintf)
case "tls":
tcpOpts.upgrade = l.tcp.tls.forDialer
l.tcp.call(u.Host, tcpOpts, sintf)
default:
return errors.New("unknown call scheme: " + u.Scheme)
}
return nil
}
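// For illustration only (not in the original file), the peer URI formats parsed
// above look like the following; hosts, ports and keys are placeholders, and
// the curve25519/ed25519 query parameters are the optional key-pinning options:
//
// tcp://host.example.com:54321
// tls://host.example.com:54321
// socks://user:password@proxy.example.com:1080/host.example.com:54321
// tcp://host.example.com:54321?curve25519=<hex box key>&ed25519=<hex sig key>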
func (l *links) listen(uri string) error {
u, err := url.Parse(uri)
if err != nil {
return fmt.Errorf("listener %s is not correctly formatted (%s)", uri, err)
}
switch u.Scheme {
case "tcp":
_, err := l.tcp.listen(u.Host, nil)
return err
case "tls":
_, err := l.tcp.listen(u.Host, l.tcp.tls.forListener)
return err
default:
return errors.New("unknown listen scheme: " + u.Scheme)
}
}
func (l *links) create(msgIO linkMsgIO, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*link, error) {
// Technically anything unique would work for names, but let's pick something human readable, just for debugging
intf := link{
lname: name,
links: l,
options: options,
msgIO: msgIO,
info: linkInfo{
linkType: linkType,
local: local,
remote: remote,
},
incoming: incoming,
force: force,
}
intf.writer.intf = &intf
intf.writer.worker = make(chan [][]byte, 1)
intf.reader.intf = &intf
intf.reader.err = make(chan error)
return &intf, nil
}
func (l *links) stop() error {
close(l.stopped)
if err := l.tcp.stop(); err != nil {
return err
}
return nil
}
func (intf *link) handler() (chan struct{}, error) {
// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
go func() {
for bss := range intf.writer.worker {
intf.msgIO.writeMsgs(bss)
}
}()
defer intf.writer.Act(nil, func() {
intf.writer.closed = true
close(intf.writer.worker)
})
myLinkPub, myLinkPriv := crypto.NewBoxKeys()
meta := version_getBaseMetadata()
meta.box = intf.links.core.boxPub
meta.sig = intf.links.core.sigPub
meta.link = *myLinkPub
metaBytes := meta.encode()
// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
var err error
if !util.FuncTimeout(func() { err = intf.msgIO._sendMetaBytes(metaBytes) }, 30*time.Second) {
return nil, errors.New("timeout on metadata send")
}
if err != nil {
return nil, err
}
if !util.FuncTimeout(func() { metaBytes, err = intf.msgIO._recvMetaBytes() }, 30*time.Second) {
return nil, errors.New("timeout on metadata recv")
}
if err != nil {
return nil, err
}
meta = version_metadata{}
if !meta.decode(metaBytes) || !meta.check() {
return nil, errors.New("failed to decode metadata")
}
base := version_getBaseMetadata()
if meta.ver > base.ver || meta.ver == base.ver && meta.minorVer > base.minorVer {
intf.links.core.log.Errorln("Failed to connect to node: " + intf.lname + " version: " + fmt.Sprintf("%d.%d", meta.ver, meta.minorVer))
return nil, errors.New("failed to connect: wrong version")
}
// Check if the remote side matches the keys we expected. This is a bit of a weak
// check - in future versions we really should check a signature or something like that.
if pinned := intf.options.pinnedCurve25519Keys; pinned != nil {
if _, allowed := pinned[meta.box]; !allowed {
intf.links.core.log.Errorf("Failed to connect to node: %q sent curve25519 key that does not match pinned keys", intf.name)
return nil, fmt.Errorf("failed to connect: host sent curve25519 key that does not match pinned keys")
}
}
if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
if _, allowed := pinned[meta.sig]; !allowed {
intf.links.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name)
return nil, fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
}
}
// Check if we're authorized to connect to this key / IP
if intf.incoming && !intf.force && !intf.links.core.peers.isAllowedEncryptionPublicKey(&meta.box) {
intf.links.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.box[:]))
intf.msgIO.close()
return nil, nil
}
// Check if we already have a link to this node
intf.info.box = meta.box
intf.info.sig = meta.sig
intf.links.mutex.Lock()
if oldIntf, isIn := intf.links.links[intf.info]; isIn {
intf.links.mutex.Unlock()
// FIXME we should really return an error and let the caller block instead
// That lets the caller do things like close the connection on its own, avoid printing a connection message in the first place, etc.
intf.links.core.log.Debugln("DEBUG: found existing interface for", intf.name)
intf.msgIO.close()
return oldIntf.closed, nil
} else {
intf.closed = make(chan struct{})
intf.links.links[intf.info] = intf
defer func() {
intf.links.mutex.Lock()
delete(intf.links.links, intf.info)
intf.links.mutex.Unlock()
close(intf.closed)
}()
intf.links.core.log.Debugln("DEBUG: registered interface for", intf.name)
}
intf.links.mutex.Unlock()
// Create peer
shared := crypto.GetSharedKey(myLinkPriv, &meta.link)
phony.Block(&intf.links.core.peers, func() {
// FIXME don't use phony.Block, it's bad practice, even if it's safe here
intf.peer = intf.links.core.peers._newPeer(&meta.box, &meta.sig, shared, intf)
})
if intf.peer == nil {
return nil, errors.New("failed to create peer")
}
defer func() {
// More cleanup can go here
intf.Act(nil, func() {
intf.shutdown = true
intf.peer.Act(intf, intf.peer._removeSelf)
})
}()
themAddr := address.AddrForNodeID(crypto.GetNodeID(&intf.info.box))
themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
intf.links.core.log.Infof("Connected %s: %s, source %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
// Start things
go intf.peer.start()
intf.Act(nil, intf._notifyIdle)
intf.reader.Act(nil, intf.reader._read)
// Wait for the reader to finish
// TODO find a way to do this without keeping live goroutines around
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-intf.links.stopped:
intf.msgIO.close()
case <-done:
}
}()
err = <-intf.reader.err
// TODO don't report an error if it's just a 'use of closed network connection'
if err != nil {
intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local, err)
} else {
intf.links.core.log.Infof("Disconnected %s: %s, source %s",
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
}
return nil, err
}
////////////////////////////////////////////////////////////////////////////////
// link needs to match the linkInterface type needed by the peers
type linkInterface interface {
out([][]byte)
linkOut([]byte)
close()
// These next ones are only used by the API
name() string
local() string
remote() string
interfaceType() string
}
func (intf *link) out(bss [][]byte) {
intf.Act(nil, func() {
// nil to prevent it from blocking if the link is somehow frozen
// this is safe because another packet won't be sent until the link notifies
// the peer that it's ready for one
intf.writer.sendFrom(nil, bss)
})
}
func (intf *link) linkOut(bs []byte) {
intf.Act(nil, func() {
// nil to prevent it from blocking if the link is somehow frozen
// FIXME this is hypothetically not safe, the peer shouldn't be sending
// additional packets until this one finishes, otherwise this could leak
// memory if writing happens slower than link packets are generated...
// that seems unlikely, so it's a lesser evil than deadlocking for now
intf.writer.sendFrom(nil, [][]byte{bs})
})
}
func (intf *link) close() {
intf.Act(nil, func() { intf.msgIO.close() })
}
func (intf *link) name() string {
return intf.lname
}
func (intf *link) local() string {
return intf.info.local
}
func (intf *link) remote() string {
return intf.info.remote
}
func (intf *link) interfaceType() string {
return intf.info.linkType
}
////////////////////////////////////////////////////////////////////////////////
const (
sendTime = 1 * time.Second // How long to wait before deciding a send is blocked
keepAliveTime = 2 * time.Second // How long to wait before sending a keep-alive response if we have no real traffic to send
stallTime = 6 * time.Second // How long to wait for response traffic before deciding the connection has stalled
closeTime = 2 * switch_timeout // How long to wait before closing the link
)
// notify the intf that we're currently sending
func (intf *link) notifySending(size int) {
intf.Act(&intf.writer, func() {
intf.sendTimer = time.AfterFunc(sendTime, intf.notifyBlockedSend)
if intf.keepAliveTimer != nil {
intf.keepAliveTimer.Stop()
intf.keepAliveTimer = nil
}
intf.peer.notifyBlocked(intf)
})
}
// This gets called from a time.AfterFunc, and notifies the switch that we appear
// to have gotten blocked on a write, so the switch should start routing traffic
// through other links, if alternatives exist
func (intf *link) notifyBlockedSend() {
intf.Act(nil, func() {
if intf.sendTimer != nil {
// As far as we know, we're still trying to send, and the timer fired.
intf.sendTimer.Stop()
intf.sendTimer = nil
if !intf.shutdown && intf.writeUnblocked {
intf.writeUnblocked = false
intf.links.core.switchTable.blockPeer(intf, intf.peer.port, true)
}
}
})
}
// notify the intf that we've finished sending, returning the peer to the switch
func (intf *link) notifySent(size int) {
intf.Act(&intf.writer, func() {
if intf.sendTimer != nil {
intf.sendTimer.Stop()
intf.sendTimer = nil
}
if intf.keepAliveTimer != nil {
// TODO? unset this when we start sending, not when we finish...
intf.keepAliveTimer.Stop()
intf.keepAliveTimer = nil
}
intf._notifyIdle()
if size > 0 && intf.stallTimer == nil {
intf.stallTimer = time.AfterFunc(stallTime, intf.notifyStalled)
}
if !intf.shutdown && !intf.writeUnblocked {
intf.writeUnblocked = true
intf.links.core.switchTable.unblockPeer(intf, intf.peer.port, true)
}
})
}
// Notify the peer that we're ready for more traffic
func (intf *link) _notifyIdle() {
intf.peer.Act(intf, intf.peer._handleIdle)
}
// Set the peer as stalled, to prevent them from returning to the switch until a read succeeds
func (intf *link) notifyStalled() {
intf.Act(nil, func() { // Sent from a time.AfterFunc
if intf.stallTimer != nil {
intf.stallTimer.Stop()
intf.stallTimer = nil
if !intf.shutdown && intf.readUnblocked {
intf.readUnblocked = false
intf.links.core.switchTable.blockPeer(intf, intf.peer.port, false)
}
}
})
}
// reset the close timer
func (intf *link) notifyReading() {
intf.Act(&intf.reader, func() {
intf.closeTimer = time.AfterFunc(closeTime, func() { intf.msgIO.close() })
})
}
// wake up the link if it was stalled, and (if size > 0) prepare to send keep-alive traffic
func (intf *link) notifyRead(size int) {
intf.Act(&intf.reader, func() {
intf.closeTimer.Stop()
if intf.stallTimer != nil {
intf.stallTimer.Stop()
intf.stallTimer = nil
}
if size > 0 && intf.keepAliveTimer == nil {
intf.keepAliveTimer = time.AfterFunc(keepAliveTime, intf.notifyDoKeepAlive)
}
if !intf.shutdown && !intf.readUnblocked {
intf.readUnblocked = true
intf.links.core.switchTable.unblockPeer(intf, intf.peer.port, false)
}
})
}
// We need to send keep-alive traffic now
func (intf *link) notifyDoKeepAlive() {
intf.Act(nil, func() { // Sent from a time.AfterFunc
if intf.keepAliveTimer != nil {
intf.keepAliveTimer.Stop()
intf.keepAliveTimer = nil
intf.writer.sendFrom(nil, [][]byte{nil}) // Empty keep-alive traffic
}
})
}
////////////////////////////////////////////////////////////////////////////////
type linkWriter struct {
phony.Inbox
intf *link
worker chan [][]byte
closed bool
}
func (w *linkWriter) sendFrom(from phony.Actor, bss [][]byte) {
w.Act(from, func() {
if w.closed {
return
}
var size int
for _, bs := range bss {
size += len(bs)
}
w.intf.notifySending(size)
w.worker <- bss
w.intf.notifySent(size)
})
}
////////////////////////////////////////////////////////////////////////////////
type linkReader struct {
phony.Inbox
intf *link
err chan error
}
func (r *linkReader) _read() {
r.intf.notifyReading()
msg, err := r.intf.msgIO.readMsg()
r.intf.notifyRead(len(msg))
if len(msg) > 0 {
r.intf.peer.handlePacketFrom(r, msg)
}
if err != nil {
if err != io.EOF {
r.err <- err
}
close(r.err)
return
}
// Now try to read again
r.Act(nil, r._read)
}


@ -1,45 +0,0 @@
package yggdrasil
import (
"errors"
"net"
)
// Listener waits for incoming sessions
type Listener struct {
core *Core
conn chan *Conn
close chan interface{}
}
// Accept blocks until a new incoming session is received
func (l *Listener) Accept() (net.Conn, error) {
select {
case c, ok := <-l.conn:
if !ok {
return nil, errors.New("listener closed")
}
return c, nil
case <-l.close:
return nil, errors.New("listener closed")
}
}
// Close will stop the listener
func (l *Listener) Close() (err error) {
defer func() {
// Only report an error if closing the channels panicked, i.e. the listener was already closed
if r := recover(); r != nil {
err = errors.New("already closed")
}
}()
if l.core.router.sessions.listener == l {
l.core.router.sessions.listener = nil
}
close(l.close)
close(l.conn)
return nil
}
// Addr returns the address of the listener
func (l *Listener) Addr() net.Addr {
return &l.core.boxPub
}


@ -1,209 +0,0 @@
package yggdrasil
import (
"encoding/json"
"errors"
"runtime"
"strings"
"time"
"github.com/Arceliar/phony"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/version"
)
type nodeinfo struct {
phony.Inbox
core *Core
myNodeInfo NodeInfoPayload
callbacks map[crypto.BoxPubKey]nodeinfoCallback
cache map[crypto.BoxPubKey]nodeinfoCached
table *lookupTable
}
type nodeinfoCached struct {
payload NodeInfoPayload
created time.Time
}
type nodeinfoCallback struct {
call func(nodeinfo *NodeInfoPayload)
created time.Time
}
// Represents a session nodeinfo packet.
type nodeinfoReqRes struct {
SendPermPub crypto.BoxPubKey // Sender's permanent key
SendCoords []byte // Sender's coords
IsResponse bool
NodeInfo NodeInfoPayload
}
// Initialises the nodeinfo cache/callback maps, and starts a goroutine to keep
// the cache/callback maps clean of stale entries
func (m *nodeinfo) init(core *Core) {
m.Act(nil, func() {
m._init(core)
})
}
func (m *nodeinfo) _init(core *Core) {
m.core = core
m.callbacks = make(map[crypto.BoxPubKey]nodeinfoCallback)
m.cache = make(map[crypto.BoxPubKey]nodeinfoCached)
m._cleanup()
}
func (m *nodeinfo) _cleanup() {
for boxPubKey, callback := range m.callbacks {
if time.Since(callback.created) > time.Minute {
delete(m.callbacks, boxPubKey)
}
}
for boxPubKey, cache := range m.cache {
if time.Since(cache.created) > time.Hour {
delete(m.cache, boxPubKey)
}
}
time.AfterFunc(time.Second*30, func() {
m.Act(nil, m._cleanup)
})
}
// Add a callback for a nodeinfo lookup
func (m *nodeinfo) addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
m.Act(nil, func() {
m._addCallback(sender, call)
})
}
func (m *nodeinfo) _addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
m.callbacks[sender] = nodeinfoCallback{
created: time.Now(),
call: call,
}
}
// Handles the callback, if there is one
func (m *nodeinfo) _callback(sender crypto.BoxPubKey, nodeinfo NodeInfoPayload) {
if callback, ok := m.callbacks[sender]; ok {
callback.call(&nodeinfo)
delete(m.callbacks, sender)
}
}
// Get the current node's nodeinfo
func (m *nodeinfo) getNodeInfo() (p NodeInfoPayload) {
phony.Block(m, func() {
p = m._getNodeInfo()
})
return
}
func (m *nodeinfo) _getNodeInfo() NodeInfoPayload {
return m.myNodeInfo
}
// Set the current node's nodeinfo
func (m *nodeinfo) setNodeInfo(given interface{}, privacy bool) (err error) {
phony.Block(m, func() {
err = m._setNodeInfo(given, privacy)
})
return
}
func (m *nodeinfo) _setNodeInfo(given interface{}, privacy bool) error {
defaults := map[string]interface{}{
"buildname": version.BuildName(),
"buildversion": version.BuildVersion(),
"buildplatform": runtime.GOOS,
"buildarch": runtime.GOARCH,
}
newnodeinfo := make(map[string]interface{})
if !privacy {
for k, v := range defaults {
newnodeinfo[k] = v
}
}
if nodeinfomap, ok := given.(map[string]interface{}); ok {
for key, value := range nodeinfomap {
if _, ok := defaults[key]; ok {
if strvalue, strok := value.(string); strok && strings.EqualFold(strvalue, "null") || value == nil {
delete(newnodeinfo, key)
}
continue
}
newnodeinfo[key] = value
}
}
newjson, err := json.Marshal(newnodeinfo)
if err == nil {
if len(newjson) > 16384 {
return errors.New("NodeInfo exceeds max length of 16384 bytes")
}
m.myNodeInfo = newjson
return nil
}
return err
}
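// Illustrative usage only (not part of the original file): the given value is
// normally a free-form JSON-compatible map, and privacy controls whether the
// build* defaults above are included alongside it:
//
// err := m.setNodeInfo(map[string]interface{}{
// "name": "my-node",
// "contact": "someone@example.com",
// }, false)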
// Add nodeinfo into the cache for a node
func (m *nodeinfo) _addCachedNodeInfo(key crypto.BoxPubKey, payload NodeInfoPayload) {
m.cache[key] = nodeinfoCached{
created: time.Now(),
payload: payload,
}
}
// Get a nodeinfo entry from the cache
func (m *nodeinfo) _getCachedNodeInfo(key crypto.BoxPubKey) (NodeInfoPayload, error) {
if nodeinfo, ok := m.cache[key]; ok {
return nodeinfo.payload, nil
}
return NodeInfoPayload{}, errors.New("No cache entry found")
}
// Handles a nodeinfo request/response - called from the router
func (m *nodeinfo) handleNodeInfo(from phony.Actor, nodeinfo *nodeinfoReqRes) {
m.Act(from, func() {
m._handleNodeInfo(nodeinfo)
})
}
func (m *nodeinfo) _handleNodeInfo(nodeinfo *nodeinfoReqRes) {
if nodeinfo.IsResponse {
m._callback(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
m._addCachedNodeInfo(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
} else {
m._sendNodeInfo(nodeinfo.SendPermPub, nodeinfo.SendCoords, true)
}
}
// Send nodeinfo request or response - called from the router
func (m *nodeinfo) sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
m.Act(nil, func() {
m._sendNodeInfo(key, coords, isResponse)
})
}
func (m *nodeinfo) _sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
loc := m.table.self
nodeinfo := nodeinfoReqRes{
SendCoords: loc.getCoords(),
IsResponse: isResponse,
NodeInfo: m._getNodeInfo(),
}
bs := nodeinfo.encode()
shared := m.core.router.sessions.getSharedKey(&m.core.boxPriv, &key)
payload, nonce := crypto.BoxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
Coords: coords,
ToKey: key,
FromKey: m.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
m.core.router.out(packet)
}


@ -1,118 +0,0 @@
package yggdrasil
import (
"container/heap"
"time"
)
// TODO separate queues per e.g. traffic flow
// For now, we put everything in one queue
type pqStreamID string
type pqPacketInfo struct {
packet []byte
time time.Time
}
type pqStream struct {
id pqStreamID
infos []pqPacketInfo
size uint64
}
type packetQueue struct {
streams []pqStream
size uint64
}
// drop will remove a packet from the queue, returning it to the pool
// returns true if a packet was removed, false otherwise
func (q *packetQueue) drop() bool {
if q.size == 0 {
return false
}
var longestIdx int
for idx := range q.streams {
if q.streams[idx].size > q.streams[longestIdx].size {
longestIdx = idx
}
}
stream := q.streams[longestIdx]
info := stream.infos[0]
if len(stream.infos) > 1 {
stream.infos = stream.infos[1:]
stream.size -= uint64(len(info.packet))
q.streams[longestIdx] = stream
q.size -= uint64(len(info.packet))
heap.Fix(q, longestIdx)
} else {
heap.Remove(q, longestIdx)
}
pool_putBytes(info.packet)
return true
}
func (q *packetQueue) push(packet []byte) {
id := pqStreamID(peer_getPacketCoords(packet)) // just coords for now
info := pqPacketInfo{packet: packet, time: time.Now()}
for idx := range q.streams {
if q.streams[idx].id == id {
q.streams[idx].infos = append(q.streams[idx].infos, info)
q.streams[idx].size += uint64(len(packet))
q.size += uint64(len(packet))
return
}
}
stream := pqStream{id: id, size: uint64(len(packet))}
stream.infos = append(stream.infos, info)
heap.Push(q, stream)
}
func (q *packetQueue) pop() ([]byte, bool) {
if q.size > 0 {
stream := q.streams[0]
info := stream.infos[0]
if len(stream.infos) > 1 {
stream.infos = stream.infos[1:]
stream.size -= uint64(len(info.packet))
q.streams[0] = stream
q.size -= uint64(len(info.packet))
heap.Fix(q, 0)
} else {
heap.Remove(q, 0)
}
return info.packet, true
}
return nil, false
}
////////////////////////////////////////////////////////////////////////////////
// Interface methods for packetQueue to satisfy heap.Interface
func (q *packetQueue) Len() int {
return len(q.streams)
}
func (q *packetQueue) Less(i, j int) bool {
return q.streams[i].infos[0].time.Before(q.streams[j].infos[0].time)
}
func (q *packetQueue) Swap(i, j int) {
q.streams[i], q.streams[j] = q.streams[j], q.streams[i]
}
func (q *packetQueue) Push(x interface{}) {
stream := x.(pqStream)
q.streams = append(q.streams, stream)
q.size += stream.size
}
func (q *packetQueue) Pop() interface{} {
idx := len(q.streams) - 1
stream := q.streams[idx]
q.streams = q.streams[:idx]
q.size -= stream.size
return stream
}
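// Illustrative usage only (not part of the original file): packets are queued
// per destination coords, popped from the stream with the oldest head packet,
// and dropped from the largest stream when over a caller-defined budget
// (maxQueueSize below is hypothetical):
//
// var q packetQueue
// q.push(packet) // packet must be a wire-encoded traffic packet
// for q.size > maxQueueSize {
// q.drop() // frees space from the stream using the most bytes
// }
// if bs, ok := q.pop(); ok {
// // hand bs to the link writer
// }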


@ -1,436 +0,0 @@
package yggdrasil
// TODO cleanup, this file is kind of a mess
// Commented code should be removed
// Live code should be better commented
import (
"encoding/hex"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/Arceliar/phony"
)
// The peers struct represents peers with an active connection.
// Incoming packets are passed to the corresponding peer, which handles them somehow.
// In most cases, this involves passing the packet to the handler for outgoing traffic to another peer.
// In other cases, its link protocol traffic is used to build the spanning tree, in which case this checks signatures and passes the message along to the switch.
type peers struct {
phony.Inbox
core *Core
ports map[switchPort]*peer // use CoW semantics, share updated version with each peer
table *lookupTable // Sent from switch, share updated version with each peer
}
// Initializes the peers struct.
func (ps *peers) init(c *Core) {
ps.core = c
ps.ports = make(map[switchPort]*peer)
ps.table = new(lookupTable)
}
func (ps *peers) reconfigure() {
// This is where reconfiguration would go, if we had anything to do
}
// Returns true if an incoming peer connection to a key is allowed, either
// because the key is in the whitelist or because the whitelist is empty.
func (ps *peers) isAllowedEncryptionPublicKey(box *crypto.BoxPubKey) bool {
boxstr := hex.EncodeToString(box[:])
ps.core.config.Mutex.RLock()
defer ps.core.config.Mutex.RUnlock()
for _, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
if v == boxstr {
return true
}
}
return len(ps.core.config.Current.AllowedEncryptionPublicKeys) == 0
}
// Adds a key to the whitelist.
func (ps *peers) addAllowedEncryptionPublicKey(box string) {
ps.core.config.Mutex.RLock()
defer ps.core.config.Mutex.RUnlock()
ps.core.config.Current.AllowedEncryptionPublicKeys =
append(ps.core.config.Current.AllowedEncryptionPublicKeys, box)
}
// Removes a key from the whitelist.
func (ps *peers) removeAllowedEncryptionPublicKey(box string) {
ps.core.config.Mutex.RLock()
defer ps.core.config.Mutex.RUnlock()
for k, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
if v == box {
ps.core.config.Current.AllowedEncryptionPublicKeys =
append(ps.core.config.Current.AllowedEncryptionPublicKeys[:k],
ps.core.config.Current.AllowedEncryptionPublicKeys[k+1:]...)
}
}
}
// Gets the whitelist of allowed keys for incoming connections.
func (ps *peers) getAllowedEncryptionPublicKeys() []string {
ps.core.config.Mutex.RLock()
defer ps.core.config.Mutex.RUnlock()
return ps.core.config.Current.AllowedEncryptionPublicKeys
}
// Information known about a peer, including their box/sig keys, precomputed shared keys (static and ephemeral) and a handler for their outgoing traffic
type peer struct {
phony.Inbox
core *Core
intf linkInterface
port switchPort
box crypto.BoxPubKey
sig crypto.SigPubKey
shared crypto.BoxSharedKey
linkShared crypto.BoxSharedKey
endpoint string
firstSeen time.Time // To track uptime for getPeers
dinfo *dhtInfo // used to keep the DHT working
// The below aren't actually useful internally, they're just gathered for getPeers statistics
bytesSent uint64
bytesRecvd uint64
ports map[switchPort]*peer
table *lookupTable
queue packetQueue
max uint64
seq uint64 // this and idle are used to detect when to drop packets from queue
idle bool
drop bool // set to true if we're dropping packets from the queue
}
func (ps *peers) updateTables(from phony.Actor, table *lookupTable) {
ps.Act(from, func() {
ps.table = table
ps._updatePeers()
})
}
func (ps *peers) _updatePeers() {
ports := ps.ports
table := ps.table
for _, peer := range ps.ports {
p := peer // peer is mutated during iteration
p.Act(ps, func() {
p.ports = ports
p.table = table
})
}
}
// Creates a new peer with the specified box, sig, and linkShared keys, using the lowest unoccupied port number.
func (ps *peers) _newPeer(box *crypto.BoxPubKey, sig *crypto.SigPubKey, linkShared *crypto.BoxSharedKey, intf linkInterface) *peer {
now := time.Now()
p := peer{box: *box,
core: ps.core,
intf: intf,
sig: *sig,
shared: *crypto.GetSharedKey(&ps.core.boxPriv, box),
linkShared: *linkShared,
firstSeen: now,
}
oldPorts := ps.ports
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
for idx := switchPort(0); true; idx++ {
if _, isIn := newPorts[idx]; !isIn {
p.port = switchPort(idx)
newPorts[p.port] = &p
break
}
}
ps.ports = newPorts
ps._updatePeers()
return &p
}
func (p *peer) _removeSelf() {
p.core.peers.Act(p, func() {
p.core.peers._removePeer(p)
})
}
// Removes a peer for a given port, if one exists.
func (ps *peers) _removePeer(p *peer) {
if q := ps.ports[p.port]; p.port == 0 || q != p {
return
} // Can't remove self peer or nonexistent peer
ps.core.switchTable.forgetPeer(ps, p.port)
oldPorts := ps.ports
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
delete(newPorts, p.port)
p.intf.close()
ps.ports = newPorts
ps._updatePeers()
}
// If called, sends a notification to each peer that they should send a new switch message.
// Mainly called by the switch after an update.
func (ps *peers) sendSwitchMsgs(from phony.Actor) {
ps.Act(from, func() {
for _, peer := range ps.ports {
p := peer
if p.port == 0 {
continue
}
p.Act(ps, p._sendSwitchMsg)
}
})
}
func (ps *peers) updateDHT(from phony.Actor) {
ps.Act(from, func() {
for _, peer := range ps.ports {
p := peer
if p.port == 0 {
continue
}
p.Act(ps, p._updateDHT)
}
})
}
// This must be launched in a separate goroutine by whatever sets up the peer struct.
func (p *peer) start() {
// Just for good measure, immediately send a switch message to this peer when we start
p.Act(nil, p._sendSwitchMsg)
}
func (p *peer) _updateDHT() {
if p.dinfo != nil {
p.core.router.insertPeer(p, p.dinfo)
}
}
func (p *peer) handlePacketFrom(from phony.Actor, packet []byte) {
p.Act(from, func() {
p._handlePacket(packet)
})
}
// Called to handle incoming packets.
// Passes the packet to a handler for that packet type.
func (p *peer) _handlePacket(packet []byte) {
// FIXME this is off by stream padding and msg length overhead, should be done in tcp.go
p.bytesRecvd += uint64(len(packet))
pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen == 0 {
return
}
switch pType {
case wire_Traffic:
p._handleTraffic(packet)
case wire_ProtocolTraffic:
p._handleTraffic(packet)
case wire_LinkProtocolTraffic:
p._handleLinkTraffic(packet)
default:
}
}
// Get the coords of a packet without decoding
func peer_getPacketCoords(packet []byte) []byte {
_, pTypeLen := wire_decode_uint64(packet)
coords, _ := wire_decode_coords(packet[pTypeLen:])
return coords
}
// Called to handle traffic or protocolTraffic packets.
// In either case, this reads from the coords of the packet header, does a switch lookup, and forwards to the next node.
func (p *peer) _handleTraffic(packet []byte) {
if _, isIn := p.table.elems[p.port]; !isIn && p.port != 0 {
// Drop traffic if the peer isn't in the switch
return
}
coords := peer_getPacketCoords(packet)
next := p.table.lookup(coords)
if nPeer, isIn := p.ports[next]; isIn {
nPeer.sendPacketFrom(p, packet)
}
//p.core.switchTable.packetInFrom(p, packet)
}
func (p *peer) sendPacketFrom(from phony.Actor, packet []byte) {
p.Act(from, func() {
p._sendPacket(packet)
})
}
func (p *peer) _sendPacket(packet []byte) {
p.queue.push(packet)
if p.idle {
p.idle = false
p._handleIdle()
} else if p.drop {
for p.queue.size > p.max {
p.queue.drop()
}
}
}
func (p *peer) _handleIdle() {
var packets [][]byte
var size uint64
for {
if packet, success := p.queue.pop(); success {
packets = append(packets, packet)
size += uint64(len(packet))
} else {
break
}
}
p.seq++
if len(packets) > 0 {
p.bytesSent += uint64(size)
p.intf.out(packets)
p.max = p.queue.size
} else {
p.idle = true
}
p.drop = false
}
func (p *peer) notifyBlocked(from phony.Actor) {
p.Act(from, func() {
seq := p.seq
p.Act(nil, func() {
if seq == p.seq {
p.drop = true
p.max = 2*p.queue.size + streamMsgSize
}
})
})
}
// This wraps the packet in the inner (ephemeral) and outer (permanent) crypto layers.
// It sends it to p.linkOut, which bypasses the usual packet queues.
func (p *peer) _sendLinkPacket(packet []byte) {
innerPayload, innerNonce := crypto.BoxSeal(&p.linkShared, packet, nil)
innerLinkPacket := wire_linkProtoTrafficPacket{
Nonce: *innerNonce,
Payload: innerPayload,
}
outerPayload := innerLinkPacket.encode()
bs, nonce := crypto.BoxSeal(&p.shared, outerPayload, nil)
linkPacket := wire_linkProtoTrafficPacket{
Nonce: *nonce,
Payload: bs,
}
packet = linkPacket.encode()
p.intf.linkOut(packet)
}
// Decrypts the outer (permanent) and inner (ephemeral) crypto layers on link traffic.
// Identifies the link traffic type and calls the appropriate handler.
func (p *peer) _handleLinkTraffic(bs []byte) {
packet := wire_linkProtoTrafficPacket{}
if !packet.decode(bs) {
return
}
outerPayload, isOK := crypto.BoxOpen(&p.shared, packet.Payload, &packet.Nonce)
if !isOK {
return
}
innerPacket := wire_linkProtoTrafficPacket{}
if !innerPacket.decode(outerPayload) {
return
}
payload, isOK := crypto.BoxOpen(&p.linkShared, innerPacket.Payload, &innerPacket.Nonce)
if !isOK {
return
}
pType, pTypeLen := wire_decode_uint64(payload)
if pTypeLen == 0 {
return
}
switch pType {
case wire_SwitchMsg:
p._handleSwitchMsg(payload)
default:
}
}
// Gets a switchMsg from the switch, adds signed next-hop info for this peer, and sends it to them.
func (p *peer) _sendSwitchMsg() {
msg := p.table.getMsg()
if msg == nil {
return
}
bs := getBytesForSig(&p.sig, msg)
msg.Hops = append(msg.Hops, switchMsgHop{
Port: p.port,
Next: p.sig,
Sig: *crypto.Sign(&p.core.sigPriv, bs),
})
packet := msg.encode()
p._sendLinkPacket(packet)
}
// Handles a switchMsg from the peer, checking signatures and passing good messages to the switch.
// Also creates a dhtInfo struct and arranges for it to be added to the dht (this is how dht bootstrapping begins).
func (p *peer) _handleSwitchMsg(packet []byte) {
var msg switchMsg
if !msg.decode(packet) {
return
}
if len(msg.Hops) < 1 {
p._removeSelf()
return
}
var loc switchLocator
prevKey := msg.Root
for idx, hop := range msg.Hops {
// Check signatures and collect coords for dht
sigMsg := msg
sigMsg.Hops = msg.Hops[:idx]
loc.coords = append(loc.coords, hop.Port)
bs := getBytesForSig(&hop.Next, &sigMsg)
if !crypto.Verify(&prevKey, bs, &hop.Sig) {
p._removeSelf()
return
}
prevKey = hop.Next
}
p.core.switchTable.Act(p, func() {
if !p.core.switchTable._checkRoot(&msg) {
// Bad switch message
p.Act(&p.core.switchTable, func() {
p.dinfo = nil
})
} else {
// handle the message
p.core.switchTable._handleMsg(&msg, p.port, false)
p.Act(&p.core.switchTable, func() {
// Pass a message to the dht informing it that this peer (still) exists
loc.coords = loc.coords[:len(loc.coords)-1]
p.dinfo = &dhtInfo{
key: p.box,
coords: loc.getCoords(),
}
p._updateDHT()
})
}
})
}
// This generates the bytes that we sign or check the signature of for a switchMsg.
// It begins with the next node's key, followed by the root and the timestamp, followed by coords being advertised to the next node.
func getBytesForSig(next *crypto.SigPubKey, msg *switchMsg) []byte {
var loc switchLocator
for _, hop := range msg.Hops {
loc.coords = append(loc.coords, hop.Port)
}
bs := append([]byte(nil), next[:]...)
bs = append(bs, msg.Root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(msg.TStamp))...)
bs = append(bs, wire_encode_coords(loc.getCoords())...)
return bs
}


@ -1,20 +0,0 @@
package yggdrasil
import "sync"
// Used internally to reduce allocations in the hot loop
// i.e. for packets being switched, or passed between the crypto code and the switch
// For safety reasons, these must not escape this package
var pool = sync.Pool{New: func() interface{} { return []byte(nil) }}
func pool_getBytes(size int) []byte {
bs := pool.Get().([]byte)
if cap(bs) < size {
bs = make([]byte, size)
}
return bs[:size]
}
func pool_putBytes(bs []byte) {
pool.Put(bs)
}
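// Illustrative usage only (not part of the original file): callers take a
// buffer, use it within this package, and return it when finished with it:
//
// bs := pool_getBytes(len(payload))
// copy(bs, payload)
// // ... hand bs to the switch or crypto code ...
// pool_putBytes(bs)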


@ -1,289 +0,0 @@
package yggdrasil
// This part does most of the work to handle packets to/from yourself
// It also manages crypto and dht info
// TODO clean up old/unused code, maybe improve comments on whatever is left
// Send:
// Receive a packet from the adapter
// Look up session (if none exists, trigger a search)
// Hand off to session (which encrypts, etc)
// Session will pass it back to router.out, which hands it off to the self peer
// The self peer triggers a lookup to find which peer to send to next
// And then passes it to that peer's peer.out function
// The peer.out function sends it over the wire to the matching peer
// Recv:
// A packet comes in off the wire, and goes to a peer.handlePacket
// The peer does a lookup, sees no better peer than the self
// Hands it to the self peer.out, which passes it to router.in
// If it's dht/search/etc. traffic, the router passes it to that part
// If it's an encapsulated IPv6 packet, the router looks up the session for it
// The packet is passed to the session, which decrypts it, router.recvPacket
// The router then runs some sanity checks before passing it to the adapter
import (
//"bytes"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/Arceliar/phony"
)
// The router struct has channels to/from the adapter device and a self peer (0), which is how messages are passed between this node and the peers/switch layer.
// The router's phony.Inbox goroutine is responsible for managing all information related to the dht, searches, and crypto sessions.
type router struct {
phony.Inbox
core *Core
addr address.Address
subnet address.Subnet
out func([]byte) // packets we're sending to the network, link to peer's "in"
dht dht
nodeinfo nodeinfo
searches searches
sessions sessions
intf routerInterface
peer *peer
table *lookupTable // has a copy of our locator
}
// Initializes the router struct, which includes setting up channels to/from the adapter.
func (r *router) init(core *Core) {
r.core = core
r.addr = *address.AddrForNodeID(&r.dht.nodeID)
r.subnet = *address.SubnetForNodeID(&r.dht.nodeID)
r.intf.router = r
phony.Block(&r.core.peers, func() {
// FIXME don't block here!
r.peer = r.core.peers._newPeer(&r.core.boxPub, &r.core.sigPub, &crypto.BoxSharedKey{}, &r.intf)
})
r.peer.Act(r, r.peer._handleIdle)
r.out = func(bs []byte) {
r.peer.handlePacketFrom(r, bs)
}
r.nodeinfo.init(r.core)
r.core.config.Mutex.RLock()
r.nodeinfo.setNodeInfo(r.core.config.Current.NodeInfo, r.core.config.Current.NodeInfoPrivacy)
r.core.config.Mutex.RUnlock()
r.dht.init(r)
r.searches.init(r)
r.sessions.init(r)
}
func (r *router) updateTable(from phony.Actor, table *lookupTable) {
r.Act(from, func() {
r.table = table
r.nodeinfo.Act(r, func() {
r.nodeinfo.table = table
})
for _, ses := range r.sessions.sinfos {
sinfo := ses
sinfo.Act(r, func() {
sinfo.table = table
})
}
})
}
// Reconfigures the router and any child modules. This should only ever be run
// by the router actor.
func (r *router) reconfigure() {
// Reconfigure the router
current := r.core.config.GetCurrent()
r.core.log.Println("Reloading NodeInfo...")
if err := r.nodeinfo.setNodeInfo(current.NodeInfo, current.NodeInfoPrivacy); err != nil {
r.core.log.Errorln("Error reloading NodeInfo:", err)
} else {
r.core.log.Infoln("NodeInfo updated")
}
// Reconfigure children
r.dht.reconfigure()
r.searches.reconfigure()
r.sessions.reconfigure()
}
// Starts the tickerLoop goroutine.
func (r *router) start() error {
r.core.log.Infoln("Starting router")
go r.doMaintenance()
return nil
}
// Insert a peer info into the dht, TODO? make the dht a separate actor
func (r *router) insertPeer(from phony.Actor, info *dhtInfo) {
r.Act(from, func() {
r.dht.insertPeer(info)
})
}
// Reset sessions and DHT after the switch sees our coords change
func (r *router) reset(from phony.Actor) {
r.Act(from, func() {
r.sessions.reset()
r.dht.reset()
})
}
// TODO remove reconfigure so this is just a ticker loop
// and then find something better than a ticker loop to schedule things...
func (r *router) doMaintenance() {
phony.Block(r, func() {
// Any periodic maintenance stuff goes here
r.core.switchTable.doMaintenance(r)
r.dht.doMaintenance()
r.sessions.cleanup()
})
time.AfterFunc(time.Second, r.doMaintenance)
}
// Checks incoming traffic type and passes it to the appropriate handler.
func (r *router) _handlePacket(packet []byte) {
pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen == 0 {
return
}
switch pType {
case wire_Traffic:
r._handleTraffic(packet)
case wire_ProtocolTraffic:
r._handleProto(packet)
default:
}
}
// Handles incoming traffic, i.e. encapsulated ordinary IPv6 packets.
// Passes them to the crypto session worker to be decrypted and sent to the adapter.
func (r *router) _handleTraffic(packet []byte) {
p := wire_trafficPacket{}
if !p.decode(packet) {
return
}
sinfo, isIn := r.sessions.getSessionForHandle(&p.Handle)
if !isIn {
return
}
sinfo.recv(r, &p)
}
// Handles protocol traffic by decrypting it, checking its type, and passing it to the appropriate handler for that traffic type.
func (r *router) _handleProto(packet []byte) {
// First parse the packet
p := wire_protoTrafficPacket{}
if !p.decode(packet) {
return
}
// Now try to open the payload
var sharedKey *crypto.BoxSharedKey
if p.ToKey == r.core.boxPub {
// Try to open using our permanent key
sharedKey = r.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
} else {
return
}
bs, isOK := crypto.BoxOpen(sharedKey, p.Payload, &p.Nonce)
if !isOK {
return
}
// Now do something with the bytes in bs...
// send dht messages to dht, sessionRefresh to sessions, data to adapter...
// For data, should check that key and IP match...
bsType, bsTypeLen := wire_decode_uint64(bs)
if bsTypeLen == 0 {
return
}
switch bsType {
case wire_SessionPing:
r._handlePing(bs, &p.FromKey)
case wire_SessionPong:
r._handlePong(bs, &p.FromKey)
case wire_NodeInfoRequest:
fallthrough
case wire_NodeInfoResponse:
r._handleNodeInfo(bs, &p.FromKey)
case wire_DHTLookupRequest:
r._handleDHTReq(bs, &p.FromKey)
case wire_DHTLookupResponse:
r._handleDHTRes(bs, &p.FromKey)
default:
}
}
// Decodes session pings from wire format and passes them to sessions.handlePing where they either create or update a session.
func (r *router) _handlePing(bs []byte, fromKey *crypto.BoxPubKey) {
ping := sessionPing{}
if !ping.decode(bs) {
return
}
ping.SendPermPub = *fromKey
r.sessions.handlePing(&ping)
}
// Handles session pongs (which are really pings with an extra flag to prevent acknowledgement).
func (r *router) _handlePong(bs []byte, fromKey *crypto.BoxPubKey) {
r._handlePing(bs, fromKey)
}
// Decodes dht requests and passes them to dht.handleReq to trigger a lookup/response.
func (r *router) _handleDHTReq(bs []byte, fromKey *crypto.BoxPubKey) {
req := dhtReq{}
if !req.decode(bs) {
return
}
req.Key = *fromKey
r.dht.handleReq(&req)
}
// Decodes dht responses and passes them to dht.handleRes to update the DHT table and further pass them to the search code (if applicable).
func (r *router) _handleDHTRes(bs []byte, fromKey *crypto.BoxPubKey) {
res := dhtRes{}
if !res.decode(bs) {
return
}
res.Key = *fromKey
r.dht.handleRes(&res)
}
// Decodes nodeinfo request
func (r *router) _handleNodeInfo(bs []byte, fromKey *crypto.BoxPubKey) {
req := nodeinfoReqRes{}
if !req.decode(bs) {
return
}
req.SendPermPub = *fromKey
r.nodeinfo.handleNodeInfo(r, &req)
}
////////////////////////////////////////////////////////////////////////////////
// routerInterface is a helper that implements linkInterface
type routerInterface struct {
router *router
}
func (intf *routerInterface) out(bss [][]byte) {
// Note that this is run in the peer's goroutine
intf.router.Act(intf.router.peer, func() {
for _, bs := range bss {
intf.router._handlePacket(bs)
}
})
// This should now immediately make the peer idle again
// So the self-peer shouldn't end up buffering anything
// We let backpressure act as a throttle instead
intf.router.peer._handleIdle()
}
func (intf *routerInterface) linkOut(_ []byte) {}
func (intf *routerInterface) close() {}
func (intf *routerInterface) name() string { return "(self)" }
func (intf *routerInterface) local() string { return "(self)" }
func (intf *routerInterface) remote() string { return "(self)" }
func (intf *routerInterface) interfaceType() string { return "self" }


@ -1,271 +0,0 @@
package yggdrasil
// This thing manages search packets
// The basic idea is as follows:
// We may know a NodeID (with a mask) and want to connect
// We begin a search by sending a dht lookup to ourself
// Each time a node responds, we sort the results and filter to only include useful nodes
// We then periodically send a packet to the first node from the list (after re-filtering)
// This happens in parallel for each node that replies
// Meanwhile, we keep a list of the (up to) 16 closest nodes to the destination that we've visited
// We only consider an unvisited node useful if either the list isn't full or the unvisited node is closer to the destination than the furthest node on the list
// That gives the search some chance to recover if it hits a dead end where a node doesn't know everyone it should
import (
"errors"
"sort"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
// This defines the time after which we time out a search (so it can restart).
const search_RETRY_TIME = 3 * time.Second
const search_STEP_TIME = time.Second
const search_MAX_RESULTS = dht_lookup_size
// Information about an ongoing search.
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
type searchInfo struct {
searches *searches
dest crypto.NodeID
mask crypto.NodeID
time time.Time
visited []*crypto.NodeID // Closest addresses visited so far
callback func(*sessionInfo, error)
// TODO context.Context for timeout and cancellation
send uint64 // log number of requests sent
recv uint64 // log number of responses received
}
// This stores a map of active searches.
type searches struct {
router *router
searches map[crypto.NodeID]*searchInfo
}
// Initializes the searches struct.
func (s *searches) init(r *router) {
s.router = r
s.searches = make(map[crypto.NodeID]*searchInfo)
}
func (s *searches) reconfigure() {
// This is where reconfiguration would go, if we had anything to do
}
// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
func (s *searches) createSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
info := searchInfo{
searches: s,
dest: *dest,
mask: *mask,
time: time.Now(),
callback: callback,
}
s.searches[*dest] = &info
return &info
}
////////////////////////////////////////////////////////////////////////////////
// Checks if there's an ongoing search related to a dhtRes.
// If there is, it adds the response info to the search and triggers a new search step.
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
func (sinfo *searchInfo) handleDHTRes(res *dhtRes) {
if nfo := sinfo.searches.searches[sinfo.dest]; nfo != sinfo {
return // already done
}
if res != nil {
sinfo.recv++
if sinfo.checkDHTRes(res) {
return // Search finished successfully
}
// Use results to start an additional search thread
infos := append([]*dhtInfo(nil), res.Infos...)
infos = sinfo.getAllowedInfos(infos)
if len(infos) > 0 {
sinfo.continueSearch(infos)
}
}
}
// If there has been no response in too long, then this cleans up the search.
// Otherwise, it pops the closest node to the destination (in keyspace) off of the toVisit list and sends a dht ping.
func (sinfo *searchInfo) doSearchStep(infos []*dhtInfo) {
if len(infos) > 0 {
// Send to the next search target
next := infos[0]
rq := dhtReqKey{next.key, sinfo.dest}
sinfo.searches.router.dht.addCallback(&rq, sinfo.handleDHTRes)
sinfo.searches.router.dht.ping(next, &sinfo.dest)
sinfo.send++
}
}
// Get a list of search targets that are close enough to the destination to try
// Requires an initial list as input
func (sinfo *searchInfo) getAllowedInfos(infos []*dhtInfo) []*dhtInfo {
var temp []*dhtInfo
for _, info := range infos {
if false && len(sinfo.visited) < search_MAX_RESULTS {
// We're not full on results yet, so don't block anything yet
} else if !dht_ordered(&sinfo.dest, info.getNodeID(), sinfo.visited[len(sinfo.visited)-1]) {
// Too far away
continue
}
var known bool
for _, nfo := range sinfo.visited {
if *nfo == *info.getNodeID() {
known = true
break
}
}
if !known {
temp = append(temp, info)
}
}
infos = append(infos[:0], temp...) // restrict to only the allowed infos
sort.SliceStable(infos, func(i, j int) bool {
// Should return true if i is closer to the destination than j
return dht_ordered(&sinfo.dest, infos[i].getNodeID(), infos[j].getNodeID())
}) // Sort infos to start with the closest
if len(infos) > search_MAX_RESULTS {
infos = infos[:search_MAX_RESULTS] // Limit max number of infos
}
return infos
}
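// Illustrative sketch of the filter above (added commentary, not part of the original file):
// the visited list is kept sorted closest-first, so its last entry is the furthest node visited
// so far. An unvisited candidate is only kept when dht_ordered(&sinfo.dest, candidate, furthest)
// holds, i.e. the candidate sits strictly between the destination and that furthest visited node
// in keyspace; anything else is treated as a step backwards and dropped before the sort and
// truncation to search_MAX_RESULTS.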
// Run doSearchStep and schedule another continueSearch to happen after search_STEP_TIME.
// Must not be called with an empty list of infos
func (sinfo *searchInfo) continueSearch(infos []*dhtInfo) {
sinfo.doSearchStep(infos)
infos = infos[1:] // Remove the node we just tried
// In case there's no response, try the next node in infos later
time.AfterFunc(search_STEP_TIME, func() {
sinfo.searches.router.Act(nil, func() {
// FIXME this keeps the search alive forever if not for the searches map, fix that
newSearchInfo := sinfo.searches.searches[sinfo.dest]
if newSearchInfo != sinfo {
return
}
// Get good infos here instead of at the top, to make sure we can always start things off with a continueSearch call to ourself
infos = sinfo.getAllowedInfos(infos)
if len(infos) > 0 {
sinfo.continueSearch(infos)
}
})
})
}
// Initially start a search
func (sinfo *searchInfo) startSearch() {
var infos []*dhtInfo
infos = append(infos, &dhtInfo{
key: sinfo.searches.router.core.boxPub,
coords: sinfo.searches.router.table.self.getCoords(),
})
// Start the search by asking ourself, useful if we're the destination
sinfo.continueSearch(infos)
// Start a timer to clean up the search if everything times out
var cleanupFunc func()
cleanupFunc = func() {
sinfo.searches.router.Act(nil, func() {
// FIXME this keeps the search alive forever if not for the searches map, fix that
newSearchInfo := sinfo.searches.searches[sinfo.dest]
if newSearchInfo != sinfo {
return
}
elapsed := time.Since(sinfo.time)
if elapsed > search_RETRY_TIME {
// cleanup
delete(sinfo.searches.searches, sinfo.dest)
sinfo.searches.router.core.log.Debugln("search timeout:", &sinfo.dest, sinfo.send, sinfo.recv)
sinfo.callback(nil, errors.New("search reached dead end"))
return
}
time.AfterFunc(search_RETRY_TIME-elapsed, cleanupFunc)
})
}
time.AfterFunc(search_RETRY_TIME, cleanupFunc)
}
// Calls createSearch and initializes the iterative search parts of the struct before returning it.
func (s *searches) newIterSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
sinfo := s.createSearch(dest, mask, callback)
sinfo.visited = append(sinfo.visited, &s.router.dht.nodeID)
return sinfo
}
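// Usage sketch (illustrative, not part of the original file; it assumes the caller runs on the
// router actor and supplies a callback, as the real session setup code does elsewhere):
//
//	sinfo := r.searches.newIterSearch(&destNodeID, &destMask, func(sess *sessionInfo, err error) {
//		// hand sess (or err) back to whoever asked for the lookup
//	})
//	sinfo.startSearch()
//
// newIterSearch only registers the search and seeds visited with our own NodeID; nothing goes
// on the wire until startSearch fires the first self-directed step.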
// Checks if a dhtRes is good (called by handleDHTRes).
// If the response is from the target, get/create a session, trigger a session ping, and return true.
// Otherwise return false.
func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
from := dhtInfo{key: res.Key, coords: res.Coords}
them := from.getNodeID()
var known bool
for _, v := range sinfo.visited {
if *v == *them {
known = true
break
}
}
if !known {
if len(sinfo.visited) < search_MAX_RESULTS || dht_ordered(&sinfo.dest, them, sinfo.visited[len(sinfo.visited)-1]) {
// Closer to the destination than the threshold, so update visited
sinfo.searches.router.core.log.Debugln("Updating search:", &sinfo.dest, them, sinfo.send, sinfo.recv)
sinfo.visited = append(sinfo.visited, them)
sort.SliceStable(sinfo.visited, func(i, j int) bool {
// Should return true if i is closer to the destination than j
return dht_ordered(&sinfo.dest, sinfo.visited[i], sinfo.visited[j])
}) // Sort infos to start with the closest
if len(sinfo.visited) > search_MAX_RESULTS {
sinfo.visited = sinfo.visited[:search_MAX_RESULTS]
}
sinfo.time = time.Now()
}
}
var destMasked crypto.NodeID
var themMasked crypto.NodeID
for idx := 0; idx < crypto.NodeIDLen; idx++ {
destMasked[idx] = sinfo.dest[idx] & sinfo.mask[idx]
themMasked[idx] = them[idx] & sinfo.mask[idx]
}
if themMasked != destMasked {
return false
}
finishSearch := func(sess *sessionInfo, err error) {
if sess != nil {
// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
sess.Act(sinfo.searches.router, func() { sess.coords = res.Coords })
sess.ping(sinfo.searches.router)
}
if err != nil {
sinfo.callback(nil, err)
} else {
sinfo.callback(sess, nil)
}
// Cleanup
if _, isIn := sinfo.searches.searches[sinfo.dest]; isIn {
sinfo.searches.router.core.log.Debugln("Finished search:", &sinfo.dest, sinfo.send, sinfo.recv)
delete(sinfo.searches.searches, res.Dest)
}
}
// They match, so create a session and send a sessionRequest
var err error
sess, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key)
if !isIn {
// Don't already have a session
sess = sinfo.searches.router.sessions.createSession(&res.Key)
if sess == nil {
err = errors.New("session not allowed")
} else if _, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key); !isIn {
panic("This should never happen")
}
} else {
err = errors.New("session already exists")
}
finishSearch(sess, err)
return true
}

View File

@ -1,526 +0,0 @@
package yggdrasil
// This is the session manager
// It's responsible for keeping track of open sessions to other nodes
// The session information consists of crypto keys and coords
import (
"bytes"
"sync"
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/address"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/yggdrasil-network/yggdrasil-go/src/util"
"github.com/Arceliar/phony"
)
// All the information we know about an active session.
// This includes coords, permanent and ephemeral keys, handles and nonces, various sorts of timing information for timeout and maintenance, and some metadata for the admin API.
type sessionInfo struct {
phony.Inbox // Protects all of the below, use it any time you read/change the contents of a session
sessions *sessions //
theirAddr address.Address //
theirSubnet address.Subnet //
theirPermPub crypto.BoxPubKey //
theirSesPub crypto.BoxPubKey //
mySesPub crypto.BoxPubKey //
mySesPriv crypto.BoxPrivKey //
sharedPermKey crypto.BoxSharedKey // used for session pings
sharedSesKey crypto.BoxSharedKey // derived from session keys
theirHandle crypto.Handle //
myHandle crypto.Handle //
theirNonce crypto.BoxNonce //
myNonce crypto.BoxNonce //
theirMTU MTU //
myMTU MTU //
wasMTUFixed bool // Was the MTU fixed by a receive error?
timeOpened time.Time // Time the session was opened
time time.Time // Time we last received a packet
mtuTime time.Time // time myMTU was last changed
pingTime time.Time // time the first ping was sent since the last received packet
coords []byte // coords of destination
reset bool // reset if coords change
tstamp int64 // ATOMIC - tstamp from their last session ping, replay attack mitigation
bytesSent uint64 // Bytes of real traffic sent in this session
bytesRecvd uint64 // Bytes of real traffic received in this session
init chan struct{} // Closed when the first session pong arrives, used to signal that the session is ready for initial use
cancel util.Cancellation // Used to terminate workers
conn *Conn // The associated Conn object
callbacks []chan func() // Finished work from crypto workers
table *lookupTable // table.self is a locator where we get our coords
}
// Represents a session ping/pong packet, and includes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.
type sessionPing struct {
SendPermPub crypto.BoxPubKey // Sender's permanent key
Handle crypto.Handle // Random number to ID session
SendSesPub crypto.BoxPubKey // Session key to use
Coords []byte //
Tstamp int64 // unix time, but the only real requirement is that it increases
IsPong bool //
MTU MTU //
}
// Updates session info in response to a ping, after checking that the ping is OK.
// Returns true if the session was updated, or false otherwise.
func (s *sessionInfo) _update(p *sessionPing) bool {
if !(p.Tstamp > s.tstamp) {
// To protect against replay attacks
return false
}
if p.SendPermPub != s.theirPermPub {
// Should only happen if two sessions got the same handle
// That shouldn't be allowed anyway, but if it happens then let one time out
return false
}
if p.SendSesPub != s.theirSesPub {
s.theirSesPub = p.SendSesPub
s.theirHandle = p.Handle
s.sharedSesKey = *crypto.GetSharedKey(&s.mySesPriv, &s.theirSesPub)
s.theirNonce = crypto.BoxNonce{}
}
if p.MTU >= 1280 || p.MTU == 0 {
s.theirMTU = p.MTU
if s.conn != nil {
s.conn.setMTU(s, s._getMTU())
}
}
if !bytes.Equal(s.coords, p.Coords) {
// allocate enough space for additional coords
s.coords = append(make([]byte, 0, len(p.Coords)+11), p.Coords...)
}
s.time = time.Now()
s.tstamp = p.Tstamp
s.reset = false
defer func() { recover() }() // Recover if the below panics
select {
case <-s.init:
default:
// Unblock anything waiting for the session to initialize
close(s.init)
}
return true
}
// Struct of all active sessions.
// Sessions are indexed by handle.
// Additionally, stores maps of address/subnet onto keys, and keys onto handles.
type sessions struct {
router *router
listener *Listener
listenerMutex sync.Mutex
lastCleanup time.Time
isAllowedHandler func(pubkey *crypto.BoxPubKey, initiator bool) bool // Returns true or false if session setup is allowed
isAllowedMutex sync.RWMutex // Protects the above
myMaximumMTU MTU // Maximum allowed session MTU
permShared map[crypto.BoxPubKey]*crypto.BoxSharedKey // Maps known permanent keys to their shared key, used by DHT a lot
sinfos map[crypto.Handle]*sessionInfo // Maps handle onto session info
byTheirPerm map[crypto.BoxPubKey]*crypto.Handle // Maps theirPermPub onto handle
}
// Initializes the session struct.
func (ss *sessions) init(r *router) {
ss.router = r
ss.permShared = make(map[crypto.BoxPubKey]*crypto.BoxSharedKey)
ss.sinfos = make(map[crypto.Handle]*sessionInfo)
ss.byTheirPerm = make(map[crypto.BoxPubKey]*crypto.Handle)
ss.lastCleanup = time.Now()
ss.myMaximumMTU = 65535
}
func (ss *sessions) reconfigure() {
ss.router.Act(nil, func() {
for _, session := range ss.sinfos {
sinfo, mtu := session, ss.myMaximumMTU
sinfo.Act(ss.router, func() {
sinfo.myMTU = mtu
})
session.ping(ss.router)
}
})
}
// Determines whether the session with a given public key is allowed based on
// session firewall rules.
func (ss *sessions) isSessionAllowed(pubkey *crypto.BoxPubKey, initiator bool) bool {
ss.isAllowedMutex.RLock()
defer ss.isAllowedMutex.RUnlock()
if ss.isAllowedHandler == nil {
return true
}
return ss.isAllowedHandler(pubkey, initiator)
}
// Gets the session corresponding to a given handle.
func (ss *sessions) getSessionForHandle(handle *crypto.Handle) (*sessionInfo, bool) {
sinfo, isIn := ss.sinfos[*handle]
return sinfo, isIn
}
// Gets a session corresponding to a permanent key used by the remote node.
func (ss *sessions) getByTheirPerm(key *crypto.BoxPubKey) (*sessionInfo, bool) {
h, isIn := ss.byTheirPerm[*key]
if !isIn {
return nil, false
}
sinfo, isIn := ss.getSessionForHandle(h)
return sinfo, isIn
}
// Creates a new session and lazily cleans up old existing sessions. This
// includes initializing session info to sane defaults (e.g. lowest supported
// MTU).
func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
// TODO: this check definitely needs to be moved
if !ss.isSessionAllowed(theirPermKey, true) {
return nil
}
sinfo := sessionInfo{}
sinfo.sessions = ss
sinfo.theirPermPub = *theirPermKey
sinfo.sharedPermKey = *ss.getSharedKey(&ss.router.core.boxPriv, &sinfo.theirPermPub)
pub, priv := crypto.NewBoxKeys()
sinfo.mySesPub = *pub
sinfo.mySesPriv = *priv
sinfo.myNonce = *crypto.NewBoxNonce()
sinfo.theirMTU = 1280
sinfo.myMTU = ss.myMaximumMTU
now := time.Now()
sinfo.timeOpened = now
sinfo.time = now
sinfo.mtuTime = now
sinfo.pingTime = now
sinfo.init = make(chan struct{})
sinfo.cancel = util.NewCancellation()
higher := false
for idx := range ss.router.core.boxPub {
if ss.router.core.boxPub[idx] > sinfo.theirPermPub[idx] {
higher = true
break
} else if ss.router.core.boxPub[idx] < sinfo.theirPermPub[idx] {
break
}
}
if higher {
// higher => odd nonce
sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
} else {
// lower => even nonce
sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
}
sinfo.myHandle = *crypto.NewHandle()
sinfo.theirAddr = *address.AddrForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
sinfo.theirSubnet = *address.SubnetForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
sinfo.table = ss.router.table
ss.sinfos[sinfo.myHandle] = &sinfo
ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
return &sinfo
}
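// Note on the nonce parity above (added commentary, not part of the original file): both ends of
// a session derive the same shared session key, so they must never encrypt with the same nonce.
// Comparing the permanent box keys byte-by-byte gives a stable ordering: the "higher" side forces
// its nonce odd (|= 0x01) and the "lower" side forces it even (&= 0xfe), so the odd/even split
// keeps the two sides' nonces from colliding, assuming each side's Increment preserves the parity
// of its own counter.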
func (ss *sessions) cleanup() {
// Time thresholds almost certainly could use some adjusting
for k := range ss.permShared {
// Delete a key, to make sure this eventually shrinks to 0
delete(ss.permShared, k)
break
}
if time.Since(ss.lastCleanup) < time.Minute {
return
}
permShared := make(map[crypto.BoxPubKey]*crypto.BoxSharedKey, len(ss.permShared))
for k, v := range ss.permShared {
permShared[k] = v
}
ss.permShared = permShared
sinfos := make(map[crypto.Handle]*sessionInfo, len(ss.sinfos))
for k, v := range ss.sinfos {
sinfos[k] = v
}
ss.sinfos = sinfos
byTheirPerm := make(map[crypto.BoxPubKey]*crypto.Handle, len(ss.byTheirPerm))
for k, v := range ss.byTheirPerm {
byTheirPerm[k] = v
}
ss.byTheirPerm = byTheirPerm
ss.lastCleanup = time.Now()
}
func (sinfo *sessionInfo) doRemove() {
sinfo.sessions.router.Act(nil, func() {
sinfo.sessions.removeSession(sinfo)
})
}
// Closes a session, removing it from sessions maps.
func (ss *sessions) removeSession(sinfo *sessionInfo) {
if s := sinfo.sessions.sinfos[sinfo.myHandle]; s == sinfo {
delete(sinfo.sessions.sinfos, sinfo.myHandle)
delete(sinfo.sessions.byTheirPerm, sinfo.theirPermPub)
}
}
// Returns a session ping appropriate for the given session info.
func (sinfo *sessionInfo) _getPing() sessionPing {
coords := sinfo.table.self.getCoords()
ping := sessionPing{
SendPermPub: sinfo.sessions.router.core.boxPub,
Handle: sinfo.myHandle,
SendSesPub: sinfo.mySesPub,
Tstamp: time.Now().Unix(),
Coords: coords,
MTU: sinfo.myMTU,
}
sinfo.myNonce.Increment()
return ping
}
// Gets the shared key for a pair of box keys.
// Used to cache recently used shared keys for protocol traffic.
// This comes up with dht req/res and session ping/pong traffic.
func (ss *sessions) getSharedKey(myPriv *crypto.BoxPrivKey,
theirPub *crypto.BoxPubKey) *crypto.BoxSharedKey {
return crypto.GetSharedKey(myPriv, theirPub)
// FIXME concurrency issues with the below, so for now we just burn the CPU every time
if skey, isIn := ss.permShared[*theirPub]; isIn {
return skey
}
// First do some cleanup
const maxKeys = 1024
for key := range ss.permShared {
// Remove a random key until the store is small enough
if len(ss.permShared) < maxKeys {
break
}
delete(ss.permShared, key)
}
ss.permShared[*theirPub] = crypto.GetSharedKey(myPriv, theirPub)
return ss.permShared[*theirPub]
}
// Sends a session ping by calling sendPingPong in ping mode.
func (sinfo *sessionInfo) ping(from phony.Actor) {
sinfo.Act(from, func() {
sinfo._sendPingPong(false)
})
}
// Calls _getPing, sets the appropriate ping/pong flag, encodes to wire format, and sends it.
// Updates the time the last ping was sent in the session info.
func (sinfo *sessionInfo) _sendPingPong(isPong bool) {
ping := sinfo._getPing()
ping.IsPong = isPong
bs := ping.encode()
payload, nonce := crypto.BoxSeal(&sinfo.sharedPermKey, bs, nil)
p := wire_protoTrafficPacket{
Coords: sinfo.coords,
ToKey: sinfo.theirPermPub,
FromKey: sinfo.sessions.router.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
// TODO rewrite the below if/when the peer struct becomes an actor, to not go through the router first
sinfo.sessions.router.Act(sinfo, func() { sinfo.sessions.router.out(packet) })
if sinfo.pingTime.Before(sinfo.time) {
sinfo.pingTime = time.Now()
}
}
func (sinfo *sessionInfo) setConn(from phony.Actor, conn *Conn) {
sinfo.Act(from, func() {
sinfo.conn = conn
sinfo.conn.setMTU(sinfo, sinfo._getMTU())
})
}
// Handles a session ping, creating a session if needed and calling update, then possibly responding with a pong if the ping was in ping mode and the update was successful.
// If the session has a packet cached (common when first setting up a session), it will be sent.
func (ss *sessions) handlePing(ping *sessionPing) {
// Get the corresponding session (or create a new session)
sinfo, isIn := ss.getByTheirPerm(&ping.SendPermPub)
switch {
case ping.IsPong: // This is a response, not an initial ping, so ignore it.
case isIn: // Session already exists
case !ss.isSessionAllowed(&ping.SendPermPub, false): // Session is not allowed
default:
ss.listenerMutex.Lock()
if ss.listener != nil {
// This is a ping from an allowed node for which no session exists, and we have a listener ready to handle sessions.
// We need to create a session and pass it to the listener.
sinfo = ss.createSession(&ping.SendPermPub)
if s, _ := ss.getByTheirPerm(&ping.SendPermPub); s != sinfo {
panic("This should not happen")
}
conn := newConn(ss.router.core, crypto.GetNodeID(&sinfo.theirPermPub), &crypto.NodeID{}, sinfo)
for i := range conn.nodeMask {
conn.nodeMask[i] = 0xFF
}
sinfo.setConn(ss.router, conn)
c := ss.listener.conn
go func() { c <- conn }()
}
ss.listenerMutex.Unlock()
}
if sinfo != nil {
sinfo.Act(ss.router, func() {
// Update the session
if !sinfo._update(ping) { /*panic("Should not happen in testing")*/
return
}
if !ping.IsPong {
sinfo._sendPingPong(true)
}
})
}
}
// Get the MTU of the session.
// Will be equal to the smaller of this node's MTU or the remote node's MTU.
// If sending over links with a maximum message size (this was a thing with the old UDP code), it could be further lowered, to a minimum of 1280.
func (sinfo *sessionInfo) _getMTU() MTU {
if sinfo.theirMTU == 0 || sinfo.myMTU == 0 {
return 0
}
if sinfo.theirMTU < sinfo.myMTU {
return sinfo.theirMTU
}
return sinfo.myMTU
}
// Checks if a packet's nonce is newer than any previously received
func (sinfo *sessionInfo) _nonceIsOK(theirNonce *crypto.BoxNonce) bool {
return theirNonce.Minus(&sinfo.theirNonce) > 0
}
// Updates the most recently seen nonce (and the session activity time) if the given nonce is newer than any previously received
func (sinfo *sessionInfo) _updateNonce(theirNonce *crypto.BoxNonce) {
if theirNonce.Minus(&sinfo.theirNonce) > 0 {
// This nonce is the newest we've seen, so make a note of that
sinfo.theirNonce = *theirNonce
sinfo.time = time.Now()
}
}
// Resets all sessions to an uninitialized state.
// Called after coord changes, so attempts to use a session will trigger a new ping and notify the remote end of the coord change.
// Only call this from the router actor.
func (ss *sessions) reset() {
for _, _sinfo := range ss.sinfos {
sinfo := _sinfo // So we can safely put it in a closure
sinfo.Act(ss.router, func() {
sinfo.reset = true
})
}
}
////////////////////////////////////////////////////////////////////////////////
//////////////////////////// Worker Functions Below ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
type sessionCryptoManager struct {
phony.Inbox
}
func (m *sessionCryptoManager) workerGo(from phony.Actor, f func()) {
m.Act(from, func() {
util.WorkerGo(f)
})
}
var manager = sessionCryptoManager{}
type FlowKeyMessage struct {
FlowKey uint64
Message []byte
}
func (sinfo *sessionInfo) recv(from phony.Actor, packet *wire_trafficPacket) {
sinfo.Act(from, func() {
sinfo._recvPacket(packet)
})
}
func (sinfo *sessionInfo) _recvPacket(p *wire_trafficPacket) {
select {
case <-sinfo.init:
default:
return
}
if !sinfo._nonceIsOK(&p.Nonce) {
return
}
k := sinfo.sharedSesKey
var isOK bool
var bs []byte
ch := make(chan func(), 1)
poolFunc := func() {
bs, isOK = crypto.BoxOpen(&k, p.Payload, &p.Nonce)
callback := func() {
if !isOK || k != sinfo.sharedSesKey || !sinfo._nonceIsOK(&p.Nonce) {
// Either we failed to decrypt, the session key was updated, or a newer
// nonce was already processed while this packet was being decrypted
return
}
sinfo._updateNonce(&p.Nonce)
sinfo.bytesRecvd += uint64(len(bs))
sinfo.conn.recvMsg(sinfo, bs)
}
ch <- callback
sinfo.checkCallbacks()
}
sinfo.callbacks = append(sinfo.callbacks, ch)
manager.workerGo(sinfo, poolFunc)
}
func (sinfo *sessionInfo) _send(msg FlowKeyMessage) {
select {
case <-sinfo.init:
default:
return
}
sinfo.bytesSent += uint64(len(msg.Message))
coords := append([]byte(nil), sinfo.coords...)
if msg.FlowKey != 0 {
coords = append(coords, 0)
coords = append(coords, wire_encode_uint64(msg.FlowKey)...)
}
p := wire_trafficPacket{
Coords: coords,
Handle: sinfo.theirHandle,
Nonce: sinfo.myNonce,
}
sinfo.myNonce.Increment()
k := sinfo.sharedSesKey
ch := make(chan func(), 1)
poolFunc := func() {
p.Payload, _ = crypto.BoxSeal(&k, msg.Message, &p.Nonce)
packet := p.encode()
callback := func() {
sinfo.sessions.router.Act(sinfo, func() {
sinfo.sessions.router.out(packet)
})
}
ch <- callback
sinfo.checkCallbacks()
}
sinfo.callbacks = append(sinfo.callbacks, ch)
manager.workerGo(sinfo, poolFunc)
}
func (sinfo *sessionInfo) checkCallbacks() {
sinfo.Act(nil, func() {
if len(sinfo.callbacks) > 0 {
select {
case callback := <-sinfo.callbacks[0]:
sinfo.callbacks = sinfo.callbacks[1:]
callback()
sinfo.checkCallbacks()
default:
}
}
})
}
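// Explanatory note on the pattern above (added commentary, not part of the original file): each
// packet appends a fresh one-slot channel to sinfo.callbacks *before* its crypto work is handed
// to the worker pool. Workers may finish out of order, but checkCallbacks only ever drains the
// channel at the head of the queue, so the resulting callbacks run in the same order the packets
// were queued, without blocking the session actor while BoxSeal/BoxOpen run in the background.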

View File

@ -1,91 +0,0 @@
package yggdrasil
import (
"errors"
"github.com/Arceliar/phony"
)
type Simlink struct {
phony.Inbox
rch chan []byte
dest *Simlink
link *link
started bool
}
func (s *Simlink) readMsg() ([]byte, error) {
bs, ok := <-s.rch
if !ok {
return nil, errors.New("read from closed Simlink")
}
return bs, nil
}
func (s *Simlink) _recvMetaBytes() ([]byte, error) {
return s.readMsg()
}
func (s *Simlink) _sendMetaBytes(bs []byte) error {
_, err := s.writeMsgs([][]byte{bs})
return err
}
func (s *Simlink) close() error {
defer func() { recover() }()
close(s.rch)
return nil
}
func (s *Simlink) writeMsgs(msgs [][]byte) (int, error) {
if s.dest == nil {
return 0, errors.New("write to unpaired Simlink")
}
var size int
for _, msg := range msgs {
size += len(msg)
bs := append([]byte(nil), msg...)
phony.Block(s, func() {
s.dest.Act(s, func() {
defer func() { recover() }()
s.dest.rch <- bs
})
})
}
return size, nil
}
func (c *Core) NewSimlink() *Simlink {
s := &Simlink{rch: make(chan []byte, 1)}
n := "Simlink"
var err error
s.link, err = c.links.create(s, n, n, n, n, false, true, linkOptions{})
if err != nil {
panic(err)
}
return s
}
func (s *Simlink) SetDestination(dest *Simlink) error {
var err error
phony.Block(s, func() {
if s.dest != nil {
err = errors.New("destination already set")
} else {
s.dest = dest
}
})
return err
}
func (s *Simlink) Start() error {
var err error
phony.Block(s, func() {
if s.started {
err = errors.New("already started")
} else {
s.started = true
go s.link.handler()
}
})
return err
}

View File

@ -1,119 +0,0 @@
package yggdrasil
import (
"bufio"
"errors"
"fmt"
"io"
"net"
)
// Test that this matches the interface we expect
var _ = linkMsgIO(&stream{})
type stream struct {
rwc io.ReadWriteCloser
inputBuffer *bufio.Reader
outputBuffer net.Buffers
}
func (s *stream) close() error {
return s.rwc.Close()
}
const streamMsgSize = 2048 + 65535
var streamMsg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"
func (s *stream) init(rwc io.ReadWriteCloser) {
// TODO have this also do the metadata handshake and create the peer struct
s.rwc = rwc
// TODO call something to do the metadata exchange
s.inputBuffer = bufio.NewReaderSize(s.rwc, 2*streamMsgSize)
}
// writeMsgs writes messages with stream padding, and is *not* thread safe.
func (s *stream) writeMsgs(bss [][]byte) (int, error) {
buf := s.outputBuffer[:0]
var written int
for _, bs := range bss {
buf = append(buf, streamMsg[:])
buf = append(buf, wire_encode_uint64(uint64(len(bs))))
buf = append(buf, bs)
written += len(bs)
}
s.outputBuffer = buf[:0] // So we can reuse the same underlying array later
_, err := buf.WriteTo(s.rwc)
for _, bs := range bss {
pool_putBytes(bs)
}
// TODO only include number of bytes from bs *successfully* written?
return written, err
}
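// Worked example of the framing above (illustrative, assuming a 3-byte payload):
//
//	payload := []byte{0x01, 0x02, 0x03}
//	// bytes on the wire:
//	//   0xde 0xad 0xb1 0x75   streamMsg padding ("dead bits")
//	//   0x03                  wire_encode_uint64(3), a single length byte
//	//   0x01 0x02 0x03        the payload itself
//
// readMsgFromBuffer reverses this: it checks the four magic bytes, decodes the variable-length
// length field, and then reads exactly that many payload bytes.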
// readMsg reads a message from the stream, accounting for stream padding, and is *not* thread safe.
func (s *stream) readMsg() ([]byte, error) {
for {
bs, err := s.readMsgFromBuffer()
if err != nil {
return nil, fmt.Errorf("message error: %v", err)
}
return bs, err
}
}
// Writes metadata bytes without stream padding, meant to be temporary
func (s *stream) _sendMetaBytes(metaBytes []byte) error {
var written int
for written < len(metaBytes) {
n, err := s.rwc.Write(metaBytes[written:])
written += n
if err != nil {
return err
}
}
return nil
}
// Reads metadata bytes without stream padding, meant to be temporary
func (s *stream) _recvMetaBytes() ([]byte, error) {
var meta version_metadata
frag := meta.encode()
metaBytes := make([]byte, 0, len(frag))
for len(metaBytes) < len(frag) {
n, err := s.rwc.Read(frag)
if err != nil {
return nil, err
}
metaBytes = append(metaBytes, frag[:n]...)
}
return metaBytes, nil
}
// Reads bytes from the underlying rwc and returns 1 full message
func (s *stream) readMsgFromBuffer() ([]byte, error) {
pad := streamMsg // Copy
_, err := io.ReadFull(s.inputBuffer, pad[:])
if err != nil {
return nil, err
} else if pad != streamMsg {
return nil, errors.New("bad message")
}
lenSlice := make([]byte, 0, 10)
// FIXME this nextByte stuff depends on wire.go format, kind of ugly to have it here
nextByte := byte(0xff)
for nextByte > 127 {
nextByte, err = s.inputBuffer.ReadByte()
if err != nil {
return nil, err
}
lenSlice = append(lenSlice, nextByte)
}
msgLen, _ := wire_decode_uint64(lenSlice)
if msgLen > streamMsgSize {
return nil, errors.New("oversized message")
}
msg := pool_getBytes(int(msgLen))
_, err = io.ReadFull(s.inputBuffer, msg)
return msg, err
}

View File

@ -1,647 +0,0 @@
package yggdrasil
// This part constructs a spanning tree of the network
// It routes packets based on distance on the spanning tree
// In general, this is *not* equivalent to routing on the tree
// It falls back to the tree in the worst case, but it can take shortcuts too
// This is the part that makes routing reasonably efficient on scale-free graphs
// TODO document/comment everything in a lot more detail
// TODO? use a pre-computed lookup table (python version had this)
// A little annoying to do with constant changes from backpressure
import (
"time"
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
"github.com/Arceliar/phony"
)
const (
switch_timeout = time.Minute
switch_updateInterval = switch_timeout / 2
switch_throttle = switch_updateInterval / 2
switch_faster_threshold = 240 // Number of switch updates before switching to a faster parent
)
// The switch locator represents the topology and network state dependent info about a node, minus the signatures that go with it.
// Nodes will pick the best root they see, provided that the root continues to push out updates with new timestamps.
// The coords represent a path from the root to a node.
// This path is generally part of a spanning tree, except possibly the last hop (it can loop when sending coords to your parent, but they see this and know not to use a looping path).
type switchLocator struct {
root crypto.SigPubKey
tstamp int64
coords []switchPort
}
// Returns true if the first sigPubKey has a higher TreeID.
func firstIsBetter(first, second *crypto.SigPubKey) bool {
// Higher TreeID is better
ftid := crypto.GetTreeID(first)
stid := crypto.GetTreeID(second)
for idx := 0; idx < len(ftid); idx++ {
if ftid[idx] == stid[idx] {
continue
}
return ftid[idx] > stid[idx]
}
// Edge case, when comparing identical IDs
return false
}
// Returns a copy of the locator which can safely be mutated.
func (l *switchLocator) clone() switchLocator {
// Used to create a deep copy for use in messages
// Copy required because we need to mutate coords before sending
// (By appending the port from us to the destination)
loc := *l
loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
copy(loc.coords, l.coords)
return loc
}
// Gets the distance a locator is from the provided destination coords, with the coords provided in []byte format (used to compress integers sent over the wire).
func (l *switchLocator) dist(dest []byte) int {
// Returns distance (on the tree) from these coords
offset := 0
fdc := 0
for {
if fdc >= len(l.coords) {
break
}
coord, length := wire_decode_uint64(dest[offset:])
if length == 0 {
break
}
if l.coords[fdc] != switchPort(coord) {
break
}
fdc++
offset += length
}
dist := len(l.coords[fdc:])
for {
_, length := wire_decode_uint64(dest[offset:])
if length == 0 {
break
}
dist++
offset += length
}
return dist
}
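// Worked example (illustrative): with l.coords = [1 2 3] and dest encoding the coords [1 2 4 5],
// the shared prefix is [1 2], so fdc stops at 2. That leaves one hop from this locator up towards
// the common ancestor (the trailing 3) plus the two remaining destination coords (4 and 5), for a
// tree distance of 3.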
func (l *switchLocator) ldist(sl *switchLocator) int {
lca := -1
for idx := 0; idx < len(l.coords); idx++ {
if idx >= len(sl.coords) {
break
}
if l.coords[idx] != sl.coords[idx] {
break
}
lca = idx
}
return len(l.coords) + len(sl.coords) - 2*(lca+1)
}
// Gets coords in wire encoded format, with *no* length prefix.
func (l *switchLocator) getCoords() []byte {
bs := make([]byte, 0, len(l.coords))
for _, coord := range l.coords {
c := wire_encode_uint64(uint64(coord))
bs = append(bs, c...)
}
return bs
}
// Returns true if this locator represents an ancestor of the locator given as an argument.
// Ancestor means that it's the parent node, or the parent of parent, and so on...
func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
if x.root != y.root {
return false
}
if len(x.coords) > len(y.coords) {
return false
}
for idx := range x.coords {
if x.coords[idx] != y.coords[idx] {
return false
}
}
return true
}
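// Example (illustrative): under the same root, a locator with coords [1 2] is an ancestor of one
// with coords [1 2 5], but not of [1 3 5] (the coords diverge) and not of [1] (the argument is
// shorter, i.e. closer to the root than we are).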
// Information about a peer, used by the switch to build the tree and eventually make routing decisions.
type peerInfo struct {
key crypto.SigPubKey // ID of this peer
locator switchLocator // Should be able to respond with signatures upon request
degree uint64 // Self-reported degree
time time.Time // Time this node was last seen
faster map[switchPort]uint64 // Counter of how often a node is faster than the current parent, penalized extra if slower
port switchPort // Interface number of this peer
msg switchMsg // The wire switchMsg used
readBlock bool // True if the link notified us of a read that blocked too long
writeBlock bool // True of the link notified us of a write that blocked too long
}
func (pinfo *peerInfo) blocked() bool {
return pinfo.readBlock || pinfo.writeBlock
}
// This is just a uint64 with a named type for clarity reasons.
type switchPort uint64
// This is the subset of the information about a peer needed to make routing decisions. It is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type tableElem struct {
port switchPort
locator switchLocator
time time.Time
next map[switchPort]*tableElem
}
// This is the subset of the information about all peers needed to make routing decisions. It is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type lookupTable struct {
self switchLocator
elems map[switchPort]tableElem // all switch peers, just for sanity checks + API/debugging
_start tableElem // used for lookups
_msg switchMsg
}
// This is switch information which is mutable and needs to be modified by other goroutines.
// Access it through the switchTable's inbox (which owns the data below) rather than directly, so reads and writes stay serialized.
type switchData struct {
// All data that's mutable and used by exported Table methods
// Read/written via the switchTable's actor, which owns this struct
locator switchLocator
peers map[switchPort]peerInfo
msg *switchMsg
}
// All the information stored by the switch.
type switchTable struct {
core *Core
key crypto.SigPubKey // Our own key
phony.Inbox // Owns the below
time time.Time // Time when locator.tstamp was last updated
drop map[crypto.SigPubKey]int64 // Tstamp associated with a dropped root
parent switchPort // Port of whatever peer is our parent, or self if we're root
data switchData //
}
// Minimum allowed total size of switch queues.
const SwitchQueueTotalMinSize = 4 * 1024 * 1024
// Initializes the switchTable struct.
func (t *switchTable) init(core *Core) {
now := time.Now()
t.core = core
t.key = t.core.sigPub
locator := switchLocator{root: t.key, tstamp: now.Unix()}
peers := make(map[switchPort]peerInfo)
t.data = switchData{locator: locator, peers: peers}
t.drop = make(map[crypto.SigPubKey]int64)
phony.Block(t, t._updateTable)
}
func (t *switchTable) reconfigure() {
// This is where reconfiguration would go, if we had anything useful to do.
t.core.links.reconfigure()
t.core.peers.reconfigure()
}
// Regular maintenance to possibly timeout/reset the root and similar.
func (t *switchTable) doMaintenance(from phony.Actor) {
t.Act(from, func() {
// Periodic maintenance work to keep things internally consistent
t._cleanRoot()
t._cleanDropped()
})
}
// Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
func (t *switchTable) _cleanRoot() {
// TODO rethink how this is done?...
// Get rid of the root if it looks like it's timed out
now := time.Now()
doUpdate := false
if now.Sub(t.time) > switch_timeout {
dropped := t.data.peers[t.parent]
dropped.time = t.time
t.drop[t.data.locator.root] = t.data.locator.tstamp
doUpdate = true
}
// Or, if we're better than our root, root ourself
if firstIsBetter(&t.key, &t.data.locator.root) {
doUpdate = true
}
// Or, if we are the root, possibly update our timestamp
if t.data.locator.root == t.key &&
now.Sub(t.time) > switch_updateInterval {
doUpdate = true
}
if doUpdate {
t.parent = switchPort(0)
t.time = now
if t.data.locator.root != t.key {
defer t.core.router.reset(nil)
}
t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
t._updateTable() // updates base copy of switch msg in lookupTable
t.core.peers.sendSwitchMsgs(t)
}
}
// Blocks and, if possible, unparents a peer
func (t *switchTable) blockPeer(from phony.Actor, port switchPort, isWrite bool) {
t.Act(from, func() {
peer, isIn := t.data.peers[port]
switch {
case isIn && !isWrite && !peer.readBlock:
peer.readBlock = true
case isIn && isWrite && !peer.writeBlock:
peer.writeBlock = true
default:
return
}
t.data.peers[port] = peer
defer t._updateTable()
if port != t.parent {
return
}
t.parent = 0
for _, info := range t.data.peers {
if info.port == port {
continue
}
t._handleMsg(&info.msg, info.port, true)
}
t._handleMsg(&peer.msg, peer.port, true)
})
}
func (t *switchTable) unblockPeer(from phony.Actor, port switchPort, isWrite bool) {
t.Act(from, func() {
peer, isIn := t.data.peers[port]
switch {
case isIn && !isWrite && peer.readBlock:
peer.readBlock = false
case isIn && isWrite && peer.writeBlock:
peer.writeBlock = false
default:
return
}
t.data.peers[port] = peer
t._updateTable()
})
}
// Removes a peer.
// Must be called by the router actor with a lambda that calls this.
// If the removed peer was this node's parent, it immediately tries to find a new parent.
func (t *switchTable) forgetPeer(from phony.Actor, port switchPort) {
t.Act(from, func() {
delete(t.data.peers, port)
defer t._updateTable()
if port != t.parent {
return
}
t.parent = 0
for _, info := range t.data.peers {
t._handleMsg(&info.msg, info.port, true)
}
})
}
// Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
// If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
// This function is called periodically to do that cleanup.
func (t *switchTable) _cleanDropped() {
// TODO? only call this after root changes, not periodically
for root := range t.drop {
if !firstIsBetter(&root, &t.data.locator.root) {
delete(t.drop, root)
}
}
}
// A switchMsg contains the root node's sig key, timestamp, and signed per-hop information about a path from the root node to some other node in the network.
// This is exchanged with peers to construct the spanning tree.
// A subset of this information, excluding the signatures, is used to construct locators that are used elsewhere in the code.
type switchMsg struct {
Root crypto.SigPubKey
TStamp int64
Hops []switchMsgHop
}
// This represents the signed information about the path leading from the root to the Next node, via the Port specified here.
type switchMsgHop struct {
Port switchPort
Next crypto.SigPubKey
Sig crypto.SigBytes
}
// This returns a pointer to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
func (t *switchTable) _getMsg() *switchMsg {
if t.parent == 0 {
return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
} else if parent, isIn := t.data.peers[t.parent]; isIn {
msg := parent.msg
msg.Hops = append([]switchMsgHop(nil), msg.Hops...)
return &msg
} else {
return nil
}
}
func (t *lookupTable) getMsg() *switchMsg {
msg := t._msg
msg.Hops = append([]switchMsgHop(nil), t._msg.Hops...)
return &msg
}
// This function checks that the root information in a switchMsg is OK.
// In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
func (t *switchTable) _checkRoot(msg *switchMsg) bool {
// returns false if it's a dropped root, not a better root, or has an older timestamp
// returns true otherwise
// used elsewhere to keep inserting peers into the dht only if root info is OK
dropTstamp, isIn := t.drop[msg.Root]
switch {
case isIn && dropTstamp >= msg.TStamp:
return false
case firstIsBetter(&msg.Root, &t.data.locator.root):
return true
case t.data.locator.root != msg.Root:
return false
case t.data.locator.tstamp > msg.TStamp:
return false
default:
return true
}
}
// This updates the switch with information about a peer.
// Then comes the tricky part: it decides whether it should update our own locator as a result.
// That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
// There are a lot of very delicate order-sensitive checks here, so it's best to just read the code if you need to understand what it's doing.
// It's very important not to change the order of the cases in the switch statement below unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
// Set the third arg to true if you're reprocessing an old message, e.g. to find a new parent after one disconnects, to avoid updating some timing-related things.
func (t *switchTable) _handleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
// TODO directly use a switchMsg instead of switchMessage + sigs
now := time.Now()
// Set up the sender peerInfo
var sender peerInfo
sender.locator.root = msg.Root
sender.locator.tstamp = msg.TStamp
prevKey := msg.Root
for _, hop := range msg.Hops {
// Build locator
sender.locator.coords = append(sender.locator.coords, hop.Port)
sender.key = prevKey
prevKey = hop.Next
}
if sender.key == t.key {
return // Don't peer with ourself via different interfaces
}
sender.msg = *msg
sender.port = fromPort
sender.time = now
// Decide what to do
equiv := func(x *switchLocator, y *switchLocator) bool {
if x.root != y.root {
return false
}
if len(x.coords) != len(y.coords) {
return false
}
for idx := range x.coords {
if x.coords[idx] != y.coords[idx] {
return false
}
}
return true
}
doUpdate := false
oldSender := t.data.peers[fromPort]
if !equiv(&sender.locator, &oldSender.locator) {
// Reset faster info, we'll start refilling it right after this
sender.faster = nil
doUpdate = true
}
// Update the matrix of peer "faster" thresholds
if reprocessing {
sender.faster = oldSender.faster
sender.time = oldSender.time
sender.readBlock = oldSender.readBlock
sender.writeBlock = oldSender.writeBlock
} else {
sender.faster = make(map[switchPort]uint64, len(oldSender.faster))
for port, peer := range t.data.peers {
if port == fromPort {
continue
} else if sender.locator.root != peer.locator.root || sender.locator.tstamp > peer.locator.tstamp {
// The sender was faster than this node, so increment, as long as we don't overflow because of it
if oldSender.faster[peer.port] < switch_faster_threshold {
sender.faster[port] = oldSender.faster[peer.port] + 1
} else {
sender.faster[port] = switch_faster_threshold
}
} else {
// Slower than this node, penalize (more than the reward amount)
if oldSender.faster[port] > 1 {
sender.faster[port] = oldSender.faster[peer.port] - 2
} else {
sender.faster[port] = 0
}
}
}
}
if sender.blocked() != oldSender.blocked() {
doUpdate = true
}
// Update sender
t.data.peers[fromPort] = sender
// Decide if we should also update our root info to make the sender our parent
updateRoot := false
oldParent, isIn := t.data.peers[t.parent]
noParent := !isIn
noLoop := func() bool {
for idx := 0; idx < len(msg.Hops)-1; idx++ {
if msg.Hops[idx].Next == t.core.sigPub {
return false
}
}
if sender.locator.root == t.core.sigPub {
return false
}
return true
}()
dropTstamp, isIn := t.drop[sender.locator.root]
// Decide if we need to update info about the root or change parents.
switch {
case !noLoop:
// This route loops, so we can't use the sender as our parent.
case isIn && dropTstamp >= sender.locator.tstamp:
// This is a known root with a timestamp older than a known timeout, so we can't trust it to be a new announcement.
case firstIsBetter(&sender.locator.root, &t.data.locator.root):
// This is a better root than what we're currently using, so we should update.
updateRoot = true
case t.data.locator.root != sender.locator.root:
// This is not the same root, and it's apparently not better (from the above), so we should ignore it.
case t.data.locator.tstamp > sender.locator.tstamp:
// This timestamp is older than the most recently seen one from this root, so we should ignore it.
case noParent:
// We currently have no working parent, and at this point in the switch statement, anything is better than nothing.
updateRoot = true
case sender.faster[t.parent] >= switch_faster_threshold:
// The sender is reliably faster than the current parent.
updateRoot = true
case !sender.blocked() && oldParent.blocked():
// Replace a blocked parent
updateRoot = true
case reprocessing && sender.blocked() && !oldParent.blocked():
// Don't replace an unblocked parent when reprocessing
case reprocessing && sender.faster[t.parent] > oldParent.faster[sender.port]:
// The sender seems to be reliably faster than the current parent, so switch to them instead.
updateRoot = true
case sender.port != t.parent:
// Ignore further cases if the sender isn't our parent.
case !reprocessing && !equiv(&sender.locator, &t.data.locator):
// Special case:
// If coords changed, then we need to penalize this node somehow, to prevent flapping.
// First, reset all faster-related info to 0.
// Then, de-parent the node and reprocess all messages to find a new parent.
t.parent = 0
for _, peer := range t.data.peers {
if peer.port == sender.port {
continue
}
t._handleMsg(&peer.msg, peer.port, true)
}
// Process the sender last, to avoid keeping them as a parent if at all possible.
t._handleMsg(&sender.msg, sender.port, true)
case now.Sub(t.time) < switch_throttle:
// We've already gotten an update from this root recently, so ignore this one to avoid flooding.
case sender.locator.tstamp > t.data.locator.tstamp:
// The timestamp was updated, so we need to update locally and send to our peers.
updateRoot = true
}
// Note that we depend on the LIFO order of the stack of defers here...
if updateRoot {
doUpdate = true
if !equiv(&sender.locator, &t.data.locator) {
defer t.core.router.reset(t)
}
if t.data.locator.tstamp != sender.locator.tstamp {
t.time = now
}
t.data.locator = sender.locator
t.parent = sender.port
defer t.core.peers.sendSwitchMsgs(t)
}
if doUpdate {
t._updateTable()
}
}
////////////////////////////////////////////////////////////////////////////////
// The rest of these are related to the switch lookup table
func (t *switchTable) _updateTable() {
newTable := lookupTable{
self: t.data.locator.clone(),
elems: make(map[switchPort]tableElem, len(t.data.peers)),
_msg: *t._getMsg(),
}
newTable._init()
for _, pinfo := range t.data.peers {
if pinfo.blocked() || pinfo.locator.root != newTable.self.root {
continue
}
loc := pinfo.locator.clone()
loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
elem := tableElem{
locator: loc,
port: pinfo.port,
time: pinfo.time,
}
newTable._insert(&elem)
newTable.elems[pinfo.port] = elem
}
t.core.peers.updateTables(t, &newTable)
t.core.router.updateTable(t, &newTable)
}
func (t *lookupTable) _init() {
// WARNING: this relies on the convention that the self port is 0
self := tableElem{locator: t.self} // create self elem
t._start = self // initialize _start to self
t._insert(&self) // insert self into table
}
func (t *lookupTable) _insert(elem *tableElem) {
// This is a helper that should only be run during _updateTable
here := &t._start
for idx := 0; idx <= len(elem.locator.coords); idx++ {
refLoc := here.locator
refLoc.coords = refLoc.coords[:idx] // Note that this is length idx (starts at length 0)
oldDist := refLoc.ldist(&here.locator)
newDist := refLoc.ldist(&elem.locator)
var update bool
switch {
case newDist < oldDist: // new elem is closer to this point in the tree
update = true
case newDist > oldDist: // new elem is too far
case elem.locator.tstamp > refLoc.tstamp: // new elem has a closer timestamp
update = true
case elem.locator.tstamp < refLoc.tstamp: // new elem's timestamp is too old
case elem.time.Before(here.time): // same dist+timestamp, but new elem delivered it faster
update = true
}
if update {
here.port = elem.port
here.locator = elem.locator
here.time = elem.time
// Note: here points into the table (either &t._start or an entry in a next map), so these assignments update the stored element in place.
}
if idx < len(elem.locator.coords) {
if here.next == nil {
here.next = make(map[switchPort]*tableElem)
}
var next *tableElem
var ok bool
if next, ok = here.next[elem.locator.coords[idx]]; !ok {
nextVal := *elem
next = &nextVal
here.next[next.locator.coords[idx]] = next
}
here = next
}
}
}
// Starts the switch worker
func (t *switchTable) start() error {
t.core.log.Infoln("Starting switch")
// There's actually nothing to do to start it...
return nil
}
func (t *lookupTable) lookup(coords []byte) switchPort {
var offset int
here := &t._start
for offset < len(coords) {
port, l := wire_decode_uint64(coords[offset:])
offset += l
if next, ok := here.next[switchPort(port)]; ok {
here = next
} else {
break
}
}
return here.port
}
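// Rough worked example (illustrative): if the table was built from peers whose locators end up at
// coords [1], [1 2] and [3], then looking up the wire-encoded coords [1 2 9] walks
// _start -> next[1] -> next[2], finds no child for 9, and returns the port stored at the [1 2]
// node. In other words the longest matching prefix decides the next hop, and any remaining coords
// are left for nodes further along the path to resolve.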

View File

@ -1,496 +0,0 @@
package yggdrasil
// Wire formatting tools
// These are all ugly and probably not very secure
// TODO clean up unused/commented code, and add better comments to whatever is left
// Packet types, as wire_encode_uint64(type) at the start of each packet
import (
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)
const (
wire_Traffic = iota // data being routed somewhere, handle for crypto
wire_ProtocolTraffic // protocol traffic, pub keys for crypto
wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
wire_SwitchMsg // inside link protocol traffic header
wire_SessionPing // inside protocol traffic header
wire_SessionPong // inside protocol traffic header
wire_DHTLookupRequest // inside protocol traffic header
wire_DHTLookupResponse // inside protocol traffic header
wire_NodeInfoRequest // inside protocol traffic header
wire_NodeInfoResponse // inside protocol traffic header
)
// Calls wire_put_uint64 on a nil slice.
func wire_encode_uint64(elem uint64) []byte {
return wire_put_uint64(elem, nil)
}
// Encode uint64 using a variable length scheme.
// Similar to binary.Uvarint, but big-endian.
func wire_put_uint64(e uint64, out []byte) []byte {
var b [10]byte
i := len(b) - 1
b[i] = byte(e & 0x7f)
for e >>= 7; e != 0; e >>= 7 {
i--
b[i] = byte(e | 0x80)
}
return append(out, b[i:]...)
}
// Returns the length of a wire encoded uint64 of this value.
func wire_uint64_len(elem uint64) int {
l := 1
for e := elem >> 7; e > 0; e >>= 7 {
l++
}
return l
}
// Decode uint64 from a []byte slice.
// Returns the decoded uint64 and the number of bytes used.
func wire_decode_uint64(bs []byte) (uint64, int) {
length := 0
elem := uint64(0)
for _, b := range bs {
elem <<= 7
elem |= uint64(b & 0x7f)
length++
if b&0x80 == 0 {
break
}
}
return elem, length
}
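// Worked example (illustrative): 300 is 0b1_0010_1100. Split into 7-bit groups from the bottom,
// that is 0b0000010 (0x02) and 0b0101100 (0x2c). wire_put_uint64 writes the groups big-endian
// with the continuation bit (0x80) set on every byte except the last, so 300 encodes as
// [0x82, 0x2c], and wire_decode_uint64 reads those two bytes back as (300, 2).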
// Converts an int64 into uint64 so it can be written to the wire.
// Non-negative integers are mapped to even integers: 0 -> 0, 1 -> 2, etc.
// Negative integers are mapped to odd integers: -1 -> 1, -2 -> 3, etc.
// This means the least significant bit is a sign bit.
// This is known as zigzag encoding.
func wire_intToUint(i int64) uint64 {
// signed arithmetic shift
return uint64((i >> 63) ^ (i << 1))
}
// Converts uint64 back to int64, generally when being read from the wire.
func wire_intFromUint(u uint64) int64 {
// non-arithmetic shift
return int64((u >> 1) ^ -(u & 1))
}
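// Worked example (illustrative): zigzag maps 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, so values
// of small magnitude (positive or negative) stay small on the wire. wire_intFromUint inverts it
// exactly: for u = 4, (4 >> 1) ^ -(4 & 1) = 2, and for u = 3, (3 >> 1) ^ -(3 & 1) = 1 ^ -1 = -2
// (in signed arithmetic).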
////////////////////////////////////////////////////////////////////////////////
// Takes coords, returns coords prefixed with encoded coord length.
func wire_encode_coords(coords []byte) []byte {
coordLen := wire_encode_uint64(uint64(len(coords)))
bs := make([]byte, 0, len(coordLen)+len(coords))
bs = append(bs, coordLen...)
bs = append(bs, coords...)
return bs
}
// Puts a length prefix and the coords into bs, returns the wire formatted coords.
// Useful in hot loops where we don't want to allocate and we know the rest of the later parts of the slice are safe to overwrite.
func wire_put_coords(coords []byte, bs []byte) []byte {
bs = wire_put_uint64(uint64(len(coords)), bs)
bs = append(bs, coords...)
return bs
}
// Takes a slice that begins with coords (starting with coord length).
// Returns a slice of coords and the number of bytes read.
// Used as part of various decode() functions for structs.
func wire_decode_coords(packet []byte) ([]byte, int) {
coordLen, coordBegin := wire_decode_uint64(packet)
coordEnd := coordBegin + int(coordLen)
if coordBegin == 0 || coordEnd > len(packet) {
return nil, 0
}
return packet[coordBegin:coordEnd], coordEnd
}
// Converts a []uint64 set of coords to a []byte set of coords.
func wire_coordsUint64stoBytes(in []uint64) (out []byte) {
for _, coord := range in {
c := wire_encode_uint64(coord)
out = append(out, c...)
}
return out
}
// Converts a []byte set of coords to a []uint64 set of coords.
func wire_coordsBytestoUint64s(in []byte) (out []uint64) {
offset := 0
for {
coord, length := wire_decode_uint64(in[offset:])
if length == 0 {
break
}
out = append(out, coord)
offset += length
}
return out
}
////////////////////////////////////////////////////////////////////////////////
// Encodes a switchMsg into its wire format.
func (m *switchMsg) encode() []byte {
bs := wire_encode_uint64(wire_SwitchMsg)
bs = append(bs, m.Root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.TStamp))...)
for _, hop := range m.Hops {
bs = append(bs, wire_encode_uint64(uint64(hop.Port))...)
bs = append(bs, hop.Next[:]...)
bs = append(bs, hop.Sig[:]...)
}
return bs
}
// Decodes a wire formatted switchMsg into the struct, returns true if successful.
func (m *switchMsg) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SwitchMsg:
return false
case !wire_chop_slice(m.Root[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
}
m.TStamp = wire_intFromUint(tstamp)
for len(bs) > 0 {
var hop switchMsgHop
switch {
case !wire_chop_uint64((*uint64)(&hop.Port), &bs):
return false
case !wire_chop_slice(hop.Next[:], &bs):
return false
case !wire_chop_slice(hop.Sig[:], &bs):
return false
}
m.Hops = append(m.Hops, hop)
}
return true
}
////////////////////////////////////////////////////////////////////////////////
// A utility function used to copy bytes into a slice and advance the beginning of the source slice, returning true if successful.
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
if len(*fromSlice) < len(toSlice) {
return false
}
copy(toSlice, *fromSlice)
*fromSlice = (*fromSlice)[len(toSlice):]
return true
}
// A utility function to extract coords from a slice and advance the source slice, returning true if successful.
func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
coords, coordLen := wire_decode_coords(*fromSlice)
if coordLen == 0 {
return false
}
*toCoords = append((*toCoords)[:0], coords...)
*fromSlice = (*fromSlice)[coordLen:]
return true
}
// A utility function to extract a wire encoded uint64 into the provided pointer while advancing the start of the source slice, returning true if successful.
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
dec, decLen := wire_decode_uint64(*fromSlice)
if decLen == 0 {
return false
}
*toUInt64 = dec
*fromSlice = (*fromSlice)[decLen:]
return true
}
////////////////////////////////////////////////////////////////////////////////
// Wire traffic packets
// The wire format for ordinary IPv6 traffic encapsulated by the network.
type wire_trafficPacket struct {
Coords []byte
Handle crypto.Handle
Nonce crypto.BoxNonce
Payload []byte
}
// Encodes a wire_trafficPacket into its wire format.
// The returned slice was taken from the pool.
func (p *wire_trafficPacket) encode() []byte {
bs := pool_getBytes(0)
bs = wire_put_uint64(wire_Traffic, bs)
bs = wire_put_coords(p.Coords, bs)
bs = append(bs, p.Handle[:]...)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Decodes an encoded wire_trafficPacket into the struct, returning true if successful.
// Either way, the argument slice is added to the pool.
func (p *wire_trafficPacket) decode(bs []byte) bool {
defer pool_putBytes(bs)
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_Traffic:
return false
case !wire_chop_coords(&p.Coords, &bs):
return false
case !wire_chop_slice(p.Handle[:], &bs):
return false
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.Payload = append(p.Payload, bs...)
return true
}
// The wire format for protocol traffic, such as dht req/res or session ping/pong packets.
type wire_protoTrafficPacket struct {
Coords []byte
ToKey crypto.BoxPubKey
FromKey crypto.BoxPubKey
Nonce crypto.BoxNonce
Payload []byte
}
// Encodes a wire_protoTrafficPacket into its wire format.
func (p *wire_protoTrafficPacket) encode() []byte {
coords := wire_encode_coords(p.Coords)
bs := wire_encode_uint64(wire_ProtocolTraffic)
bs = append(bs, coords...)
bs = append(bs, p.ToKey[:]...)
bs = append(bs, p.FromKey[:]...)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Decodes an encoded wire_protoTrafficPacket into the struct, returning true if successful.
func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_ProtocolTraffic:
return false
case !wire_chop_coords(&p.Coords, &bs):
return false
case !wire_chop_slice(p.ToKey[:], &bs):
return false
case !wire_chop_slice(p.FromKey[:], &bs):
return false
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.Payload = bs
return true
}
// The wire format for link protocol traffic, namely switchMsg.
// There's really two layers of this, with the outer layer using permanent keys, and the inner layer using ephemeral keys.
// The keys themselves are exchanged as part of the connection setup, and then omitted from the packets.
// The two layer logic is handled in peers.go, but it's kind of ugly.
type wire_linkProtoTrafficPacket struct {
Nonce crypto.BoxNonce
Payload []byte
}
// Encodes a wire_linkProtoTrafficPacket into its wire format.
func (p *wire_linkProtoTrafficPacket) encode() []byte {
bs := wire_encode_uint64(wire_LinkProtocolTraffic)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Decodes an encoded wire_linkProtoTrafficPacket into the struct, returning true if successful.
func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_LinkProtocolTraffic:
return false
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.Payload = bs
return true
}
////////////////////////////////////////////////////////////////////////////////
// Encodes a sessionPing into its wire format.
func (p *sessionPing) encode() []byte {
var pTypeVal uint64
if p.IsPong {
pTypeVal = wire_SessionPong
} else {
pTypeVal = wire_SessionPing
}
bs := wire_encode_uint64(pTypeVal)
	// p.sendPermPub is used at the top level (crypto), so it is skipped here
bs = append(bs, p.Handle[:]...)
bs = append(bs, p.SendSesPub[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(p.Tstamp))...)
coords := wire_encode_coords(p.Coords)
bs = append(bs, coords...)
bs = append(bs, wire_encode_uint64(uint64(p.MTU))...)
return bs
}
// Decodes an encoded sessionPing into the struct, returning true if successful.
func (p *sessionPing) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
var mtu uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SessionPing && pType != wire_SessionPong:
return false
	// p.sendPermPub is used at the top level (crypto), so it is skipped here
case !wire_chop_slice(p.Handle[:], &bs):
return false
case !wire_chop_slice(p.SendSesPub[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
case !wire_chop_coords(&p.Coords, &bs):
return false
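	// A missing MTU field is not an error; fall back to 1280, the IPv6 minimum MTU.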
case !wire_chop_uint64(&mtu, &bs):
mtu = 1280
}
p.Tstamp = wire_intFromUint(tstamp)
if pType == wire_SessionPong {
p.IsPong = true
}
p.MTU = MTU(mtu)
return true
}
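// Example (illustrative sketch): the same pair of functions handles both pings
// and pongs, distinguished only by the packet type and the resulting IsPong
// flag. The MTU value here is hypothetical.
func wire_examplePongRoundTrip() bool {
	in := sessionPing{IsPong: true, MTU: 1280}
	var out sessionPing
	return out.decode(in.encode()) && out.IsPong && out.MTU == in.MTU
}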
////////////////////////////////////////////////////////////////////////////////
// Encodes a nodeinfoReqRes into its wire format.
func (p *nodeinfoReqRes) encode() []byte {
var pTypeVal uint64
if p.IsResponse {
pTypeVal = wire_NodeInfoResponse
} else {
pTypeVal = wire_NodeInfoRequest
}
bs := wire_encode_uint64(pTypeVal)
bs = wire_put_coords(p.SendCoords, bs)
if pTypeVal == wire_NodeInfoResponse {
bs = append(bs, p.NodeInfo...)
}
return bs
}
// Decodes an encoded nodeinfoReqRes into the struct, returning true if successful.
func (p *nodeinfoReqRes) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_NodeInfoRequest && pType != wire_NodeInfoResponse:
return false
case !wire_chop_coords(&p.SendCoords, &bs):
return false
}
if p.IsResponse = pType == wire_NodeInfoResponse; p.IsResponse {
if len(bs) == 0 {
return false
}
p.NodeInfo = make(NodeInfoPayload, len(bs))
if !wire_chop_slice(p.NodeInfo[:], &bs) {
return false
}
}
return true
}
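// Example (illustrative sketch): a request carries no NodeInfo payload, while a
// response must carry a non-empty one or decode rejects it. The JSON body here
// is hypothetical.
func wire_exampleNodeInfoRoundTrip() bool {
	req := nodeinfoReqRes{}
	res := nodeinfoReqRes{IsResponse: true, NodeInfo: NodeInfoPayload(`{"name":"example"}`)}
	var outReq, outRes nodeinfoReqRes
	return outReq.decode(req.encode()) && outRes.decode(res.encode()) && outRes.IsResponse
}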
////////////////////////////////////////////////////////////////////////////////
// Encodes a dhtReq into its wire format.
func (r *dhtReq) encode() []byte {
coords := wire_encode_coords(r.Coords)
bs := wire_encode_uint64(wire_DHTLookupRequest)
bs = append(bs, coords...)
bs = append(bs, r.Dest[:]...)
return bs
}
// Decodes an encoded dhtReq into the struct, returning true if successful.
func (r *dhtReq) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_DHTLookupRequest:
return false
case !wire_chop_coords(&r.Coords, &bs):
return false
case !wire_chop_slice(r.Dest[:], &bs):
return false
default:
return true
}
}
// Encodes a dhtRes into its wire format.
func (r *dhtRes) encode() []byte {
coords := wire_encode_coords(r.Coords)
bs := wire_encode_uint64(wire_DHTLookupResponse)
bs = append(bs, coords...)
bs = append(bs, r.Dest[:]...)
for _, info := range r.Infos {
coords = wire_encode_coords(info.coords)
bs = append(bs, info.key[:]...)
bs = append(bs, coords...)
}
return bs
}
// Decodes an encoded dhtRes into the struct, returning true if successful.
func (r *dhtRes) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_DHTLookupResponse:
return false
case !wire_chop_coords(&r.Coords, &bs):
return false
case !wire_chop_slice(r.Dest[:], &bs):
return false
}
for len(bs) > 0 {
info := dhtInfo{}
switch {
case !wire_chop_slice(info.key[:], &bs):
return false
case !wire_chop_coords(&info.coords, &bs):
return false
}
r.Infos = append(r.Infos, &info)
}
return true
}
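// Example (illustrative sketch): a response carries zero or more (key, coords)
// records after its fixed header, and decode keeps reading records until the
// packet is exhausted. The coords values here are hypothetical.
func wire_exampleDHTResRoundTrip() bool {
	in := dhtRes{
		Infos: []*dhtInfo{
			{coords: []byte{1}},
			{coords: []byte{2, 3}},
		},
	}
	var out dhtRes
	return out.decode(in.encode()) && len(out.Infos) == 2
}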