package yggdrasil
// This sends packets to peers using TCP as a transport
// It's generally better tested than the UDP implementation
// Using it regularly is insane, but I find TCP easier to test/debug with
// Updating and optimizing the UDP version is a higher priority
// TODO:
// Something needs to make sure we're getting *valid* packets
// Could be used to DoS (connect, give someone else's keys, spew garbage)
// I guess the "peer" part should watch for link packets, disconnect?
// TCP connections start with a metadata exchange.
// It involves exchanging version numbers and crypto keys
// See version.go for version metadata format
import (
"errors"
"fmt"
"io"
"net"
"sync"
"sync/atomic"
"time"

"golang.org/x/net/proxy"
)
const tcp_msgSize = 2048 + 65535 // TODO figure out what makes sense
// Safely sets TCP_NODELAY when the connection is really TCP; a no-op wrapper for non-TCP connections.
func setNoDelay(c net.Conn, delay bool) {
tcp, ok := c.(*net.TCPConn)
if ok {
tcp.SetNoDelay(delay)
}
}
// The TCP listener and information about active TCP connections, to avoid duplication.
type tcpInterface struct {
core *Core
serv net.Listener
mutex sync.Mutex // Protecting the below
calls map[string]struct{}
conns map[tcpInfo](chan struct{})
}
// This is used as the key to a map that tracks existing connections, to prevent multiple connections to the same keys and local/remote address pair from occurring.
// Different address combinations are allowed, so multi-homing is still technically possible (but not necessarily advisable).
type tcpInfo struct {
box boxPubKey
sig sigPubKey
localAddr string
remoteAddr string
}
// Returns the address of the listener.
func (iface *tcpInterface) getAddr() *net.TCPAddr {
return iface.serv.Addr().(*net.TCPAddr)
}
// Attempts to initiate a connection to the provided address.
func (iface *tcpInterface) connect(addr string) {
iface.call(addr)
}
// Attempts to initiate a connection to the provided address, via the provided SOCKS proxy address.
func (iface *tcpInterface) connectSOCKS(socksaddr, peeraddr string) {
// TODO make sure this doesn't keep attempting/killing connections when one is already active.
// I think some of the interaction between this and callWithConn needs work, so the dial isn't even attempted if there's already an outgoing call to peeraddr.
// Or maybe only if there's already an outgoing call to peeraddr via this socksaddr?
go func() {
dialer, err := proxy.SOCKS5("tcp", socksaddr, nil, proxy.Direct)
if err == nil {
conn, err := dialer.Dial("tcp", peeraddr)
if err == nil {
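// Wrap the proxied connection so RemoteAddr reports the peer's address rather than the proxy's (wrappedConn/wrappedAddr are defined elsewhere in this package)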
iface.callWithConn(&wrappedConn{
c: conn,
raddr: &wrappedAddr{
network: "tcp",
addr: peeraddr,
},
})
}
}
}()
}
// Initializes the struct.
func (iface *tcpInterface) init(core *Core, addr string) (err error) {
iface.core = core
iface.serv, err = net.Listen("tcp", addr)
if err == nil {
iface.calls = make(map[string]struct{})
iface.conns = make(map[tcpInfo](chan struct{}))
go iface.listener()
}
return err
}
// Runs the listener, which spawns off goroutines for incoming connections.
func (iface *tcpInterface) listener() {
defer iface.serv.Close()
iface.core.log.Println("Listening for TCP on:", iface.serv.Addr().String())
for {
sock, err := iface.serv.Accept()
if err != nil {
panic(err)
}
go iface.handler(sock, true)
}
}
// Called by connectSOCKS, it's like call but with the connection already established.
func (iface *tcpInterface) callWithConn(conn net.Conn) {
go func() {
raddr := conn.RemoteAddr().String()
// Check and claim the outgoing call under a single lock acquisition, so two goroutines can't both claim the same address
iface.mutex.Lock()
_, isIn := iface.calls[raddr]
if !isIn {
iface.calls[raddr] = struct{}{}
}
iface.mutex.Unlock()
if !isIn {
defer func() {
iface.mutex.Lock()
delete(iface.calls, raddr)
iface.mutex.Unlock()
}()
iface.handler(conn, false)
}
}()
}
// Checks if a connection already exists.
// If not, it adds it to the list of active outgoing calls (to block future attempts) and dials the address.
// If the dial is successful, it launches the handler.
// When finished, it removes the outgoing call, so reconnection attempts can be made later.
// This all happens in a separate goroutine that it spawns.
func (iface *tcpInterface) call(saddr string) {
go func() {
quit := false
iface.mutex.Lock()
if _, isIn := iface.calls[saddr]; isIn {
quit = true
} else {
iface.calls[saddr] = struct{}{}
defer func() {
iface.mutex.Lock()
delete(iface.calls, saddr)
iface.mutex.Unlock()
}()
}
iface.mutex.Unlock()
if !quit {
conn, err := net.Dial("tcp", saddr)
if err != nil {
return
}
iface.handler(conn, false)
}
}()
}
// This exchanges/checks connection metadata, sets up the peer struct, sets up the writer goroutine, and then runs the reader within the current goroutine.
// It defers a bunch of cleanup stuff to tear down all of these things when the reader exits (e.g. due to a closed connection or a timeout).
func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
defer sock.Close()
// Get our keys
myLinkPub, myLinkPriv := newBoxKeys() // ephemeral link keys
meta := version_getBaseMetadata()
meta.box = iface.core.boxPub
meta.sig = iface.core.sigPub
meta.link = *myLinkPub
metaBytes := meta.encode()
_, err := sock.Write(metaBytes)
if err != nil {
return
}
timeout := time.Now().Add(6 * time.Second)
sock.SetReadDeadline(timeout)
// Read the peer's metadata; io.ReadFull retries partial reads, since TCP may split the handshake across segments
_, err = io.ReadFull(sock, metaBytes)
if err != nil {
return
}
meta = version_metadata{} // Reset to zero value
if !meta.decode(metaBytes) || !meta.check() {
// Failed to decode and check the metadata
// If it's a version mismatch issue, then print an error message
base := version_getBaseMetadata()
if meta.meta == base.meta {
if meta.ver > base.ver {
iface.core.log.Println("Failed to connect to node:", sock.RemoteAddr().String(), "version:", meta.ver)
} else if meta.ver == base.ver && meta.minorVer > base.minorVer {
iface.core.log.Println("Failed to connect to node:", sock.RemoteAddr().String(), "version:", fmt.Sprintf("%d.%d", meta.ver, meta.minorVer))
}
}
// TODO? Block forever to prevent future connection attempts? Suppress future messages about the same node?
return
}
info := tcpInfo{ // used as a map key, so don't include ephemeral link key
box: meta.box,
sig: meta.sig,
}
// Quit the parent call if this is a connection to ourself
equiv := func(k1, k2 []byte) bool {
for idx := range k1 {
if k1[idx] != k2[idx] {
return false
}
}
return true
}
if equiv(info.box[:], iface.core.boxPub[:]) {
return
} // testing
if equiv(info.sig[:], iface.core.sigPub[:]) {
return
}
// Check if we're authorized to connect to this key / IP
if incoming && !iface.core.peers.isAllowedEncryptionPublicKey(&info.box) {
// Allow unauthorized peers if they're link-local
raddrStr, _, _ := net.SplitHostPort(sock.RemoteAddr().String())
raddr := net.ParseIP(raddrStr)
if !raddr.IsLinkLocalUnicast() {
return
}
}
// Check if we already have a connection to this node, close and block if yes
info.localAddr, _, _ = net.SplitHostPort(sock.LocalAddr().String())
info.remoteAddr, _, _ = net.SplitHostPort(sock.RemoteAddr().String())
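// Blocking on the existing connection's channel below means this handler waits until the old connection is torn down before returning, which throttles reconnect churn for the same key/address pair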
iface.mutex.Lock()
if blockChan, isIn := iface.conns[info]; isIn {
iface.mutex.Unlock()
sock.Close()
<-blockChan
return
}
blockChan := make(chan struct{})
iface.conns[info] = blockChan
iface.mutex.Unlock()
defer func() {
iface.mutex.Lock()
delete(iface.conns, info)
iface.mutex.Unlock()
close(blockChan)
}()
// Note that multiple connections to the same node are allowed
// E.g. over different interfaces
p := iface.core.peers.newPeer(&info.box, &info.sig, getSharedKey(myLinkPriv, &meta.link))
p.linkOut = make(chan []byte, 1)
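// linkOut carries link protocol traffic; the writer goroutine below services it ahead of the normal queued traffic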
in := func(bs []byte) {
p.handlePacket(bs)
}
out := make(chan []byte, 32) // TODO? what size makes sense
defer close(out)
go func() {
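// This writer buffers outgoing messages in a LIFO stack, so the newest traffic is sent first under congestion
// shadow counts messages dropped from the stack, used to correct the peer's queue size counter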
var shadow int64
var stack [][]byte
put := func(msg []byte) {
stack = append(stack, msg)
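// Drop the oldest messages once more than 32 are buffered, recycling their byte slices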
for len(stack) > 32 {
util_putBytes(stack[0])
stack = stack[1:]
shadow++
}
}
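// send prefixes the message with the magic bytes and its encoded length, then writes all three slices in one WriteTo (a vectored write where the platform supports it)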
send := func(msg []byte) {
msgLen := wire_encode_uint64(uint64(len(msg)))
buf := net.Buffers{tcp_msg[:], msgLen, msg}
buf.WriteTo(sock)
atomic.AddUint64(&p.bytesSent, uint64(len(tcp_msg)+len(msgLen)+len(msg)))
util_putBytes(msg)
}
timerInterval := 4 * time.Second
timer := time.NewTimer(timerInterval)
defer timer.Stop()
for {
if shadow != 0 {
p.updateQueueSize(-shadow)
shadow = 0
}
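// Stop and drain the timer before Reset, since Reset is only safe on a stopped or drained timer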
timer.Stop()
select {
case <-timer.C:
default:
}
timer.Reset(timerInterval)
select {
case <-timer.C:
//iface.core.log.Println("DEBUG: sending keep-alive:", sock.RemoteAddr().String())
send(nil) // TCP keep-alive traffic
case msg := <-p.linkOut:
send(msg)
case msg, ok := <-out:
if !ok {
return
}
put(msg)
}
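// Flush the stack newest-first, while still servicing link traffic and accepting new messages from out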
for len(stack) > 0 {
select {
case msg := <-p.linkOut:
send(msg)
case msg, ok := <-out:
if !ok {
return
}
put(msg)
default:
msg := stack[len(stack)-1]
stack = stack[:len(stack)-1]
send(msg)
p.updateQueueSize(-1)
}
}
}
}()
p.out = func(msg []byte) {
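// The deferred recover swallows the send-on-closed-channel panic that can occur once the handler has shut down and closed out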
defer func() { recover() }()
select {
case out <- msg:
p.updateQueueSize(1)
default:
util_putBytes(msg)
}
}
p.close = func() { sock.Close() }
setNoDelay(sock, true)
go p.linkLoop()
defer func() {
// Put all of our cleanup here...
p.core.peers.removePeer(p.port)
}()
them, _, _ := net.SplitHostPort(sock.RemoteAddr().String())
themNodeID := getNodeID(&info.box)
themAddr := address_addrForNodeID(themNodeID)
themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, them)
iface.core.log.Println("Connected:", themString)
iface.reader(sock, in) // In this goroutine, because of defers
iface.core.log.Println("Disconnected:", themString)
return
}
// This reads from the socket into a []byte buffer for incoming messages.
// It copies completed messages out of the cache into a new slice, and passes them to the peer struct via the provided `in func([]byte)` argument.
// Then it shifts the incomplete fragments of data forward so future reads won't overwrite it.
func (iface *tcpInterface) reader(sock net.Conn, in func([]byte)) {
bs := make([]byte, 2*tcp_msgSize)
frag := bs[:0]
for {
timeout := time.Now().Add(6 * time.Second)
sock.SetReadDeadline(timeout)
n, err := sock.Read(bs[len(frag):])
if err != nil || n == 0 {
// iface.core.log.Println(err)
break
}
frag = bs[:len(frag)+n]
for {
msg, ok, err := tcp_chop_msg(&frag)
if err != nil {
// iface.core.log.Println(err)
return
}
if !ok {
break
} // We didn't get the whole message yet
newMsg := append(util_getBytes(), msg...)
in(newMsg)
util_yield()
}
frag = append(bs[:0], frag...)
}
}
////////////////////////////////////////////////////////////////////////////////
// These are 4 magic bytes prepended to each message, used to catch if something went horribly wrong with the TCP stream.
var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"
// This takes a pointer to a slice as an argument.
// It checks if there's a complete message and, if so, slices out those parts and returns the message, true, and nil.
// If there's no error, but also no complete message, it returns nil, false, and nil.
// If there's an error, it returns nil, false, and the error, which the reader then handles (currently, by returning from the reader, which causes the connection to close).
func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) {
// Returns msg, ok, err
if len(*bs) < len(tcp_msg) {
return nil, false, nil
}
for idx := range tcp_msg {
if (*bs)[idx] != tcp_msg[idx] {
return nil, false, errors.New("Bad message!")
}
}
msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):])
if msgLen > tcp_msgSize {
return nil, false, errors.New("Oversized message!")
}
msgBegin := len(tcp_msg) + msgLenLen
msgEnd := msgBegin + int(msgLen)
if msgLenLen == 0 || len(*bs) < msgEnd {
// We don't have the full message
// Need to buffer this and wait for the rest to come in
return nil, false, nil
}
msg := (*bs)[msgBegin:msgEnd]
(*bs) = (*bs)[msgEnd:]
return msg, true, nil
}