get rid of session workers, new util.PutBytes/GetBytes logic
This commit is contained in:
parent 39245f8134
commit e0a3055c2f
go.sum
@@ -18,25 +18,30 @@ github.com/yggdrasil-network/water v0.0.0-20190719211521-a76871ea954b/go.mod h1:
github.com/yggdrasil-network/water v0.0.0-20190719213007-b160316e362e/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
github.com/yggdrasil-network/water v0.0.0-20190720101301-5db94379a5eb/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
github.com/yggdrasil-network/water v0.0.0-20190720145626-28ccb9101d55/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
github.com/yggdrasil-network/water v0.0.0-20190725073841-250edb919f8a h1:mQ0mPD+dyB/vaDPyVkCBiXUQu9Or7/cRSTjPlV8tXvw=
github.com/yggdrasil-network/water v0.0.0-20190725073841-250edb919f8a/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20181207154023-610586996380 h1:zPQexyRtNYBc7bcHmehl1dH6TB3qn8zytv8cBGLDNY0=
golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e h1:njOxP/wVblhCLIUhjHXf6X+dzTt5OQ3vMQo9mkOIKIo=
golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
@@ -13,33 +13,25 @@ type Cancellation interface {
 	Error() error
 }
 
+var CancellationFinalized = errors.New("finalizer called")
+var CancellationTimeoutError = errors.New("timeout")
+
 func CancellationFinalizer(c Cancellation) {
-	c.Cancel(errors.New("finalizer called"))
+	c.Cancel(CancellationFinalized)
 }
 
 type cancellation struct {
-	signal chan error
 	cancel chan struct{}
-	errMtx sync.RWMutex
+	mutex  sync.RWMutex
 	err    error
+	done   bool
 }
 
-func (c *cancellation) worker() {
-	// Launch this in a separate goroutine when creating a cancellation
-	err := <-c.signal
-	c.errMtx.Lock()
-	c.err = err
-	c.errMtx.Unlock()
-	close(c.cancel)
-}
-
 func NewCancellation() Cancellation {
 	c := cancellation{
-		signal: make(chan error),
 		cancel: make(chan struct{}),
 	}
 	runtime.SetFinalizer(&c, CancellationFinalizer)
-	go c.worker()
 	return &c
 }
@@ -48,18 +40,22 @@ func (c *cancellation) Finished() <-chan struct{} {
 }
 
 func (c *cancellation) Cancel(err error) error {
-	select {
-	case c.signal <- err:
-		return nil
-	case <-c.cancel:
-		return c.Error()
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	if c.done {
+		return c.err
+	} else {
+		c.err = err
+		c.done = true
+		close(c.cancel)
+		return nil
 	}
 }
 
 func (c *cancellation) Error() error {
-	c.errMtx.RLock()
+	c.mutex.RLock()
 	err := c.err
-	c.errMtx.RUnlock()
+	c.mutex.RUnlock()
 	return err
 }
@@ -75,8 +71,6 @@ func CancellationChild(parent Cancellation) Cancellation {
 	return child
 }
 
-var CancellationTimeoutError = errors.New("timeout")
-
 func CancellationWithTimeout(parent Cancellation, timeout time.Duration) Cancellation {
 	child := CancellationChild(parent)
 	go func() {
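
For context, here is a minimal usage sketch of the reworked cancellation (not part of the commit): Cancel now records the error under the mutex, marks the cancellation as done, and closes the channel itself, so there is no per-cancellation worker goroutine to feed. The import path below is an assumption based on the repository layout.

package main

// Illustrative sketch only; assumes the Cancellation API shown in the diff
// above and the module path github.com/yggdrasil-network/yggdrasil-go/src/util.
import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	c := util.NewCancellation()
	// The first Cancel wins: it stores the error, sets done, and closes Finished().
	c.Cancel(util.CancellationTimeoutError)
	<-c.Finished() // already closed by the Cancel above
	// Later calls simply report the error recorded by the first one.
	fmt.Println(c.Cancel(nil)) // "timeout"
	fmt.Println(c.Error())     // "timeout"
}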
@@ -3,6 +3,7 @@ package util
 
 // These are misc. utility functions that didn't really fit anywhere else
 
 import "runtime"
+import "sync"
 import "time"
 
 // A wrapper around runtime.Gosched() so it doesn't need to be imported elsewhere.
@@ -21,29 +22,27 @@ func UnlockThread() {
 }
 
 // This is used to buffer recently used slices of bytes, to prevent allocations in the hot loops.
 // It's used like a sync.Pool, but with a fixed size and typechecked without type casts to/from interface{} (which were making the profiles look ugly).
-var byteStore chan []byte
-
-func init() {
-	byteStore = make(chan []byte, 32)
-}
+var byteStoreMutex sync.Mutex
+var byteStore [][]byte
 
-// Gets an empty slice from the byte store, if one is available, or else returns a new nil slice.
+// Gets an empty slice from the byte store.
 func GetBytes() []byte {
-	select {
-	case bs := <-byteStore:
-		return bs[:0]
-	default:
+	byteStoreMutex.Lock()
+	defer byteStoreMutex.Unlock()
+	if len(byteStore) > 0 {
+		var bs []byte
+		bs, byteStore = byteStore[len(byteStore)-1][:0], byteStore[:len(byteStore)-1]
+		return bs
+	} else {
 		return nil
 	}
 }
 
-// Puts a slice in the store, if there's room, or else returns and lets the slice get collected.
+// Puts a slice in the store.
 func PutBytes(bs []byte) {
-	select {
-	case byteStore <- bs:
-	default:
-	}
+	byteStoreMutex.Lock()
+	defer byteStoreMutex.Unlock()
+	byteStore = append(byteStore, bs)
 }
 
 // This is a workaround to go's broken timer implementation
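
As a rough usage sketch (not from the commit): callers still pair GetBytes with a later PutBytes, but the store is now a mutex-guarded slice acting as a simple free list, so PutBytes never silently drops a buffer the way the old fixed-size channel did; the trade-off is taking a lock instead of a non-blocking channel operation. The import path is again an assumption.

package main

// Illustrative sketch only; assumes the module path
// github.com/yggdrasil-network/yggdrasil-go/src/util for the util package.
import (
	"fmt"

	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)

func main() {
	buf := util.GetBytes()                  // nil while the store is empty
	buf = append(buf, []byte("payload")...) // reuses old capacity once buffers circulate
	fmt.Println(len(buf))
	util.PutBytes(buf) // hand the slice back for reuse

	buf2 := util.GetBytes() // the slice just returned, truncated to length 0
	fmt.Println(len(buf2), cap(buf2) >= len("payload"))
}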
@@ -230,7 +230,7 @@ func (c *Core) GetSessions() []Session {
 				skip = true
 			}
 		}()
-		sinfo.doWorker(workerFunc)
+		sinfo.doFunc(workerFunc)
 	}()
 	if skip {
 		continue
@@ -145,9 +145,9 @@ func (c *Conn) Read(b []byte) (int, error) {
 		}
 		defer util.PutBytes(p.Payload)
 		var err error
-		done := make(chan struct{})
+		//done := make(chan struct{})
 		workerFunc := func() {
-			defer close(done)
+			//defer close(done)
 			// If the nonce is bad then drop the packet and return an error
 			if !sinfo.nonceIsOK(&p.Nonce) {
 				err = ConnError{errors.New("packet dropped due to invalid nonce"), false, true, false, 0}
@@ -167,33 +167,36 @@ func (c *Conn) Read(b []byte) (int, error) {
 			sinfo.time = time.Now()
 			sinfo.bytesRecvd += uint64(len(bs))
 		}
-		// Hand over to the session worker
-		defer func() {
-			if recover() != nil {
-				err = ConnError{errors.New("read failed, session already closed"), false, false, true, 0}
-				close(done)
-			}
-		}() // In case we're racing with a close
-		// Send to worker
-		select {
-		case sinfo.worker <- workerFunc:
-		case <-cancel.Finished():
-			if cancel.Error() == util.CancellationTimeoutError {
-				return 0, ConnError{errors.New("read timeout"), true, false, false, 0}
-			} else {
-				return 0, ConnError{errors.New("session closed"), false, false, true, 0}
-			}
-		}
-		// Wait for the worker to finish
-		select {
-		case <-done: // Wait for the worker to finish, failing this can cause memory errors (util.[Get||Put]Bytes stuff)
-		case <-cancel.Finished():
-			if cancel.Error() == util.CancellationTimeoutError {
-				return 0, ConnError{errors.New("read timeout"), true, false, false, 0}
-			} else {
-				return 0, ConnError{errors.New("session closed"), false, false, true, 0}
-			}
-		}
+		sinfo.doFunc(workerFunc)
+		/*
+			// Hand over to the session worker
+			defer func() {
+				if recover() != nil {
+					err = ConnError{errors.New("read failed, session already closed"), false, false, true, 0}
+					close(done)
+				}
+			}() // In case we're racing with a close
+			// Send to worker
+			select {
+			case sinfo.worker <- workerFunc:
+			case <-cancel.Finished():
+				if cancel.Error() == util.CancellationTimeoutError {
+					return 0, ConnError{errors.New("read timeout"), true, false, false, 0}
+				} else {
+					return 0, ConnError{errors.New("session closed"), false, false, true, 0}
+				}
+			}
+			// Wait for the worker to finish
+			select {
+			case <-done: // Wait for the worker to finish, failing this can cause memory errors (util.[Get||Put]Bytes stuff)
+			case <-cancel.Finished():
+				if cancel.Error() == util.CancellationTimeoutError {
+					return 0, ConnError{errors.New("read timeout"), true, false, false, 0}
+				} else {
+					return 0, ConnError{errors.New("session closed"), false, false, true, 0}
+				}
+			}
+		*/
 		// Something went wrong in the session worker so abort
 		if err != nil {
 			if ce, ok := err.(*ConnError); ok && ce.Temporary() {
@@ -214,10 +217,10 @@ func (c *Conn) Read(b []byte) (int, error) {
 func (c *Conn) Write(b []byte) (bytesWritten int, err error) {
 	sinfo := c.session
 	var packet []byte
-	done := make(chan struct{})
+	//done := make(chan struct{})
 	written := len(b)
 	workerFunc := func() {
-		defer close(done)
+		//defer close(done)
 		// Does the packet exceed the permitted size for the session?
 		if uint16(len(b)) > sinfo.getMTU() {
 			written, err = 0, ConnError{errors.New("packet too big"), true, false, false, int(sinfo.getMTU())}
@@ -264,27 +267,30 @@ func (c *Conn) Write(b []byte) (bytesWritten int, err error) {
 		default: // Don't do anything, to keep traffic throttled
 		}
 	}
-	// Set up a timer so this doesn't block forever
-	cancel := c.getDeadlineCancellation(&c.writeDeadline)
-	defer cancel.Cancel(nil)
-	// Hand over to the session worker
-	defer func() {
-		if recover() != nil {
-			err = ConnError{errors.New("write failed, session already closed"), false, false, true, 0}
-			close(done)
-		}
-	}() // In case we're racing with a close
-	select { // Send to worker
-	case sinfo.worker <- workerFunc:
-	case <-cancel.Finished():
-		if cancel.Error() == util.CancellationTimeoutError {
-			return 0, ConnError{errors.New("write timeout"), true, false, false, 0}
-		} else {
-			return 0, ConnError{errors.New("session closed"), false, false, true, 0}
-		}
-	}
-	// Wait for the worker to finish, otherwise there are memory errors ([Get||Put]Bytes stuff)
-	<-done
+	sinfo.doFunc(workerFunc)
+	/*
+		// Set up a timer so this doesn't block forever
+		cancel := c.getDeadlineCancellation(&c.writeDeadline)
+		defer cancel.Cancel(nil)
+		// Hand over to the session worker
+		defer func() {
+			if recover() != nil {
+				err = ConnError{errors.New("write failed, session already closed"), false, false, true, 0}
+				close(done)
+			}
+		}() // In case we're racing with a close
+		select { // Send to worker
+		case sinfo.worker <- workerFunc:
+		case <-cancel.Finished():
+			if cancel.Error() == util.CancellationTimeoutError {
+				return 0, ConnError{errors.New("write timeout"), true, false, false, 0}
+			} else {
+				return 0, ConnError{errors.New("session closed"), false, false, true, 0}
+			}
+		}
+		// Wait for the worker to finish, otherwise there are memory errors ([Get||Put]Bytes stuff)
+		<-done
+	*/
 	// Give the packet to the router
 	if written > 0 {
 		sinfo.core.router.out(packet)
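
The pattern behind the commented-out blocks above: workerFunc used to run on a separate session worker goroutine, so Read and Write had to hand it off, guard against racing with a close, and then wait on done before their deferred util.PutBytes could safely recycle the payload. With sinfo.doFunc the closure runs synchronously on the caller's goroutine, so the ordering that the done channel enforced now falls out of ordinary control flow. A self-contained toy illustration (not yggdrasil code; the names are hypothetical):

package main

// Toy sketch of why the done channel and the "wait for the worker" select are
// no longer needed: doFunc returns only after the closure has run, so the
// deferred recycle (standing in for util.PutBytes) always runs afterwards.
import (
	"fmt"
	"sync"
)

type session struct{ mutex sync.Mutex }

// doFunc mirrors the new sessionInfo.doFunc: run f while holding the session mutex.
func (s *session) doFunc(f func()) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	f()
}

func recycle(bs []byte) { fmt.Println("recycled", len(bs), "bytes") } // stand-in for util.PutBytes

func read(s *session, payload []byte) (n int) {
	defer recycle(payload) // runs strictly after doFunc has returned
	s.doFunc(func() { n = len(payload) })
	return
}

func main() {
	fmt.Println(read(&session{}, []byte("example payload")))
}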
@@ -16,6 +16,7 @@ import (
 // All the information we know about an active session.
 // This includes coords, permanent and ephemeral keys, handles and nonces, various sorts of timing information for timeout and maintenance, and some metadata for the admin API.
 type sessionInfo struct {
+	mutex       sync.Mutex      // Protects all of the below, use it any time you read/chance the contents of a session
 	core        *Core           //
 	reconfigure chan chan error //
 	theirAddr   address.Address //
@@ -43,24 +44,14 @@ type sessionInfo struct {
 	tstamp     int64                    // ATOMIC - tstamp from their last session ping, replay attack mitigation
 	bytesSent  uint64                   // Bytes of real traffic sent in this session
 	bytesRecvd uint64                   // Bytes of real traffic received in this session
-	worker     chan func()              // Channel to send work to the session worker
 	recv       chan *wire_trafficPacket // Received packets go here, picked up by the associated Conn
 	init       chan struct{}            // Closed when the first session pong arrives, used to signal that the session is ready for initial use
 }
 
-func (sinfo *sessionInfo) doWorker(f func()) {
-	done := make(chan struct{})
-	sinfo.worker <- func() {
-		f()
-		close(done)
-	}
-	<-done
-}
-
-func (sinfo *sessionInfo) workerMain() {
-	for f := range sinfo.worker {
-		f()
-	}
-}
+func (sinfo *sessionInfo) doFunc(f func()) {
+	sinfo.mutex.Lock()
+	defer sinfo.mutex.Unlock()
+	f()
+}
 
 // Represents a session ping/pong packet, andincludes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.
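
To make the behavioral change concrete, a toy comparison (not yggdrasil code; the types and counters are made up) of the two serialization schemes shown in this hunk: the removed doWorker/workerMain pair funnels every closure through one dedicated worker goroutine per session, while the new doFunc runs the closure on the caller's goroutine while holding the session mutex. Either way, closures for a given session execute one at a time.

package main

// Toy comparison: channel-fed worker goroutine (old) versus mutex-protected
// doFunc (new). Each scheme serializes its own closures, so both counters
// reach 100 without a data race.
import (
	"fmt"
	"sync"
)

type oldStyle struct{ worker chan func() }

func (s *oldStyle) workerMain() {
	for f := range s.worker { // the per-session goroutine this commit removes
		f()
	}
}

func (s *oldStyle) doWorker(f func()) {
	done := make(chan struct{})
	s.worker <- func() { f(); close(done) }
	<-done // block the caller until the worker has run f
}

type newStyle struct{ mutex sync.Mutex }

func (s *newStyle) doFunc(f func()) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	f() // runs on the caller's goroutine, serialized by the mutex
}

func main() {
	o := &oldStyle{worker: make(chan func(), 1)}
	go o.workerMain()
	n := &newStyle{}

	oldCounter, newCounter := 0, 0
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(2)
		go func() { defer wg.Done(); o.doWorker(func() { oldCounter++ }) }()
		go func() { defer wg.Done(); n.doFunc(func() { newCounter++ }) }()
	}
	wg.Wait()
	fmt.Println(oldCounter, newCounter) // 100 100
}

One practical consequence is that the per-session goroutine and its channel no longer need to be created in createSession or torn down in close(), which is exactly what the hunks below remove.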
@@ -231,11 +222,9 @@ func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
 	sinfo.myHandle = *crypto.NewHandle()
 	sinfo.theirAddr = *address.AddrForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
 	sinfo.theirSubnet = *address.SubnetForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
-	sinfo.worker = make(chan func(), 1)
 	sinfo.recv = make(chan *wire_trafficPacket, 32)
 	ss.sinfos[sinfo.myHandle] = &sinfo
 	ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
-	go sinfo.workerMain()
 	return &sinfo
 }
@@ -267,14 +256,12 @@ func (ss *sessions) cleanup() {
 	ss.lastCleanup = time.Now()
 }
 
-// Closes a session, removing it from sessions maps and killing the worker goroutine.
+// Closes a session, removing it from sessions maps.
 func (sinfo *sessionInfo) close() {
 	if s := sinfo.core.sessions.sinfos[sinfo.myHandle]; s == sinfo {
 		delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
 		delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub)
 	}
-	defer func() { recover() }()
-	close(sinfo.worker)
 }
 
 // Returns a session ping appropriate for the given session info.
@@ -372,7 +359,7 @@ func (ss *sessions) handlePing(ping *sessionPing) {
 		}
 		ss.listenerMutex.Unlock()
 	}
-	sinfo.doWorker(func() {
+	sinfo.doFunc(func() {
 		// Update the session
 		if !sinfo.update(ping) { /*panic("Should not happen in testing")*/
 			return
@@ -426,7 +413,7 @@ func (sinfo *sessionInfo) updateNonce(theirNonce *crypto.BoxNonce) {
 // Called after coord changes, so attemtps to use a session will trigger a new ping and notify the remote end of the coord change.
 func (ss *sessions) reset() {
 	for _, sinfo := range ss.sinfos {
-		sinfo.doWorker(func() {
+		sinfo.doFunc(func() {
 			sinfo.reset = true
 		})
 	}