mirror of https://github.com/cwinfo/yggdrasil-go.git
synced 2024-11-10 08:40:28 +00:00

commit 9835c63818 (parent bbcbbaf3b1)

    refactor things the router owns (dht, sessions, searches) into that struct, to make the ownership more explicit
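In short: Core previously owned the dht, sessions, and searches structs directly, even though their state is only ever touched from the router goroutine. This diff moves those three fields into the router struct and rewrites every access accordingly: c.dht becomes c.router.dht from outside the router, and r.core.dht becomes r.dht inside it. A minimal before/after sketch, assembled from the Core and router hunks below (surrounding fields elided):

// Before: Core owns the subsystems; the router only borrows them.
type Core struct {
	// ...
	sessions sessions
	router   router
	dht      dht
	searches searches
	// ...
}

// After: the router owns the state it was already responsible for.
type Core struct {
	// ...
	router router
	// ...
}

type router struct {
	// ...
	dht      dht
	nodeinfo nodeinfo
	searches searches
	sessions sessions
	// ...
}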
@@ -156,11 +156,11 @@ func (c *Core) GetDHT() []DHTEntry {
 	getDHT := func() {
 		now := time.Now()
 		var dhtentry []*dhtInfo
-		for _, v := range c.dht.table {
+		for _, v := range c.router.dht.table {
 			dhtentry = append(dhtentry, v)
 		}
 		sort.SliceStable(dhtentry, func(i, j int) bool {
-			return dht_ordered(&c.dht.nodeID, dhtentry[i].getNodeID(), dhtentry[j].getNodeID())
+			return dht_ordered(&c.router.dht.nodeID, dhtentry[i].getNodeID(), dhtentry[j].getNodeID())
 		})
 		for _, v := range dhtentry {
 			info := DHTEntry{
@@ -208,7 +208,7 @@ func (c *Core) GetSwitchQueues() SwitchQueues {
 func (c *Core) GetSessions() []Session {
 	var sessions []Session
 	getSessions := func() {
-		for _, sinfo := range c.sessions.sinfos {
+		for _, sinfo := range c.router.sessions.sinfos {
 			var session Session
 			workerFunc := func() {
 				session = Session{
@@ -243,17 +243,17 @@ func (c *Core) GetSessions() []Session {
 
 // ConnListen returns a listener for Yggdrasil session connections.
 func (c *Core) ConnListen() (*Listener, error) {
-	c.sessions.listenerMutex.Lock()
-	defer c.sessions.listenerMutex.Unlock()
-	if c.sessions.listener != nil {
+	c.router.sessions.listenerMutex.Lock()
+	defer c.router.sessions.listenerMutex.Unlock()
+	if c.router.sessions.listener != nil {
 		return nil, errors.New("a listener already exists")
 	}
-	c.sessions.listener = &Listener{
+	c.router.sessions.listener = &Listener{
 		core:  c,
 		conn:  make(chan *Conn),
 		close: make(chan interface{}),
 	}
-	return c.sessions.listener, nil
+	return c.router.sessions.listener, nil
 }
 
 // ConnDialer returns a dialer for Yggdrasil session connections.
@@ -356,10 +356,10 @@ func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool)
 // received an incoming session request. The function should return true to
 // allow the session or false to reject it.
 func (c *Core) SetSessionGatekeeper(f func(pubkey *crypto.BoxPubKey, initiator bool) bool) {
-	c.sessions.isAllowedMutex.Lock()
-	defer c.sessions.isAllowedMutex.Unlock()
+	c.router.sessions.isAllowedMutex.Lock()
+	defer c.router.sessions.isAllowedMutex.Unlock()
 
-	c.sessions.isAllowedHandler = f
+	c.router.sessions.isAllowedHandler = f
 }
 
 // SetLogger sets the output logger of the Yggdrasil node after startup. This
@@ -445,10 +445,10 @@ func (c *Core) DHTPing(key crypto.BoxPubKey, coords []uint64, target *crypto.Nod
 	}
 	rq := dhtReqKey{info.key, *target}
 	sendPing := func() {
-		c.dht.addCallback(&rq, func(res *dhtRes) {
+		c.router.dht.addCallback(&rq, func(res *dhtRes) {
 			resCh <- res
 		})
-		c.dht.ping(&info, &rq.dest)
+		c.router.dht.ping(&info, &rq.dest)
 	}
 	c.router.doAdmin(sendPing)
 	// TODO: do something better than the below...
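All of the accessor hunks above follow one pattern: code running outside the router goroutine wraps its reads of router-owned state in a closure and hands it to c.router.doAdmin, exactly as DHTPing does with sendPing. A sketch of that pattern (assuming, as the accessors above rely on, that doAdmin does not return until the closure has run on the router):

// Collect router-owned DHT entries from another goroutine.
var entries []*dhtInfo
getDHT := func() {
	for _, v := range c.router.dht.table {
		entries = append(entries, v) // safe: this runs on the router
	}
}
c.router.doAdmin(getDHT)
// entries can now be used freely; no further synchronization needed.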

@@ -84,7 +84,7 @@ func (c *Conn) String() string {
 func (c *Conn) search() error {
 	var sinfo *searchInfo
 	var isIn bool
-	c.core.router.doAdmin(func() { sinfo, isIn = c.core.searches.searches[*c.nodeID] })
+	c.core.router.doAdmin(func() { sinfo, isIn = c.core.router.searches.searches[*c.nodeID] })
 	if !isIn {
 		done := make(chan struct{}, 1)
 		var sess *sessionInfo
@@ -99,7 +99,7 @@ func (c *Conn) search() error {
 			}
 		}
 		c.core.router.doAdmin(func() {
-			sinfo = c.core.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
+			sinfo = c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
 			sinfo.continueSearch()
 		})
 		<-done
@@ -124,11 +124,11 @@ func (c *Conn) search() error {
 func (c *Conn) doSearch() {
 	routerWork := func() {
 		// Check to see if there is a search already matching the destination
-		sinfo, isIn := c.core.searches.searches[*c.nodeID]
+		sinfo, isIn := c.core.router.searches.searches[*c.nodeID]
 		if !isIn {
 			// Nothing was found, so create a new search
 			searchCompleted := func(sinfo *sessionInfo, e error) {}
-			sinfo = c.core.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
+			sinfo = c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
 			c.core.log.Debugf("%s DHT search started: %p", c.String(), sinfo)
 			// Start the search
 			sinfo.continueSearch()

@@ -26,10 +26,7 @@ type Core struct {
 	sigPriv     crypto.SigPrivKey
 	switchTable switchTable
 	peers       peers
-	sessions    sessions
 	router      router
-	dht         dht
-	searches    searches
 	link        link
 	log         *log.Logger
 }
@@ -76,9 +73,9 @@ func (c *Core) init() error {
 		c.log.Warnln("SigningPublicKey in config is incorrect, should be", sp)
 	}
 
-	c.searches.init(c)
-	c.dht.init(c)
-	c.sessions.init(c)
+	c.router.searches.init(c)
+	c.router.dht.init(c)
+	c.router.sessions.init(c)
 	c.peers.init(c)
 	c.router.init(c)
 	c.switchTable.init(c) // TODO move before peers? before router?
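Note the ordering: the three subsystem inits now go through c.router but still run before c.router.init(c) itself. That matters because router.init (in the router hunks below) now reads r.dht.nodeID directly, so the DHT must already be initialized by then. The dependency, with the calls as they appear in this diff:

c.router.dht.init(c) // sets up the DHT (presumably including nodeID)
// ...
c.router.init(c) // reads the DHT it now owns:
//	r.addr = *address.AddrForNodeID(&r.dht.nodeID)
//	r.subnet = *address.SubnetForNodeID(&r.dht.nodeID)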
@@ -124,9 +121,9 @@ func (c *Core) UpdateConfig(config *config.NodeConfig) {
 	errors := 0
 
 	components := []chan chan error{
-		c.searches.reconfigure,
-		c.dht.reconfigure,
-		c.sessions.reconfigure,
+		c.router.searches.reconfigure,
+		c.router.dht.reconfigure,
+		c.router.sessions.reconfigure,
 		c.peers.reconfigure,
 		c.router.reconfigure,
 		c.switchTable.reconfigure,

@@ -221,7 +221,7 @@ func (t *dht) handleReq(req *dhtReq) {
 func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
 	// Send a reply for a dhtReq
 	bs := res.encode()
-	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.Key)
+	shared := t.core.router.sessions.getSharedKey(&t.core.boxPriv, &req.Key)
 	payload, nonce := crypto.BoxSeal(shared, bs, nil)
 	p := wire_protoTrafficPacket{
 		Coords: req.Coords,
@@ -285,7 +285,7 @@ func (t *dht) handleRes(res *dhtRes) {
 func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
 	// Send a dhtReq to the node in dhtInfo
 	bs := req.encode()
-	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
+	shared := t.core.router.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
 	payload, nonce := crypto.BoxSeal(shared, bs, nil)
 	p := wire_protoTrafficPacket{
 		Coords: dest.coords,

@@ -31,8 +31,8 @@ func (l *Listener) Close() (err error) {
 		recover()
 		err = errors.New("already closed")
 	}()
-	if l.core.sessions.listener == l {
-		l.core.sessions.listener = nil
+	if l.core.router.sessions.listener == l {
+		l.core.router.sessions.listener = nil
 	}
 	close(l.close)
 	close(l.conn)

@@ -172,7 +172,7 @@ func (m *nodeinfo) sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse
 		NodeInfo: m.getNodeInfo(),
 	}
 	bs := nodeinfo.encode()
-	shared := m.core.sessions.getSharedKey(&m.core.boxPriv, &key)
+	shared := m.core.router.sessions.getSharedKey(&m.core.boxPriv, &key)
 	payload, nonce := crypto.BoxSeal(shared, bs, nil)
 	p := wire_protoTrafficPacket{
 		Coords: coords,

@@ -43,15 +43,18 @@ type router struct {
 	addr     address.Address
 	subnet   address.Subnet
 	out      func([]byte) // packets we're sending to the network, link to peer's "in"
+	dht      dht
 	nodeinfo nodeinfo
+	searches searches
+	sessions sessions
 }
 
 // Initializes the router struct, which includes setting up channels to/from the adapter.
 func (r *router) init(core *Core) {
 	r.core = core
 	r.reconfigure = make(chan chan error, 1)
-	r.addr = *address.AddrForNodeID(&r.core.dht.nodeID)
-	r.subnet = *address.SubnetForNodeID(&r.core.dht.nodeID)
+	r.addr = *address.AddrForNodeID(&r.dht.nodeID)
+	r.subnet = *address.SubnetForNodeID(&r.dht.nodeID)
 	self := linkInterface{
 		name: "(self)",
 		info: linkInfo{
@@ -91,15 +94,15 @@ func (r *router) handlePackets(from phony.IActor, packets [][]byte) {
 // Insert a peer info into the dht, TODO? make the dht a separate actor
 func (r *router) insertPeer(from phony.IActor, info *dhtInfo) {
 	r.EnqueueFrom(from, func() {
-		r.core.dht.insertPeer(info)
+		r.dht.insertPeer(info)
 	})
 }
 
 // Reset sessions and DHT after the switch sees our coords change
 func (r *router) reset(from phony.IActor) {
 	r.EnqueueFrom(from, func() {
-		r.core.sessions.reset(r)
-		r.core.dht.reset()
+		r.sessions.reset()
+		r.dht.reset()
 	})
 }
 
@@ -114,8 +117,8 @@ func (r *router) _mainLoop() {
 			<-r.SyncExec(func() {
 				// Any periodic maintenance stuff goes here
 				r.core.switchTable.doMaintenance()
-				r.core.dht.doMaintenance()
-				r.core.sessions.cleanup()
+				r.dht.doMaintenance()
+				r.sessions.cleanup()
 			})
 		case e := <-r.reconfigure:
 			<-r.SyncExec(func() {
@@ -149,7 +152,7 @@ func (r *router) _handleTraffic(packet []byte) {
 	if !p.decode(packet) {
 		return
 	}
-	sinfo, isIn := r.core.sessions.getSessionForHandle(&p.Handle)
+	sinfo, isIn := r.sessions.getSessionForHandle(&p.Handle)
 	if !isIn {
 		util.PutBytes(p.Payload)
 		return
@@ -172,7 +175,7 @@ func (r *router) _handleProto(packet []byte) {
 	var sharedKey *crypto.BoxSharedKey
 	if p.ToKey == r.core.boxPub {
 		// Try to open using our permanent key
-		sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
+		sharedKey = r.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
 	} else {
 		return
 	}
@@ -212,7 +215,7 @@ func (r *router) _handlePing(bs []byte, fromKey *crypto.BoxPubKey) {
 		return
 	}
 	ping.SendPermPub = *fromKey
-	r.core.sessions.handlePing(&ping)
+	r.sessions.handlePing(&ping)
 }
 
 // Handles session pongs (which are really pings with an extra flag to prevent acknowledgement).
@@ -227,7 +230,7 @@ func (r *router) _handleDHTReq(bs []byte, fromKey *crypto.BoxPubKey) {
 		return
 	}
 	req.Key = *fromKey
-	r.core.dht.handleReq(&req)
+	r.dht.handleReq(&req)
 }
 
 // Decodes dht responses and passes them to dht.handleRes to update the DHT table and further pass them to the search code (if applicable).
@@ -237,7 +240,7 @@ func (r *router) _handleDHTRes(bs []byte, fromKey *crypto.BoxPubKey) {
 		return
 	}
 	res.Key = *fromKey
-	r.core.dht.handleRes(&res)
+	r.dht.handleRes(&res)
 }
 
 // Decodes nodeinfo request

@@ -100,7 +100,7 @@ func (sinfo *searchInfo) addToSearch(res *dhtRes) {
 	from := dhtInfo{key: res.Key, coords: res.Coords}
 	sinfo.visited[*from.getNodeID()] = true
 	for _, info := range res.Infos {
-		if *info.getNodeID() == sinfo.core.dht.nodeID || sinfo.visited[*info.getNodeID()] {
+		if *info.getNodeID() == sinfo.core.router.dht.nodeID || sinfo.visited[*info.getNodeID()] {
 			continue
 		}
 		if dht_ordered(&sinfo.dest, info.getNodeID(), from.getNodeID()) {
@@ -134,7 +134,7 @@ func (sinfo *searchInfo) doSearchStep() {
 	if len(sinfo.toVisit) == 0 {
 		if time.Since(sinfo.time) > search_RETRY_TIME {
 			// Dead end and no response in too long, do cleanup
-			delete(sinfo.core.searches.searches, sinfo.dest)
+			delete(sinfo.core.router.searches.searches, sinfo.dest)
 			sinfo.callback(nil, errors.New("search reached dead end"))
 		}
 		return
@@ -143,8 +143,8 @@ func (sinfo *searchInfo) doSearchStep() {
 	var next *dhtInfo
 	next, sinfo.toVisit = sinfo.toVisit[0], sinfo.toVisit[1:]
 	rq := dhtReqKey{next.key, sinfo.dest}
-	sinfo.core.dht.addCallback(&rq, sinfo.handleDHTRes)
-	sinfo.core.dht.ping(next, &sinfo.dest)
+	sinfo.core.router.dht.addCallback(&rq, sinfo.handleDHTRes)
+	sinfo.core.router.dht.ping(next, &sinfo.dest)
 	sinfo.time = time.Now()
 }
 
@@ -157,7 +157,7 @@ func (sinfo *searchInfo) continueSearch() {
 	// Any that die aren't restarted, but a new one will start later
 	retryLater := func() {
 		// FIXME this keeps the search alive forever if not for the searches map, fix that
-		newSearchInfo := sinfo.core.searches.searches[sinfo.dest]
+		newSearchInfo := sinfo.core.router.searches.searches[sinfo.dest]
 		if newSearchInfo != sinfo {
 			return
 		}
@@ -196,17 +196,17 @@ func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
 		return false
 	}
 	// They match, so create a session and send a sessionRequest
-	sess, isIn := sinfo.core.sessions.getByTheirPerm(&res.Key)
+	sess, isIn := sinfo.core.router.sessions.getByTheirPerm(&res.Key)
 	if !isIn {
-		sess = sinfo.core.sessions.createSession(&res.Key)
+		sess = sinfo.core.router.sessions.createSession(&res.Key)
 		if sess == nil {
 			// nil if the DHT search finished but the session wasn't allowed
 			sinfo.callback(nil, errors.New("session not allowed"))
 			// Cleanup
-			delete(sinfo.core.searches.searches, res.Dest)
+			delete(sinfo.core.router.searches.searches, res.Dest)
 			return true
 		}
-		_, isIn := sinfo.core.sessions.getByTheirPerm(&res.Key)
+		_, isIn := sinfo.core.router.sessions.getByTheirPerm(&res.Key)
 		if !isIn {
 			panic("This should never happen")
 		}
@@ -216,6 +216,6 @@ func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
 	sess.ping(&sinfo.core.router)
 	sinfo.callback(sess, nil)
 	// Cleanup
-	delete(sinfo.core.searches.searches, res.Dest)
+	delete(sinfo.core.router.searches.searches, res.Dest)
 	return true
 }

@@ -261,7 +261,7 @@ func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
 		// Run cleanup when the session is canceled
 		<-sinfo.cancel.Finished()
 		sinfo.core.router.doAdmin(func() {
-			sinfo.core.sessions.removeSession(&sinfo)
+			sinfo.core.router.sessions.removeSession(&sinfo)
 		})
 	}()
 	go sinfo.startWorkers()
@@ -298,9 +298,9 @@ func (ss *sessions) cleanup() {
 
 // Closes a session, removing it from sessions maps.
 func (ss *sessions) removeSession(sinfo *sessionInfo) {
-	if s := sinfo.core.sessions.sinfos[sinfo.myHandle]; s == sinfo {
-		delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
-		delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub)
+	if s := sinfo.core.router.sessions.sinfos[sinfo.myHandle]; s == sinfo {
+		delete(sinfo.core.router.sessions.sinfos, sinfo.myHandle)
+		delete(sinfo.core.router.sessions.byTheirPerm, sinfo.theirPermPub)
 	}
 }
 
@@ -466,9 +466,9 @@ func (sinfo *sessionInfo) _updateNonce(theirNonce *crypto.BoxNonce) {
 
 // Resets all sessions to an uninitialized state.
 // Called after coord changes, so attemtps to use a session will trigger a new ping and notify the remote end of the coord change.
-func (ss *sessions) reset(from phony.IActor) {
+func (ss *sessions) reset() {
 	for _, sinfo := range ss.sinfos {
-		sinfo.EnqueueFrom(from, func() {
+		sinfo.EnqueueFrom(&ss.core.router, func() {
 			sinfo.reset = true
 		})
 	}
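The one signature change in this diff is sessions.reset, which drops its phony.IActor argument: now that the router owns the sessions, reset is only ever called from the router's goroutine, so the router names itself as the sender when enqueueing the per-session work. The resulting call chain, pieced together from the router and session hunks above:

// After this commit: the switch tells the router its coords changed...
func (r *router) reset(from phony.IActor) {
	r.EnqueueFrom(from, func() { // runs on the router actor
		r.sessions.reset() // no caller argument: the router is the caller
		r.dht.reset()
	})
}

// ...and the router, as owner, resets each of its own sessions.
func (ss *sessions) reset() {
	for _, sinfo := range ss.sinfos {
		sinfo.EnqueueFrom(&ss.core.router, func() { // sent from the router
			sinfo.reset = true
		})
	}
}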