package nebula

import (
	"bytes"
	"context"
	"encoding/binary"
	"net/netip"
	"sync"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/header"
)

type trafficDecision int

const (
	doNothing      trafficDecision = 0
	deleteTunnel   trafficDecision = 1 // delete the hostinfo on our side, do not notify the remote
	closeTunnel    trafficDecision = 2 // delete the hostinfo and notify the remote
	swapPrimary    trafficDecision = 3
	migrateRelays  trafficDecision = 4
	tryRehandshake trafficDecision = 5
	sendTestPacket trafficDecision = 6
)
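
// A trafficDecision is computed while holding the hostmap read lock in
// makeTrafficDecision and is acted on after that lock is released in
// doTrafficCheck, so each value names an action to take rather than
// performing it inline.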

type connectionManager struct {
	in     map[uint32]struct{}
	inLock *sync.RWMutex

	out     map[uint32]struct{}
	outLock *sync.RWMutex

	// relayUsed holds which relay local indexes are in use
	relayUsed     map[uint32]struct{}
	relayUsedLock *sync.RWMutex

	hostMap                 *HostMap
	trafficTimer            *LockingTimerWheel[uint32]
	intf                    *Interface
	pendingDeletion         map[uint32]struct{}
	punchy                  *Punchy
	checkInterval           time.Duration
	pendingDeletionInterval time.Duration
	metricsTxPunchy         metrics.Counter

	l *logrus.Logger
}

func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
	var max time.Duration
	if checkInterval < pendingDeletionInterval {
		max = pendingDeletionInterval
	} else {
		max = checkInterval
	}

	nc := &connectionManager{
		hostMap:                 intf.hostMap,
		in:                      make(map[uint32]struct{}),
		inLock:                  &sync.RWMutex{},
		out:                     make(map[uint32]struct{}),
		outLock:                 &sync.RWMutex{},
		relayUsed:               make(map[uint32]struct{}),
		relayUsedLock:           &sync.RWMutex{},
		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
		intf:                    intf,
		pendingDeletion:         make(map[uint32]struct{}),
		checkInterval:           checkInterval,
		pendingDeletionInterval: pendingDeletionInterval,
		punchy:                  punchy,
		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
		l:                       l,
	}

	nc.Start(ctx)
	return nc
}
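
// Construction sketch (hedged): the manager is wired up once when the interface
// starts. The ifce and punchy values here are assumed to come from the normal
// initialization path, and the intervals are illustrative rather than defaults.
//
//	cm := newConnectionManager(ctx, l, ifce, 60*time.Second, 10*time.Second, punchy)
//	_ = cm // Start(ctx) was already called above, so the Run loop is ticking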

func (n *connectionManager) In(localIndex uint32) {
	n.inLock.RLock()
	// If this already exists, return
	if _, ok := n.in[localIndex]; ok {
		n.inLock.RUnlock()
		return
	}
	n.inLock.RUnlock()

	n.inLock.Lock()
	n.in[localIndex] = struct{}{}
	n.inLock.Unlock()
}
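
// The read-then-write locking in In (and Out/RelayUsed below) is intentionally
// racy: two goroutines can both miss the key under the read lock and both
// insert it under the write lock, but the second insert of struct{}{} is an
// idempotent no-op, so the hot path stays on the cheaper RLock without hurting
// correctness.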

func (n *connectionManager) Out(localIndex uint32) {
	n.outLock.RLock()
	// If this already exists, return
	if _, ok := n.out[localIndex]; ok {
		n.outLock.RUnlock()
		return
	}
	n.outLock.RUnlock()

	n.outLock.Lock()
	n.out[localIndex] = struct{}{}
	n.outLock.Unlock()
}

func (n *connectionManager) RelayUsed(localIndex uint32) {
	n.relayUsedLock.RLock()
	// If this already exists, return
	if _, ok := n.relayUsed[localIndex]; ok {
		n.relayUsedLock.RUnlock()
		return
	}
	n.relayUsedLock.RUnlock()

	n.relayUsedLock.Lock()
	n.relayUsed[localIndex] = struct{}{}
	n.relayUsedLock.Unlock()
}

// getAndResetTrafficCheck reports whether there was any inbound or outbound
// traffic within the last tick and resets the state for this local index.
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
	n.inLock.Lock()
	n.outLock.Lock()
	_, in := n.in[localIndex]
	_, out := n.out[localIndex]
	delete(n.in, localIndex)
	delete(n.out, localIndex)
	n.inLock.Unlock()
	n.outLock.Unlock()
	return in, out
}

func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
	n.outLock.Lock()
	if _, ok := n.out[localIndex]; ok {
		n.outLock.Unlock()
		return
	}
	n.out[localIndex] = struct{}{}
	n.trafficTimer.Add(localIndex, n.checkInterval)
	n.outLock.Unlock()
}

func (n *connectionManager) Start(ctx context.Context) {
	go n.Run(ctx)
}

func (n *connectionManager) Run(ctx context.Context) {
	//TODO: this tick should be based on the min wheel tick? Check firewall
	clockSource := time.NewTicker(500 * time.Millisecond)
	defer clockSource.Stop()

	p := []byte("")
	nb := make([]byte, 12)
	out := make([]byte, mtu)

	for {
		select {
		case <-ctx.Done():
			return

		case now := <-clockSource.C:
			n.trafficTimer.Advance(now)
			for {
				localIndex, has := n.trafficTimer.Purge()
				if !has {
					break
				}

				n.doTrafficCheck(localIndex, p, nb, out, now)
			}
		}
	}
}
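
// Timer wheel contract (hedged sketch of the API used above): Advance moves the
// wheel forward to the current time and Purge drains one expired index per call
// until it reports false.
//
//	tw := NewLockingTimerWheel[uint32](time.Millisecond*500, time.Minute)
//	tw.Add(42, 10*time.Second) // re-check local index 42 in roughly 10s
//	tw.Advance(time.Now())
//	for {
//		localIndex, has := tw.Purge()
//		if !has {
//			break
//		}
//		_ = localIndex // run the traffic check for this index
//	}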

func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)

	switch decision {
	case deleteTunnel:
		if n.hostMap.DeleteHostInfo(hostinfo) {
			// Only clear the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
			n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
		}

	case closeTunnel:
		n.intf.sendCloseTunnel(hostinfo)
		n.intf.closeTunnel(hostinfo)

	case swapPrimary:
		n.swapPrimary(hostinfo, primary)

	case migrateRelays:
		n.migrateRelayUsed(hostinfo, primary)

	case tryRehandshake:
		n.tryRehandshake(hostinfo)

	case sendTestPacket:
		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
	}

	n.resetRelayTrafficCheck(hostinfo)
}

func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
	if hostinfo != nil {
		n.relayUsedLock.Lock()
		defer n.relayUsedLock.Unlock()
		// No need to migrate any relays, delete usage info now.
		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
			delete(n.relayUsed, idx)
		}
	}
}

func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
	relayFor := oldhostinfo.relayState.CopyAllRelayFor()

	for _, r := range relayFor {
		existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)

		var index uint32
		var relayFrom netip.Addr
		var relayTo netip.Addr
		switch {
		case ok && existing.State == Established:
			// This relay already exists in newhostinfo; do nothing.
			continue
		case ok && existing.State == Requested:
			// The relay exists in a Requested state; re-send the request
			index = existing.LocalIndex
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnNet.Addr()
				relayTo = existing.PeerIp
			case ForwardingType:
				relayFrom = existing.PeerIp
				relayTo = newhostinfo.vpnIp
			default:
				// should never happen
			}
		case !ok:
			n.relayUsedLock.RLock()
			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
				// The relay hasn't been used; don't migrate it.
				n.relayUsedLock.RUnlock()
				continue
			}
			n.relayUsedLock.RUnlock()
			// The relay doesn't exist at all; create some relay state and send the request.
			var err error
			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
			if err != nil {
				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
				continue
			}
			switch r.Type {
			case TerminalType:
				relayFrom = n.intf.myVpnNet.Addr()
				relayTo = r.PeerIp
			case ForwardingType:
				relayFrom = r.PeerIp
				relayTo = newhostinfo.vpnIp
			default:
				// should never happen
			}
		}

		//TODO: IPV6-WORK
		relayFromB := relayFrom.As4()
		relayToB := relayTo.As4()

		// Send a CreateRelayRequest to the peer.
		req := NebulaControl{
			Type:                NebulaControl_CreateRelayRequest,
			InitiatorRelayIndex: index,
			RelayFromIp:         binary.BigEndian.Uint32(relayFromB[:]),
			RelayToIp:           binary.BigEndian.Uint32(relayToB[:]),
		}
		msg, err := req.Marshal()
		if err != nil {
			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
		} else {
			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
			n.l.WithFields(logrus.Fields{
				"relayFrom":           req.RelayFromIp,
				"relayTo":             req.RelayToIp,
				"initiatorRelayIndex": req.InitiatorRelayIndex,
				"responderRelayIndex": req.ResponderRelayIndex,
				"vpnIp":               newhostinfo.vpnIp}).
				Info("send CreateRelayRequest")
		}
	}
}
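
// Orientation recap for the requests built above: a TerminalType relay is
// requested from our own vpn address toward the relayed peer, while a
// ForwardingType relay is requested from the relayed peer's address toward the
// new primary tunnel's vpn address.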

func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
	n.hostMap.RLock()
	defer n.hostMap.RUnlock()

	hostinfo := n.hostMap.Indexes[localIndex]
	if hostinfo == nil {
		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
		delete(n.pendingDeletion, localIndex)
		return doNothing, nil, nil
	}

	if n.isInvalidCertificate(now, hostinfo) {
		delete(n.pendingDeletion, hostinfo.localIndexId)
		return closeTunnel, hostinfo, nil
	}

	primary := n.hostMap.Hosts[hostinfo.vpnIp]
	mainHostInfo := true
	if primary != nil && primary != hostinfo {
		mainHostInfo = false
	}

	// Check for traffic on this hostinfo
	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)

	// A hostinfo is determined alive if there is incoming traffic
	if inTraffic {
		decision := doNothing
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
				Debug("Tunnel status")
		}
		delete(n.pendingDeletion, hostinfo.localIndexId)

		if mainHostInfo {
			decision = tryRehandshake

		} else {
			if n.shouldSwapPrimary(hostinfo, primary) {
				decision = swapPrimary
			} else {
				// migrate the relays to the primary, if in use.
				decision = migrateRelays
			}
		}

		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)

		if !outTraffic {
			// Send a punch packet to keep the NAT state alive
			n.sendPunch(hostinfo)
		}

		return decision, hostinfo, primary
	}

	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
		// We have already sent a test packet and nothing was returned; this hostinfo is dead
		hostinfo.logger(n.l).
			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
			Info("Tunnel status")

		delete(n.pendingDeletion, hostinfo.localIndexId)
		return deleteTunnel, hostinfo, nil
	}

	decision := doNothing
	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
		if !outTraffic {
			// If we aren't sending or receiving traffic then it's an unused tunnel and we don't need to test it.
			// Just maintain NAT state if configured to do so.
			n.sendPunch(hostinfo)
			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
			return doNothing, nil, nil
		}

		if n.punchy.GetTargetEverything() {
			// This is similar to the old punchy behavior with a slight optimization.
			// We aren't receiving traffic but we are sending it, so punch on all known
			// ips in case we need to re-prime NAT state
			n.sendPunch(hostinfo)
		}

		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).
				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
				Debug("Tunnel status")
		}

		// Send a test packet to trigger an authenticated tunnel test; this should suss out any lingering tunnel issues
		decision = sendTestPacket

	} else {
		if n.l.Level >= logrus.DebugLevel {
			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
		}
	}

	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
	return decision, hostinfo, nil
}

func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
	// Let's sort this out.

	if current.vpnIp.Compare(n.intf.myVpnNet.Addr()) < 0 {
		// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
		// vpn ip is static across all tunnels for this host pair so let's use that to determine who is flipping.
		// The remote's vpn ip is lower than mine. I will not flip.
		return false
	}

	certState := n.intf.pki.GetCertState()
	return bytes.Equal(current.ConnectionState.myCert.Signature(), certState.Certificate.Signature())
}
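
// Worked example (hedged) for the comparison above: if my vpn address is
// 10.0.0.2 and the remote's is 10.0.0.1, Compare is negative on my side so I
// never flip, while on the remote's side it is positive, leaving its swap
// gated only by the certificate freshness check. At most one side of the pair
// ever swaps, so the hosts converge on a single primary tunnel.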

func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
	n.hostMap.Lock()
	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
	if n.hostMap.Hosts[current.vpnIp] == primary {
		n.hostMap.unlockedMakePrimary(current)
	}
	n.hostMap.Unlock()
}

// isInvalidCertificate checks whether we should destroy a tunnel: it returns
// true if pki.disconnect_invalid is true and the certificate is no longer
// valid. Block listed certificates skip the pki.disconnect_invalid check and
// always return true.
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
	remoteCert := hostinfo.GetCert()
	if remoteCert == nil {
		return false
	}

	caPool := n.intf.pki.GetCAPool()
	err := caPool.VerifyCachedCertificate(now, remoteCert)
	if err == nil {
		return false
	}

	if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
		// Block listed certificates should always be disconnected
		return false
	}

	hostinfo.logger(n.l).WithError(err).
		WithField("fingerprint", remoteCert.Fingerprint).
		Info("Remote certificate is no longer valid, tearing down the tunnel")

	return true
}

func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
	if !n.punchy.GetPunch() {
		// Punching is disabled
		return
	}

	if n.punchy.GetTargetEverything() {
		hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
			n.metricsTxPunchy.Inc(1)
			n.intf.outside.WriteTo([]byte{1}, addr)
		})

	} else if hostinfo.remote.IsValid() {
		n.metricsTxPunchy.Inc(1)
		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
	}
}
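
// The 1-byte punch payload is deliberately meaningless: it only has to traverse
// the NAT to refresh the mapping, and it is too short to parse as a nebula
// header, so the remote side is expected to simply discard it.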

func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
	certState := n.intf.pki.GetCertState()
	if bytes.Equal(hostinfo.ConnectionState.myCert.Signature(), certState.Certificate.Signature()) {
		return
	}

	n.l.WithField("vpnIp", hostinfo.vpnIp).
		WithField("reason", "local certificate is not current").
		Info("Re-handshaking with remote")

	n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}