2019-11-19 10:00:20 -07:00
|
|
|
package nebula
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
|
|
|
"net"
|
2024-07-31 09:18:56 -06:00
|
|
|
"net/netip"
|
2019-11-19 10:00:20 -07:00
|
|
|
"sync"
|
2021-03-05 19:18:33 -07:00
|
|
|
"sync/atomic"
|
2019-11-19 10:00:20 -07:00
|
|
|
"time"
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
"github.com/gaissmai/bart"
|
2019-11-19 10:00:20 -07:00
|
|
|
"github.com/rcrowley/go-metrics"
|
|
|
|
"github.com/sirupsen/logrus"
|
|
|
|
"github.com/slackhq/nebula/cert"
|
2024-04-03 21:14:51 -06:00
|
|
|
"github.com/slackhq/nebula/config"
|
2021-11-03 19:54:04 -06:00
|
|
|
"github.com/slackhq/nebula/header"
|
2019-11-19 10:00:20 -07:00
|
|
|
)
|
|
|
|
|
2022-10-31 11:37:41 -06:00
|
|
|
// const ProbeLen = 100

const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address

const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse

const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery

// MaxRemotes limits how many underlay addresses we will track per host.
const MaxRemotes = 10

// maxRecvError is the threshold used by HostInfo.RecvErrorExceeded.
const maxRecvError = 4

// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
const MaxHostInfosPerVpnIp = 5

// How long we should prevent roaming back to the previous IP.
// This helps prevent flapping due to packets already in flight
const RoamingSuppressSeconds = 2
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2022-06-21 12:35:23 -06:00
|
|
|
// Relay lifecycle states, stored in Relay.State.
const (
	Requested = iota // We asked the relay host to set up the relay; awaiting confirmation
	PeerRequested    // The relay was requested by the peer side; awaiting completion
	Established      // Both sides confirmed; the relay is usable for traffic
)

// Relay roles, stored in Relay.Type.
const (
	Unknowntype = iota // Role not yet determined
	ForwardingType     // This host forwards relayed traffic between two peers
	TerminalType       // This host is an endpoint of the relayed traffic
)

// Relay describes one relayed path. Treat values as immutable once published;
// see the RelayState comment for the copy-on-write convention.
type Relay struct {
	Type  int // One of Unknowntype/ForwardingType/TerminalType
	State int // One of Requested/PeerRequested/Established

	LocalIndex  uint32 // Our index for this relay
	RemoteIndex uint32 // The peer's index for this relay

	PeerIp netip.Addr // Vpn ip of the host being reached through this relay
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
// HostMap is the main lookup structure for established and pending tunnels.
// It indexes the same HostInfo objects several ways: by vpn ip, by our local
// index, by the peer's index, and by relay index.
type HostMap struct {
	sync.RWMutex //Because we concurrently read and write to our maps

	Indexes       map[uint32]*HostInfo // Maps our local index to a HostInfo
	Relays        map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
	RemoteIndexes map[uint32]*HostInfo // Maps the peer's index to a HostInfo
	Hosts         map[netip.Addr]*HostInfo // Maps a vpn ip to its primary HostInfo (others linked via next/prev)

	// preferredRanges is swapped atomically by reload; always read through
	// GetPreferredRanges. NOTE: must be stored once before any load.
	preferredRanges atomic.Pointer[[]netip.Prefix]

	vpnCIDR netip.Prefix // The overlay network this hostmap serves
	l       *logrus.Logger
}
|
|
|
|
|
2023-03-30 10:09:20 -06:00
|
|
|
// For synchronization, treat the pointed-to Relay struct as immutable. To edit the Relay
// struct, make a copy of an existing value, edit the fields in the copy, and
// then store a pointer to the new copy in both relayForBy* maps.
type RelayState struct {
	sync.RWMutex

	relays        map[netip.Addr]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer
	relayForByIp  map[netip.Addr]*Relay   // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
	relayForByIdx map[uint32]*Relay       // Maps a local index to some Relay info
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) DeleteRelay(ip netip.Addr) {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.Lock()
|
|
|
|
defer rs.Unlock()
|
|
|
|
delete(rs.relays, ip)
|
|
|
|
}
|
|
|
|
|
2023-05-04 14:16:37 -06:00
|
|
|
func (rs *RelayState) CopyAllRelayFor() []*Relay {
|
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
|
|
|
ret := make([]*Relay, 0, len(rs.relayForByIdx))
|
|
|
|
for _, r := range rs.relayForByIdx {
|
|
|
|
ret = append(ret, r)
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
|
|
|
r, ok := rs.relayForByIp[ip]
|
|
|
|
return r, ok
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.Lock()
|
|
|
|
defer rs.Unlock()
|
|
|
|
rs.relays[ip] = struct{}{}
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) CopyRelayIps() []netip.Addr {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
2024-07-31 09:18:56 -06:00
|
|
|
ret := make([]netip.Addr, 0, len(rs.relays))
|
2022-06-21 12:35:23 -06:00
|
|
|
for ip := range rs.relays {
|
|
|
|
ret = append(ret, ip)
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) CopyRelayForIps() []netip.Addr {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
2024-07-31 09:18:56 -06:00
|
|
|
currentRelays := make([]netip.Addr, 0, len(rs.relayForByIp))
|
2022-06-21 12:35:23 -06:00
|
|
|
for relayIp := range rs.relayForByIp {
|
|
|
|
currentRelays = append(currentRelays, relayIp)
|
|
|
|
}
|
|
|
|
return currentRelays
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *RelayState) CopyRelayForIdxs() []uint32 {
|
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
|
|
|
ret := make([]uint32, 0, len(rs.relayForByIdx))
|
|
|
|
for i := range rs.relayForByIdx {
|
|
|
|
ret = append(ret, i)
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool {
|
2023-03-30 10:09:20 -06:00
|
|
|
rs.Lock()
|
|
|
|
defer rs.Unlock()
|
|
|
|
r, ok := rs.relayForByIp[vpnIp]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
newRelay := *r
|
|
|
|
newRelay.State = Established
|
|
|
|
newRelay.RemoteIndex = remoteIdx
|
|
|
|
rs.relayForByIdx[r.LocalIndex] = &newRelay
|
|
|
|
rs.relayForByIp[r.PeerIp] = &newRelay
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Relay, bool) {
|
|
|
|
rs.Lock()
|
|
|
|
defer rs.Unlock()
|
|
|
|
r, ok := rs.relayForByIdx[localIdx]
|
|
|
|
if !ok {
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
newRelay := *r
|
|
|
|
newRelay.State = Established
|
|
|
|
newRelay.RemoteIndex = remoteIdx
|
|
|
|
rs.relayForByIdx[r.LocalIndex] = &newRelay
|
|
|
|
rs.relayForByIp[r.PeerIp] = &newRelay
|
|
|
|
return &newRelay, true
|
2022-06-21 12:35:23 -06:00
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) QueryRelayForByIp(vpnIp netip.Addr) (*Relay, bool) {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
|
|
|
r, ok := rs.relayForByIp[vpnIp]
|
|
|
|
return r, ok
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
|
|
|
|
rs.RLock()
|
|
|
|
defer rs.RUnlock()
|
|
|
|
r, ok := rs.relayForByIdx[idx]
|
|
|
|
return r, ok
|
|
|
|
}
|
2023-03-30 10:09:20 -06:00
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
|
2022-06-21 12:35:23 -06:00
|
|
|
rs.Lock()
|
|
|
|
defer rs.Unlock()
|
|
|
|
rs.relayForByIp[ip] = r
|
|
|
|
rs.relayForByIdx[idx] = r
|
|
|
|
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
// HostInfo tracks everything we know about a single tunnel to a peer:
// underlay addresses, crypto state, index ids, relay state, and roaming history.
// Instances for the same vpn ip form a doubly linked list via next/prev with
// the primary stored in HostMap.Hosts.
type HostInfo struct {
	remote          netip.AddrPort // Current underlay address in use for this tunnel
	remotes         *RemoteList    // All candidate underlay addresses we know of
	promoteCounter  atomic.Uint32  // Packet counter driving TryPromoteBest cadence
	ConnectionState *ConnectionState
	remoteIndexId   uint32     // The peer's index for this tunnel
	localIndexId    uint32     // Our index for this tunnel
	vpnIp           netip.Addr // The peer's overlay address
	recvError       atomic.Uint32 // Count of recv_error packets; see RecvErrorExceeded
	remoteCidr      *bart.Table[struct{}] // Extra allowed source prefixes from the peer cert; nil in the single-ip case
	relayState      RelayState

	// HandshakePacket records the packets used to create this hostinfo
	// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
	HandshakePacket map[uint8][]byte

	// nextLHQuery is the earliest we can ask the lighthouse for new information.
	// This is used to limit lighthouse re-queries in chatty clients
	nextLHQuery atomic.Int64

	// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
	// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
	// with a handshake
	lastRebindCount int8

	// lastHandshakeTime records the time the remote side told us about at the stage when the handshake was completed locally
	// Stage 1 packet will contain it if I am a responder, stage 2 packet if I am an initiator
	// This is used to avoid an attack where a handshake packet is replayed after some time
	lastHandshakeTime uint64

	lastRoam       time.Time      // When we last switched remotes; see RoamingSuppressSeconds
	lastRoamRemote netip.AddrPort // The remote we roamed away from

	// Used to track other hostinfos for this vpn ip since only 1 can be primary
	// Synchronised via hostmap lock and not the hostinfo lock.
	next, prev *HostInfo
}
|
|
|
|
|
2022-06-21 12:35:23 -06:00
|
|
|
// ViaSender carries the relay context for a packet that arrived through a relay.
type ViaSender struct {
	relayHI   *HostInfo // relayHI is the host info object of the relay
	remoteIdx uint32    // remoteIdx is the index included in the header of the received packet
	relay     *Relay    // relay contains the rest of the relay information, including the PeerIP of the host trying to communicate with us.
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
// cachedPacket is a packet queued while a handshake is pending, replayed via
// callback once the tunnel is ready.
type cachedPacket struct {
	messageType    header.MessageType
	messageSubType header.MessageSubType
	callback       packetCallback // Invoked to (re)send the packet once possible
	packet         []byte         // The original payload to send
}
|
|
|
|
|
2021-11-03 19:54:04 -06:00
|
|
|
// packetCallback sends a previously cached packet over the now-ready tunnel.
type packetCallback func(t header.MessageType, st header.MessageSubType, h *HostInfo, p, nb, out []byte)

// cachedPacketMetrics counts cached packets that were eventually sent vs dropped.
type cachedPacketMetrics struct {
	sent    metrics.Counter
	dropped metrics.Counter
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
// NewHostMapFromConfig builds the main HostMap, loads preferred_ranges from
// config, and registers a reload callback so config changes are picked up live.
func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR netip.Prefix, c *config.C) *HostMap {
	hm := newHostMap(l, vpnCIDR)

	// Initial load must happen before anyone calls GetPreferredRanges.
	hm.reload(c, true)
	c.RegisterReloadCallback(func(c *config.C) {
		hm.reload(c, false)
	})

	l.WithField("network", hm.vpnCIDR.String()).
		WithField("preferredRanges", hm.GetPreferredRanges()).
		Info("Main HostMap created")

	return hm
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func newHostMap(l *logrus.Logger, vpnCIDR netip.Prefix) *HostMap {
|
2024-04-03 21:14:51 -06:00
|
|
|
return &HostMap{
|
|
|
|
Indexes: map[uint32]*HostInfo{},
|
|
|
|
Relays: map[uint32]*HostInfo{},
|
|
|
|
RemoteIndexes: map[uint32]*HostInfo{},
|
2024-07-31 09:18:56 -06:00
|
|
|
Hosts: map[netip.Addr]*HostInfo{},
|
2024-04-03 21:14:51 -06:00
|
|
|
vpnCIDR: vpnCIDR,
|
|
|
|
l: l,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (hm *HostMap) reload(c *config.C, initial bool) {
|
|
|
|
if initial || c.HasChanged("preferred_ranges") {
|
2024-07-31 09:18:56 -06:00
|
|
|
var preferredRanges []netip.Prefix
|
2024-04-03 21:14:51 -06:00
|
|
|
rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
|
|
|
|
|
|
|
|
for _, rawPreferredRange := range rawPreferredRanges {
|
2024-07-31 09:18:56 -06:00
|
|
|
preferredRange, err := netip.ParsePrefix(rawPreferredRange)
|
2024-04-03 21:14:51 -06:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
hm.l.WithError(err).WithField("range", rawPreferredRanges).Warn("Failed to parse preferred ranges, ignoring")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
preferredRanges = append(preferredRanges, preferredRange)
|
|
|
|
}
|
|
|
|
|
|
|
|
oldRanges := hm.preferredRanges.Swap(&preferredRanges)
|
|
|
|
if !initial {
|
|
|
|
hm.l.WithField("oldPreferredRanges", *oldRanges).WithField("newPreferredRanges", preferredRanges).Info("preferred_ranges changed")
|
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-24 11:37:52 -06:00
|
|
|
// EmitStats reports host, index, and relay counts to the stats collection system
|
|
|
|
func (hm *HostMap) EmitStats() {
|
2019-11-19 10:00:20 -07:00
|
|
|
hm.RLock()
|
|
|
|
hostLen := len(hm.Hosts)
|
|
|
|
indexLen := len(hm.Indexes)
|
2020-11-23 12:51:16 -07:00
|
|
|
remoteIndexLen := len(hm.RemoteIndexes)
|
2022-06-21 12:35:23 -06:00
|
|
|
relaysLen := len(hm.Relays)
|
2019-11-19 10:00:20 -07:00
|
|
|
hm.RUnlock()
|
|
|
|
|
2023-07-24 11:37:52 -06:00
|
|
|
metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
|
|
|
|
metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
|
|
|
|
metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
|
|
|
|
metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
|
2022-06-21 12:35:23 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
func (hm *HostMap) RemoveRelay(localIdx uint32) {
|
|
|
|
hm.Lock()
|
2023-05-04 14:16:37 -06:00
|
|
|
_, ok := hm.Relays[localIdx]
|
2022-06-21 12:35:23 -06:00
|
|
|
if !ok {
|
|
|
|
hm.Unlock()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
delete(hm.Relays, localIdx)
|
|
|
|
hm.Unlock()
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
2023-03-13 11:35:14 -06:00
|
|
|
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
|
|
|
|
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
|
2022-06-21 12:35:23 -06:00
|
|
|
// Delete the host itself, ensuring it's not modified anymore
|
2020-11-23 12:51:16 -07:00
|
|
|
hm.Lock()
|
2023-03-13 11:35:14 -06:00
|
|
|
// If we have a previous or next hostinfo then we are not the last one for this vpn ip
|
|
|
|
final := (hostinfo.next == nil && hostinfo.prev == nil)
|
2021-04-14 12:50:09 -06:00
|
|
|
hm.unlockedDeleteHostInfo(hostinfo)
|
2022-06-21 12:35:23 -06:00
|
|
|
hm.Unlock()
|
|
|
|
|
2023-03-13 11:35:14 -06:00
|
|
|
return final
|
2022-06-21 12:35:23 -06:00
|
|
|
}
|
|
|
|
|
2023-03-13 11:35:14 -06:00
|
|
|
func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
|
|
|
|
hm.Lock()
|
|
|
|
defer hm.Unlock()
|
|
|
|
hm.unlockedMakePrimary(hostinfo)
|
|
|
|
}
|
|
|
|
|
|
|
|
// unlockedMakePrimary moves hostinfo to the head of its vpn ip's linked list and
// points hm.Hosts at it. Caller must hold the hostmap write lock.
func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
	oldHostinfo := hm.Hosts[hostinfo.vpnIp]
	if oldHostinfo == hostinfo {
		// Already primary, nothing to do.
		return
	}

	// Unlink hostinfo from its current position in the list.
	if hostinfo.prev != nil {
		hostinfo.prev.next = hostinfo.next
	}

	if hostinfo.next != nil {
		hostinfo.next.prev = hostinfo.prev
	}

	hm.Hosts[hostinfo.vpnIp] = hostinfo

	if oldHostinfo == nil {
		// There was no previous primary; the list is just us now.
		return
	}

	// Push the old primary behind us.
	hostinfo.next = oldHostinfo
	oldHostinfo.prev = hostinfo
	hostinfo.prev = nil
}
|
|
|
|
|
2021-04-14 12:50:09 -06:00
|
|
|
// unlockedDeleteHostInfo removes hostinfo from every index map and repairs the
// per-vpn-ip linked list. Caller must hold the hostmap write lock.
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
	primary, ok := hm.Hosts[hostinfo.vpnIp]
	if ok && primary == hostinfo {
		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
		delete(hm.Hosts, hostinfo.vpnIp)
		if len(hm.Hosts) == 0 {
			// Reallocate so a formerly large map can be garbage collected.
			hm.Hosts = map[netip.Addr]*HostInfo{}
		}

		if hostinfo.next != nil {
			// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
			hm.Hosts[hostinfo.vpnIp] = hostinfo.next
			// It is primary, there is no previous hostinfo now
			hostinfo.next.prev = nil
		}

	} else {
		// Relink if we were in the middle of multiple hostinfos for this vpn ip
		if hostinfo.prev != nil {
			hostinfo.prev.next = hostinfo.next
		}

		if hostinfo.next != nil {
			hostinfo.next.prev = hostinfo.prev
		}
	}

	hostinfo.next = nil
	hostinfo.prev = nil

	// The remote index uses index ids outside our control so lets make sure we are only removing
	// the remote index pointer here if it points to the hostinfo we are deleting
	hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
	if ok && hostinfo2 == hostinfo {
		delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
		if len(hm.RemoteIndexes) == 0 {
			hm.RemoteIndexes = map[uint32]*HostInfo{}
		}
	}

	delete(hm.Indexes, hostinfo.localIndexId)
	if len(hm.Indexes) == 0 {
		hm.Indexes = map[uint32]*HostInfo{}
	}

	if hm.l.Level >= logrus.DebugLevel {
		hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
			Debug("Hostmap hostInfo deleted")
	}

	// Drop any relays that were running through this hostinfo.
	for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
		delete(hm.Relays, localRelayIdx)
	}
}
|
|
|
|
|
2023-07-24 11:37:52 -06:00
|
|
|
func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
|
2019-11-19 10:00:20 -07:00
|
|
|
hm.RLock()
|
|
|
|
if h, ok := hm.Indexes[index]; ok {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return h
|
2019-11-19 10:00:20 -07:00
|
|
|
} else {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return nil
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
}
|
2023-03-30 10:09:20 -06:00
|
|
|
|
2023-07-24 11:37:52 -06:00
|
|
|
func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
|
2022-06-21 12:35:23 -06:00
|
|
|
hm.RLock()
|
|
|
|
if h, ok := hm.Relays[index]; ok {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return h
|
2022-06-21 12:35:23 -06:00
|
|
|
} else {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return nil
|
2022-06-21 12:35:23 -06:00
|
|
|
}
|
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2023-07-24 11:37:52 -06:00
|
|
|
func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
|
2019-11-19 10:00:20 -07:00
|
|
|
hm.RLock()
|
2020-11-23 12:51:16 -07:00
|
|
|
if h, ok := hm.RemoteIndexes[index]; ok {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return h
|
2020-11-23 12:51:16 -07:00
|
|
|
} else {
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return nil
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
// QueryVpnIp returns the primary HostInfo for vpnIp without attempting
// underlay promotion (no Interface supplied).
func (hm *HostMap) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
	return hm.queryVpnIp(vpnIp, nil)
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
// QueryVpnIpRelayFor finds a HostInfo for relayHostIp that has an Established
// relay toward targetIp, walking all linked hostinfos for the relay host.
// Returns an error when the relay host is unknown or no established relay exists.
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
	hm.RLock()
	defer hm.RUnlock()

	h, ok := hm.Hosts[relayHostIp]
	if !ok {
		return nil, nil, errors.New("unable to find host")
	}
	// Any hostinfo in the chain may hold the relay; check each in turn.
	for h != nil {
		r, ok := h.relayState.QueryRelayForByIp(targetIp)
		if ok && r.State == Established {
			return h, r, nil
		}
		h = h.next
	}
	return nil, nil, errors.New("unable to find host with relay")
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
|
2019-11-19 10:00:20 -07:00
|
|
|
hm.RLock()
|
|
|
|
if h, ok := hm.Hosts[vpnIp]; ok {
|
2021-05-05 12:10:55 -06:00
|
|
|
hm.RUnlock()
|
2021-04-14 12:50:09 -06:00
|
|
|
// Do not attempt promotion if you are a lighthouse
|
|
|
|
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
|
2024-04-03 21:14:51 -06:00
|
|
|
h.TryPromoteBest(hm.GetPreferredRanges(), promoteIfce)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
2023-07-24 11:37:52 -06:00
|
|
|
return h
|
2021-04-14 12:50:09 -06:00
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
2021-05-05 12:10:55 -06:00
|
|
|
|
|
|
|
hm.RUnlock()
|
2023-07-24 11:37:52 -06:00
|
|
|
return nil
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
2023-03-13 11:35:14 -06:00
|
|
|
// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
// If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
	if f.serveDns {
		// Register the peer's cert name in the embedded DNS server.
		remoteCert := hostinfo.ConnectionState.peerCert
		dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
	}

	existing := hm.Hosts[hostinfo.vpnIp]
	hm.Hosts[hostinfo.vpnIp] = hostinfo

	// The new hostinfo becomes primary; the old primary is linked behind it.
	if existing != nil {
		hostinfo.next = existing
		existing.prev = hostinfo
	}

	hm.Indexes[hostinfo.localIndexId] = hostinfo
	hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo

	if hm.l.Level >= logrus.DebugLevel {
		hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
			"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
			Debug("Hostmap vpnIp added")
	}

	// Enforce MaxHostInfosPerVpnIp by deleting anything past the cap in the chain.
	i := 1
	check := hostinfo
	for check != nil {
		if i > MaxHostInfosPerVpnIp {
			hm.unlockedDeleteHostInfo(check)
		}
		check = check.next
		i++
	}
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
// GetPreferredRanges returns the current preferred underlay ranges. Safe for
// concurrent use; the slice is replaced atomically on config reload.
func (hm *HostMap) GetPreferredRanges() []netip.Prefix {
	//NOTE: if preferredRanges is ever not stored before a load this will fail to dereference a nil pointer
	return *hm.preferredRanges.Load()
}
|
|
|
|
|
|
|
|
func (hm *HostMap) ForEachVpnIp(f controlEach) {
|
|
|
|
hm.RLock()
|
|
|
|
defer hm.RUnlock()
|
|
|
|
|
|
|
|
for _, v := range hm.Hosts {
|
|
|
|
f(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (hm *HostMap) ForEachIndex(f controlEach) {
|
|
|
|
hm.RLock()
|
|
|
|
defer hm.RUnlock()
|
|
|
|
|
|
|
|
for _, v := range hm.Indexes {
|
|
|
|
f(v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-14 12:50:09 -06:00
|
|
|
// TryPromoteBest handles re-querying lighthouses and probing for better paths
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interface) {
	// Every tryPromoteEvery packets, probe preferred candidate remotes.
	c := i.promoteCounter.Add(1)
	if c%ifce.tryPromoteEvery.Load() == 0 {
		remote := i.remote

		// return early if we are already on a preferred remote
		if remote.IsValid() {
			rIP := remote.Addr()
			for _, l := range preferredRanges {
				if l.Contains(rIP) {
					return
				}
			}
		}

		i.remotes.ForEach(preferredRanges, func(addr netip.AddrPort, preferred bool) {
			// Skip non-preferred/invalid candidates once we already have a working remote.
			if remote.IsValid() && (!addr.IsValid() || !preferred) {
				return
			}

			// Try to send a test packet to that host, this should
			// cause it to detect a roaming event and switch remotes
			ifce.sendTo(header.Test, header.TestRequest, i.ConnectionState, i, addr, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
		})
	}

	// Re query our lighthouses for new remotes occasionally
	if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
		now := time.Now().UnixNano()
		// Rate-limit queries; nextLHQuery is the earliest permitted time.
		if now < i.nextLHQuery.Load() {
			return
		}

		i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
		ifce.lightHouse.QueryServer(i.vpnIp)
	}
}
|
|
|
|
|
|
|
|
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
|
|
|
|
if i.ConnectionState != nil {
|
|
|
|
return i.ConnectionState.peerCert
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
// SetRemote switches the active underlay address to remote and records it in
// the remote list so the lighthouse can learn it.
func (i *HostInfo) SetRemote(remote netip.AddrPort) {
	// We copy here because we likely got this remote from a source that reuses the object
	if i.remote != remote {
		i.remote = remote
		i.remotes.LearnRemote(i.vpnIp, remote)
	}
}
|
|
|
|
|
2021-10-19 08:53:55 -06:00
|
|
|
// SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
// time on the HostInfo will also be updated.
func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) bool {
	if !newRemote.IsValid() {
		// relays have nil udp Addrs
		return false
	}
	currentRemote := i.remote
	if !currentRemote.IsValid() {
		// No current remote at all; take the new one unconditionally.
		i.SetRemote(newRemote)
		return true
	}

	// NOTE: We do this loop here instead of calling `isPreferred` in
	// remote_list.go so that we only have to loop over preferredRanges once.
	newIsPreferred := false
	for _, l := range hm.GetPreferredRanges() {
		// return early if we are already on a preferred remote
		if l.Contains(currentRemote.Addr()) {
			return false
		}

		if l.Contains(newRemote.Addr()) {
			newIsPreferred = true
		}
	}

	if newIsPreferred {
		// Consider this a roaming event
		i.lastRoam = time.Now()
		i.lastRoamRemote = currentRemote

		i.SetRemote(newRemote)

		return true
	}

	return false
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
func (i *HostInfo) RecvErrorExceeded() bool {
|
2023-11-02 15:53:59 -06:00
|
|
|
if i.recvError.Add(1) >= maxRecvError {
|
|
|
|
return true
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2019-12-12 09:34:17 -07:00
|
|
|
// CreateRemoteCIDR builds the prefix table of addresses this peer is allowed
// to source traffic from, derived from its certificate's ips and subnets.
// When the cert holds a single ip and no subnets the table is skipped entirely.
func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
	if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
		// Simple case, no CIDRTree needed
		return
	}

	remoteCidr := new(bart.Table[struct{}])
	for _, ip := range c.Details.Ips {
		//TODO: IPV6-WORK what to do when ip is invalid?
		nip, _ := netip.AddrFromSlice(ip.IP)
		nip = nip.Unmap()
		bits, _ := ip.Mask.Size()
		remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
	}

	for _, n := range c.Details.Subnets {
		//TODO: IPV6-WORK what to do when ip is invalid?
		nip, _ := netip.AddrFromSlice(n.IP)
		nip = nip.Unmap()
		bits, _ := n.Mask.Size()
		remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
	}
	i.remoteCidr = remoteCidr
}
|
|
|
|
|
2021-03-26 08:46:30 -06:00
|
|
|
func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
|
2020-04-06 12:34:00 -06:00
|
|
|
if i == nil {
|
|
|
|
return logrus.NewEntry(l)
|
|
|
|
}
|
|
|
|
|
2023-02-13 13:41:05 -07:00
|
|
|
li := l.WithField("vpnIp", i.vpnIp).
|
|
|
|
WithField("localIndex", i.localIndexId).
|
|
|
|
WithField("remoteIndex", i.remoteIndexId)
|
|
|
|
|
2020-04-06 12:34:00 -06:00
|
|
|
if connState := i.ConnectionState; connState != nil {
|
|
|
|
if peerCert := connState.peerCert; peerCert != nil {
|
|
|
|
li = li.WithField("certName", peerCert.Details.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return li
|
|
|
|
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
// Utility functions
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
|
2019-11-19 10:00:20 -07:00
|
|
|
//FIXME: This function is pretty garbage
|
2024-07-31 09:18:56 -06:00
|
|
|
var ips []netip.Addr
|
2019-11-19 10:00:20 -07:00
|
|
|
ifaces, _ := net.Interfaces()
|
|
|
|
for _, i := range ifaces {
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
allow := allowList.AllowName(i.Name)
|
2021-03-31 09:26:35 -06:00
|
|
|
if l.Level >= logrus.TraceLevel {
|
|
|
|
l.WithField("interfaceName", i.Name).WithField("allow", allow).Trace("localAllowList.AllowName")
|
|
|
|
}
|
|
|
|
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
if !allow {
|
|
|
|
continue
|
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
addrs, _ := i.Addrs()
|
|
|
|
for _, addr := range addrs {
|
|
|
|
var ip net.IP
|
|
|
|
switch v := addr.(type) {
|
|
|
|
case *net.IPNet:
|
|
|
|
//continue
|
|
|
|
ip = v.IP
|
|
|
|
case *net.IPAddr:
|
|
|
|
ip = v.IP
|
|
|
|
}
|
2021-03-18 19:37:24 -06:00
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
nip, ok := netip.AddrFromSlice(ip)
|
|
|
|
if !ok {
|
|
|
|
if l.Level >= logrus.DebugLevel {
|
|
|
|
l.WithField("localIp", ip).Debug("ip was invalid for netip")
|
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
nip = nip.Unmap()
|
|
|
|
|
2021-03-18 19:37:24 -06:00
|
|
|
//TODO: Filtering out link local for now, this is probably the most correct thing
|
|
|
|
//TODO: Would be nice to filter out SLAAC MAC based ips as well
|
2024-07-31 09:18:56 -06:00
|
|
|
if nip.IsLoopback() == false && nip.IsLinkLocalUnicast() == false {
|
|
|
|
allow := allowList.Allow(nip)
|
2021-03-31 09:26:35 -06:00
|
|
|
if l.Level >= logrus.TraceLevel {
|
2024-07-31 09:18:56 -06:00
|
|
|
l.WithField("localIp", nip).WithField("allow", allow).Trace("localAllowList.Allow")
|
2021-03-31 09:26:35 -06:00
|
|
|
}
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
if !allow {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2024-07-31 09:18:56 -06:00
|
|
|
ips = append(ips, nip)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2024-07-31 09:18:56 -06:00
|
|
|
return ips
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|