// nebula/lighthouse.go

package nebula
import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"net"
	"net/netip"
	"sync"
	"sync/atomic"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cidr"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
	"github.com/slackhq/nebula/util"
)
//TODO: if a lighthouse doesn't have an answer, clients AGGRESSIVELY REQUERY.. why? handshake manager and/or getOrHandshake?
//TODO: nodes are roaming lighthouses, this is bad. How are they learning?
var ErrHostNotKnown = errors.New("host not known")
type netIpAndPort struct {
ip net.IP
port uint16
}
type LightHouse struct {
	//TODO: We need a timer wheel to kick out vpnIps that haven't reported in a long time
	sync.RWMutex //Because we concurrently read and write to our maps
	ctx          context.Context

	amLighthouse bool
	myVpnIp      iputil.VpnIp
	myVpnZeros   iputil.VpnIp
	myVpnNet     *net.IPNet

	punchConn udp.Conn
	punchy    *Punchy

	// Local cache of answers from light houses
	// map of vpn Ip to answers
	addrMap map[iputil.VpnIp]*RemoteList

	// filters remote addresses allowed for each host
	// - When we are a lighthouse, this filters what addresses we store and
	//   respond with.
	// - When we are not a lighthouse, this filters which addresses we accept
	//   from lighthouses.
	remoteAllowList atomic.Pointer[RemoteAllowList]

	// filters local addresses that we advertise to lighthouses
	localAllowList atomic.Pointer[LocalAllowList]

	// used to trigger the HandshakeManager when we receive HostQueryReply
	handshakeTrigger chan<- iputil.VpnIp

	// staticList exists to avoid having a bool in each addrMap entry
	// since static should be rare
	staticList  atomic.Pointer[map[iputil.VpnIp]struct{}]
	lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]

	interval     atomic.Int64
	updateCancel context.CancelFunc
	ifce         EncWriter
	nebulaPort   uint32 // 32 bits because protobuf does not have a uint16

	advertiseAddrs atomic.Pointer[[]netIpAndPort]

	// IP's of relays that can be used by peers to access me
	relaysForMe atomic.Pointer[[]iputil.VpnIp]

	queryChan chan iputil.VpnIp

	calculatedRemotes atomic.Pointer[cidr.Tree4[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote

	metrics           *MessageMetrics
	metricHolepunchTx metrics.Counter
	l                 *logrus.Logger
}
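
// Reload note: the atomic.Pointer fields above are swapped wholesale on config
// reload, so hot-path readers never take the RWMutex for them. A minimal
// sketch of the pattern used throughout this file:
//
//	lighthouses := *lh.lighthouses.Load() // reader grabs an immutable snapshot
//	newMap := make(map[iputil.VpnIp]struct{})
//	lh.lighthouses.Store(&newMap) // reload publishes a freshly built map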
// NewLightHouseFromConfig will build a LightHouse struct from the values provided in the config object
// addrMap should be nil unless this is during a config reload
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc udp.Conn, p *Punchy) (*LightHouse, error) {
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
nebulaPort := uint32(c.GetInt("listen.port", 0))
if amLighthouse && nebulaPort == 0 {
return nil, util.NewContextualError("lighthouse.am_lighthouse enabled on node but no port number is set in config", nil, nil)
}
// If port is dynamic, discover it
if nebulaPort == 0 && pc != nil {
uPort, err := pc.LocalAddr()
if err != nil {
return nil, util.NewContextualError("Failed to get listening port", nil, err)
}
nebulaPort = uint32(uPort.Port)
}
ones, _ := myVpnNet.Mask.Size()
h := LightHouse{
ctx: ctx,
amLighthouse: amLighthouse,
myVpnIp: iputil.Ip2VpnIp(myVpnNet.IP),
myVpnZeros: iputil.VpnIp(32 - ones),
myVpnNet: myVpnNet,
addrMap: make(map[iputil.VpnIp]*RemoteList),
nebulaPort: nebulaPort,
punchConn: pc,
punchy: p,
queryChan: make(chan iputil.VpnIp, c.GetUint32("handshakes.query_buffer", 64)),
l: l,
}
lighthouses := make(map[iputil.VpnIp]struct{})
h.lighthouses.Store(&lighthouses)
staticList := make(map[iputil.VpnIp]struct{})
h.staticList.Store(&staticList)
if c.GetBool("stats.lighthouse_metrics", false) {
h.metrics = newLighthouseMetrics()
h.metricHolepunchTx = metrics.GetOrRegisterCounter("messages.tx.holepunch", nil)
} else {
h.metricHolepunchTx = metrics.NilCounter{}
}
err := h.reload(c, true)
if err != nil {
return nil, err
}
c.RegisterReloadCallback(func(c *config.C) {
err := h.reload(c, false)
switch v := err.(type) {
case *util.ContextualError:
v.Log(l)
case error:
l.WithError(err).Error("failed to reload lighthouse")
}
})
h.startQueryWorker()
return &h, nil
}
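
// Illustrative only (not in the original source): wiring a LightHouse up by
// hand. The logger `l`, config `c`, conn `pc` and punchy `p` are assumed to
// exist; the real daemon builds these in main and derives the vpn network
// from the node certificate rather than a hardcoded CIDR.
//
//	_, myVpnNet, _ := net.ParseCIDR("192.168.100.1/24")
//	lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, pc, p)
//	if err != nil {
//		l.WithError(err).Fatal("failed to build lighthouse")
//	}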
func (lh *LightHouse) GetStaticHostList() map[iputil.VpnIp]struct{} {
return *lh.staticList.Load()
}
func (lh *LightHouse) GetLighthouses() map[iputil.VpnIp]struct{} {
return *lh.lighthouses.Load()
}
func (lh *LightHouse) GetRemoteAllowList() *RemoteAllowList {
return lh.remoteAllowList.Load()
}
func (lh *LightHouse) GetLocalAllowList() *LocalAllowList {
return lh.localAllowList.Load()
}
func (lh *LightHouse) GetAdvertiseAddrs() []netIpAndPort {
return *lh.advertiseAddrs.Load()
}
func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
return *lh.relaysForMe.Load()
}
func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4[[]*calculatedRemote] {
return lh.calculatedRemotes.Load()
}
func (lh *LightHouse) GetUpdateInterval() int64 {
return lh.interval.Load()
}
func (lh *LightHouse) reload(c *config.C, initial bool) error {
if initial || c.HasChanged("lighthouse.advertise_addrs") {
rawAdvAddrs := c.GetStringSlice("lighthouse.advertise_addrs", []string{})
advAddrs := make([]netIpAndPort, 0)
for i, rawAddr := range rawAdvAddrs {
fIp, fPort, err := udp.ParseIPAndPort(rawAddr)
if err != nil {
return util.NewContextualError("Unable to parse lighthouse.advertise_addrs entry", m{"addr": rawAddr, "entry": i + 1}, err)
}
if fPort == 0 {
fPort = uint16(lh.nebulaPort)
}
if ip4 := fIp.To4(); ip4 != nil && lh.myVpnNet.Contains(fIp) {
lh.l.WithField("addr", rawAddr).WithField("entry", i+1).
Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range")
continue
}
advAddrs = append(advAddrs, netIpAndPort{ip: fIp, port: fPort})
}
lh.advertiseAddrs.Store(&advAddrs)
if !initial {
lh.l.Info("lighthouse.advertise_addrs has changed")
}
}
if initial || c.HasChanged("lighthouse.interval") {
lh.interval.Store(int64(c.GetInt("lighthouse.interval", 10)))
if !initial {
lh.l.Infof("lighthouse.interval changed to %v", lh.interval.Load())
if lh.updateCancel != nil {
// May not always have a running routine
lh.updateCancel()
}
lh.StartUpdateWorker()
}
}
if initial || c.HasChanged("lighthouse.remote_allow_list") || c.HasChanged("lighthouse.remote_allow_ranges") {
ral, err := NewRemoteAllowListFromConfig(c, "lighthouse.remote_allow_list", "lighthouse.remote_allow_ranges")
if err != nil {
return util.NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
}
lh.remoteAllowList.Store(ral)
if !initial {
//TODO: a diff will be annoyingly difficult
lh.l.Info("lighthouse.remote_allow_list and/or lighthouse.remote_allow_ranges has changed")
}
}
if initial || c.HasChanged("lighthouse.local_allow_list") {
lal, err := NewLocalAllowListFromConfig(c, "lighthouse.local_allow_list")
if err != nil {
return util.NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
}
lh.localAllowList.Store(lal)
if !initial {
//TODO: a diff will be annoyingly difficult
lh.l.Info("lighthouse.local_allow_list has changed")
}
}
if initial || c.HasChanged("lighthouse.calculated_remotes") {
cr, err := NewCalculatedRemotesFromConfig(c, "lighthouse.calculated_remotes")
if err != nil {
return util.NewContextualError("Invalid lighthouse.calculated_remotes", nil, err)
}
lh.calculatedRemotes.Store(cr)
if !initial {
//TODO: a diff will be annoyingly difficult
lh.l.Info("lighthouse.calculated_remotes has changed")
}
}
//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
if initial || c.HasChanged("static_host_map") || c.HasChanged("static_map.cadence") || c.HasChanged("static_map.network") || c.HasChanged("static_map.lookup_timeout") {
// Clean up. Entries still in the static_host_map will be re-built.
// Entries no longer present must have their (possible) background DNS goroutines stopped.
if existingStaticList := lh.staticList.Load(); existingStaticList != nil {
lh.RLock()
for staticVpnIp := range *existingStaticList {
if am, ok := lh.addrMap[staticVpnIp]; ok && am != nil {
am.hr.Cancel()
}
}
lh.RUnlock()
}
// Build a new list based on current config.
staticList := make(map[iputil.VpnIp]struct{})
err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
if err != nil {
return err
}
lh.staticList.Store(&staticList)
if !initial {
//TODO: we should remove any remote list entries for static hosts that were removed/modified?
if c.HasChanged("static_host_map") {
lh.l.Info("static_host_map has changed")
}
if c.HasChanged("static_map.cadence") {
lh.l.Info("static_map.cadence has changed")
}
if c.HasChanged("static_map.network") {
lh.l.Info("static_map.network has changed")
}
if c.HasChanged("static_map.lookup_timeout") {
lh.l.Info("static_map.lookup_timeout has changed")
}
}
}
if initial || c.HasChanged("lighthouse.hosts") {
lhMap := make(map[iputil.VpnIp]struct{})
err := lh.parseLighthouses(c, lh.myVpnNet, lhMap)
if err != nil {
return err
}
lh.lighthouses.Store(&lhMap)
if !initial {
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
lh.l.Info("lighthouse.hosts has changed")
}
}
if initial || c.HasChanged("relay.relays") {
switch c.GetBool("relay.am_relay", false) {
case true:
// Relays aren't allowed to specify other relays
if len(c.GetStringSlice("relay.relays", nil)) > 0 {
lh.l.Info("Ignoring relays from config because am_relay is true")
}
relaysForMe := []iputil.VpnIp{}
lh.relaysForMe.Store(&relaysForMe)
case false:
relaysForMe := []iputil.VpnIp{}
for _, v := range c.GetStringSlice("relay.relays", nil) {
lh.l.WithField("relay", v).Info("Read relay from config")
configRIP := net.ParseIP(v)
if configRIP != nil {
relaysForMe = append(relaysForMe, iputil.Ip2VpnIp(configRIP))
}
}
lh.relaysForMe.Store(&relaysForMe)
}
}
return nil
}
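
// For reference, a config fragment exercising the reload paths above could
// look like this (illustrative values; the keys match the c.Get* calls made
// in reload):
//
//	lighthouse:
//	  am_lighthouse: false
//	  interval: 10
//	  hosts:
//	    - "192.168.100.1"
//	  advertise_addrs:
//	    - "203.0.113.5:4242"
//	relay:
//	  am_relay: false
//	  relays:
//	    - "192.168.100.5"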
func (lh *LightHouse) parseLighthouses(c *config.C, tunCidr *net.IPNet, lhMap map[iputil.VpnIp]struct{}) error {
lhs := c.GetStringSlice("lighthouse.hosts", []string{})
if lh.amLighthouse && len(lhs) != 0 {
lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
}
for i, host := range lhs {
ip := net.ParseIP(host)
if ip == nil {
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
}
if !tunCidr.Contains(ip) {
return util.NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
}
lhMap[iputil.Ip2VpnIp(ip)] = struct{}{}
}
if !lh.amLighthouse && len(lhMap) == 0 {
lh.l.Warn("No lighthouse.hosts configured, this host will only be able to initiate tunnels with static_host_map entries")
}
staticList := lh.GetStaticHostList()
for lhIP := range lhMap {
if _, ok := staticList[lhIP]; !ok {
return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhIP)
}
}
return nil
}
func getStaticMapCadence(c *config.C) (time.Duration, error) {
cadence := c.GetString("static_map.cadence", "30s")
d, err := time.ParseDuration(cadence)
if err != nil {
return 0, err
}
return d, nil
}
func getStaticMapLookupTimeout(c *config.C) (time.Duration, error) {
lookupTimeout := c.GetString("static_map.lookup_timeout", "250ms")
d, err := time.ParseDuration(lookupTimeout)
if err != nil {
return 0, err
}
return d, nil
}
func getStaticMapNetwork(c *config.C) (string, error) {
network := c.GetString("static_map.network", "ip4")
if network != "ip" && network != "ip4" && network != "ip6" {
return "", fmt.Errorf("static_map.network must be one of ip, ip4, or ip6")
}
return network, nil
}
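
// The three helpers above read a config block like the following (the values
// shown are the defaults they fall back to); static_host_map itself is parsed
// by loadStaticMap below:
//
//	static_map:
//	  cadence: 30s
//	  network: ip4
//	  lookup_timeout: 250ms
//	static_host_map:
//	  "192.168.100.1": ["lighthouse.example.com:4242"]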
func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList map[iputil.VpnIp]struct{}) error {
d, err := getStaticMapCadence(c)
if err != nil {
return err
}
network, err := getStaticMapNetwork(c)
if err != nil {
return err
}
lookupTimeout, err := getStaticMapLookupTimeout(c)
if err != nil {
return err
}
shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
i := 0
for k, v := range shm {
rip := net.ParseIP(fmt.Sprintf("%v", k))
if rip == nil {
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, nil)
}
if !tunCidr.Contains(rip) {
return util.NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": rip, "network": tunCidr.String(), "entry": i + 1}, nil)
}
vpnIp := iputil.Ip2VpnIp(rip)
vals, ok := v.([]interface{})
if !ok {
vals = []interface{}{v}
}
remoteAddrs := []string{}
for _, v := range vals {
remoteAddrs = append(remoteAddrs, fmt.Sprintf("%v", v))
}
err := lh.addStaticRemotes(i, d, network, lookupTimeout, vpnIp, remoteAddrs, staticList)
if err != nil {
return err
}
i++
}
return nil
}
func (lh *LightHouse) Query(ip iputil.VpnIp) *RemoteList {
if !lh.IsLighthouseIP(ip) {
lh.QueryServer(ip)
}
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
lh.RUnlock()
return v
}
lh.RUnlock()
return nil
}
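
// Sketch of the intended usage split (assumed from the signatures above):
//
//	// Return whatever is cached now; for non-lighthouse ips this also
//	// enqueues an async lighthouse query.
//	if rl := lh.Query(vpnIp); rl != nil {
//		// use the cached RemoteList
//	}
//	// Fire-and-forget: a HostQueryReply will update the cache later.
//	lh.QueryServer(vpnIp)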
// QueryServer is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp) {
// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
if lh.amLighthouse || lh.IsLighthouseIP(ip) {
return
}
lh.queryChan <- ip
}
func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
lh.RUnlock()
return v
}
lh.RUnlock()
lh.Lock()
defer lh.Unlock()
// Add an entry if we don't already have one
return lh.unlockedGetRemoteList(ip)
}
// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
// details from the remote list. It looks for a hit in the addrMap and a hit in the RemoteList under the owner vpnIp.
// If one is found, f() is called with proper locking; f() must return the result of n.MarshalTo()
func (lh *LightHouse) queryAndPrepMessage(vpnIp iputil.VpnIp, f func(*cache) (int, error)) (bool, int, error) {
lh.RLock()
// Do we have an entry in the main cache?
if v, ok := lh.addrMap[vpnIp]; ok {
// Swap lh lock for remote list lock
v.RLock()
defer v.RUnlock()
lh.RUnlock()
// vpnIp should also be the owner here since we are a lighthouse.
c := v.cache[vpnIp]
// Make sure we have a cached answer to build the reply from
if c != nil {
n, err := f(c)
return true, n, err
}
return false, 0, nil
}
lh.RUnlock()
return false, 0, nil
}
func (lh *LightHouse) DeleteVpnIp(vpnIp iputil.VpnIp) {
// First we check the static mapping
// and do nothing if it is there
if _, ok := lh.GetStaticHostList()[vpnIp]; ok {
return
}
lh.Lock()
//l.Debugln(lh.addrMap)
delete(lh.addrMap, vpnIp)
if lh.l.Level >= logrus.DebugLevel {
lh.l.Debugf("deleting %s from lighthouse.", vpnIp)
}
lh.Unlock()
}
// addStaticRemotes adds a static host entry for vpnIp with ourselves as the owner
// We are the owner because we don't want a lighthouse server to advertise for static hosts it was configured with
// And we don't want a lighthouse query reply to interfere with our learned cache if we are a client
// NOTE: this function should not interact with any hot path objects, like lh.staticList, the caller should handle it
func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, timeout time.Duration, vpnIp iputil.VpnIp, toAddrs []string, staticList map[iputil.VpnIp]struct{}) error {
lh.Lock()
am := lh.unlockedGetRemoteList(vpnIp)
am.Lock()
defer am.Unlock()
ctx := lh.ctx
lh.Unlock()
hr, err := NewHostnameResults(ctx, lh.l, d, network, timeout, toAddrs, func() {
// This callback runs whenever the DNS hostname resolver finds a different set of IP's
// in its resolution for hostnames.
am.Lock()
defer am.Unlock()
am.shouldRebuild = true
})
if err != nil {
return util.NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp, "entry": i + 1}, err)
}
am.unlockedSetHostnamesResults(hr)
for _, addrPort := range hr.GetIPs() {
switch {
case addrPort.Addr().Is4():
to := NewIp4AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
if !lh.unlockedShouldAddV4(vpnIp, to) {
continue
}
am.unlockedPrependV4(lh.myVpnIp, to)
case addrPort.Addr().Is6():
to := NewIp6AndPortFromNetIP(addrPort.Addr(), addrPort.Port())
if !lh.unlockedShouldAddV6(vpnIp, to) {
continue
}
am.unlockedPrependV6(lh.myVpnIp, to)
}
}
// Mark it as static in the caller provided map
staticList[vpnIp] = struct{}{}
return nil
}
// addCalculatedRemotes adds any calculated remotes based on the
// lighthouse.calculated_remotes configuration. It returns true if any
// calculated remotes were added
func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
tree := lh.getCalculatedRemotes()
if tree == nil {
return false
}
ok, calculatedRemotes := tree.MostSpecificContains(vpnIp)
if !ok {
return false
}
var calculated []*Ip4AndPort
for _, cr := range calculatedRemotes {
c := cr.Apply(vpnIp)
if c != nil {
calculated = append(calculated, c)
}
}
lh.Lock()
am := lh.unlockedGetRemoteList(vpnIp)
am.Lock()
defer am.Unlock()
lh.Unlock()
am.unlockedSetV4(lh.myVpnIp, vpnIp, calculated, lh.unlockedShouldAddV4)
return len(calculated) > 0
}
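
// A hedged example of the config this consumes (shape per the
// lighthouse.calculated_remotes feature; values illustrative): peers inside
// 10.0.10.0/24 are guessed to be reachable at the same host offset within
// 192.168.1.0/24 on port 4242, with no lighthouse round trip.
//
//	lighthouse:
//	  calculated_remotes:
//	    10.0.10.0/24:
//	      - mask: 192.168.1.0/24
//	        port: 4242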
// unlockedGetRemoteList assumes you have the lh lock
func (lh *LightHouse) unlockedGetRemoteList(vpnIp iputil.VpnIp) *RemoteList {
am, ok := lh.addrMap[vpnIp]
if !ok {
am = NewRemoteList(func(a netip.Addr) bool { return lh.shouldAdd(vpnIp, a) })
lh.addrMap[vpnIp] = am
}
return am
}
func (lh *LightHouse) shouldAdd(vpnIp iputil.VpnIp, to netip.Addr) bool {
switch {
case to.Is4():
ipBytes := to.As4()
ip := iputil.Ip2VpnIp(ipBytes[:])
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, ip)
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
}
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, ip) {
return false
}
case to.Is6():
ipBytes := to.As16()
hi := binary.BigEndian.Uint64(ipBytes[:8])
lo := binary.BigEndian.Uint64(ipBytes[8:])
allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, hi, lo)
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("remoteIp", to).WithField("allow", allow).Trace("remoteAllowList.Allow")
}
// We don't check our vpn network here because nebula does not support ipv6 on the inside
if !allow {
return false
}
}
return true
}
// unlockedShouldAddV4 checks if to is allowed by our allow list
func (lh *LightHouse) unlockedShouldAddV4(vpnIp iputil.VpnIp, to *Ip4AndPort) bool {
allow := lh.GetRemoteAllowList().AllowIpV4(vpnIp, iputil.VpnIp(to.Ip))
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("remoteIp", vpnIp).WithField("allow", allow).Trace("remoteAllowList.Allow")
}
if !allow || ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.VpnIp(to.Ip)) {
return false
}
return true
}
// unlockedShouldAddV6 checks if to is allowed by our allow list
func (lh *LightHouse) unlockedShouldAddV6(vpnIp iputil.VpnIp, to *Ip6AndPort) bool {
allow := lh.GetRemoteAllowList().AllowIpV6(vpnIp, to.Hi, to.Lo)
if lh.l.Level >= logrus.TraceLevel {
lh.l.WithField("remoteIp", lhIp6ToIp(to)).WithField("allow", allow).Trace("remoteAllowList.Allow")
}
// We don't check our vpn network here because nebula does not support ipv6 on the inside
if !allow {
return false
}
return true
}
func lhIp6ToIp(v *Ip6AndPort) net.IP {
ip := make(net.IP, 16)
binary.BigEndian.PutUint64(ip[:8], v.Hi)
binary.BigEndian.PutUint64(ip[8:], v.Lo)
return ip
}
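
// Worked example of the Hi/Lo split: "2001:db8::1" packs as
// Hi=0x20010db800000000, Lo=0x0000000000000001 (big-endian halves).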
func (lh *LightHouse) IsLighthouseIP(vpnIp iputil.VpnIp) bool {
if _, ok := lh.GetLighthouses()[vpnIp]; ok {
return true
}
return false
}
func NewLhQueryByInt(vpnIp iputil.VpnIp) *NebulaMeta {
	return &NebulaMeta{
		Type: NebulaMeta_HostQuery,
		Details: &NebulaMetaDetails{
			VpnIp: uint32(vpnIp),
		},
	}
}
func NewIp4AndPort(ip net.IP, port uint32) *Ip4AndPort {
ipp := Ip4AndPort{Port: port}
ipp.Ip = uint32(iputil.Ip2VpnIp(ip))
return &ipp
}
func NewIp4AndPortFromNetIP(ip netip.Addr, port uint16) *Ip4AndPort {
v4Addr := ip.As4()
return &Ip4AndPort{
Ip: binary.BigEndian.Uint32(v4Addr[:]),
Port: uint32(port),
}
}
func NewIp6AndPort(ip net.IP, port uint32) *Ip6AndPort {
return &Ip6AndPort{
Hi: binary.BigEndian.Uint64(ip[:8]),
Lo: binary.BigEndian.Uint64(ip[8:]),
Port: port,
}
}
func NewIp6AndPortFromNetIP(ip netip.Addr, port uint16) *Ip6AndPort {
ip6Addr := ip.As16()
return &Ip6AndPort{
Hi: binary.BigEndian.Uint64(ip6Addr[:8]),
Lo: binary.BigEndian.Uint64(ip6Addr[8:]),
Port: uint32(port),
}
}
func NewUDPAddrFromLH4(ipp *Ip4AndPort) *udp.Addr {
ip := ipp.Ip
return udp.NewAddr(
net.IPv4(byte(ip&0xff000000>>24), byte(ip&0x00ff0000>>16), byte(ip&0x0000ff00>>8), byte(ip&0x000000ff)),
uint16(ipp.Port),
)
}
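
// Worked example of the unpacking above: Ip=0xC0A80001 shifts out to
// 192.168.0.1 (0xC0=192, 0xA8=168, 0x00=0, 0x01=1).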
func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
}
func (lh *LightHouse) startQueryWorker() {
if lh.amLighthouse {
return
}
go func() {
nb := make([]byte, 12)
out := make([]byte, mtu)
for {
select {
case <-lh.ctx.Done():
return
case ip := <-lh.queryChan:
lh.innerQueryServer(ip, nb, out)
}
}
}()
}
func (lh *LightHouse) innerQueryServer(ip iputil.VpnIp, nb, out []byte) {
if lh.IsLighthouseIP(ip) {
return
}
// Send a query to the lighthouses and hope for the best next time
query, err := NewLhQueryByInt(ip).Marshal()
if err != nil {
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
return
}
lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
for n := range lighthouses {
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
}
}
func (lh *LightHouse) StartUpdateWorker() {
interval := lh.GetUpdateInterval()
if lh.amLighthouse || interval == 0 {
return
}
clockSource := time.NewTicker(time.Second * time.Duration(interval))
updateCtx, cancel := context.WithCancel(lh.ctx)
lh.updateCancel = cancel
go func() {
defer clockSource.Stop()
for {
lh.SendUpdate()
select {
case <-updateCtx.Done():
return
case <-clockSource.C:
continue
}
}
}()
}
func (lh *LightHouse) SendUpdate() {
var v4 []*Ip4AndPort
var v6 []*Ip6AndPort
for _, e := range lh.GetAdvertiseAddrs() {
if ip := e.ip.To4(); ip != nil {
v4 = append(v4, NewIp4AndPort(e.ip, uint32(e.port)))
} else {
v6 = append(v6, NewIp6AndPort(e.ip, uint32(e.port)))
}
}
lal := lh.GetLocalAllowList()
for _, e := range *localIps(lh.l, lal) {
if ip4 := e.To4(); ip4 != nil && ipMaskContains(lh.myVpnIp, lh.myVpnZeros, iputil.Ip2VpnIp(ip4)) {
continue
}
2021-03-31 16:32:02 -06:00
// Only add IPs that aren't my VPN/tun IP
if ip := e.To4(); ip != nil {
v4 = append(v4, NewIp4AndPort(e, lh.nebulaPort))
} else {
v6 = append(v6, NewIp6AndPort(e, lh.nebulaPort))
2019-11-19 10:00:20 -07:00
}
}
var relays []uint32
for _, r := range lh.GetRelaysForMe() {
relays = append(relays, (uint32)(r))
}
m := &NebulaMeta{
Type: NebulaMeta_HostUpdateNotification,
Details: &NebulaMetaDetails{
VpnIp: uint32(lh.myVpnIp),
Ip4AndPorts: v4,
Ip6AndPorts: v6,
RelayVpnIp: relays,
},
}
lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostUpdateNotification, int64(len(lighthouses)))
nb := make([]byte, 12)
out := make([]byte, mtu)
mm, err := m.Marshal()
if err != nil {
lh.l.WithError(err).Error("Error while marshaling for lighthouse update")
return
}
for vpnIp := range lighthouses {
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
}
}
type LightHouseHandler struct {
lh *LightHouse
nb []byte
out []byte
pb []byte
meta *NebulaMeta
l *logrus.Logger
}
func (lh *LightHouse) NewRequestHandler() *LightHouseHandler {
lhh := &LightHouseHandler{
lh: lh,
nb: make([]byte, 12),
out: make([]byte, mtu),
l: lh.l,
pb: make([]byte, mtu),
meta: &NebulaMeta{
Details: &NebulaMetaDetails{},
},
}
return lhh
}
func (lh *LightHouse) metricRx(t NebulaMeta_MessageType, i int64) {
lh.metrics.Rx(header.MessageType(t), 0, i)
}
func (lh *LightHouse) metricTx(t NebulaMeta_MessageType, i int64) {
lh.metrics.Tx(header.MessageType(t), 0, i)
}
// This method is similar to Reset(), but it re-uses the pointer structs
// so that we don't have to re-allocate them
func (lhh *LightHouseHandler) resetMeta() *NebulaMeta {
details := lhh.meta.Details
lhh.meta.Reset()
// Keep the array memory around
details.Ip4AndPorts = details.Ip4AndPorts[:0]
details.Ip6AndPorts = details.Ip6AndPorts[:0]
details.RelayVpnIp = details.RelayVpnIp[:0]
lhh.meta.Details = details
return lhh.meta
}
func lhHandleRequest(lhh *LightHouseHandler, f *Interface) udp.LightHouseHandlerFunc {
return func(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte) {
lhh.HandleRequest(rAddr, vpnIp, p, f)
}
}
func (lhh *LightHouseHandler) HandleRequest(rAddr *udp.Addr, vpnIp iputil.VpnIp, p []byte, w EncWriter) {
n := lhh.resetMeta()
err := n.Unmarshal(p)
if err != nil {
lhh.l.WithError(err).WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
Error("Failed to unmarshal lighthouse packet")
//TODO: send recv_error?
return
}
if n.Details == nil {
lhh.l.WithField("vpnIp", vpnIp).WithField("udpAddr", rAddr).
Error("Invalid lighthouse update")
//TODO: send recv_error?
return
}
lhh.lh.metricRx(n.Type, 1)
switch n.Type {
case NebulaMeta_HostQuery:
lhh.handleHostQuery(n, vpnIp, rAddr, w)
case NebulaMeta_HostQueryReply:
lhh.handleHostQueryReply(n, vpnIp)
2021-03-31 16:32:02 -06:00
case NebulaMeta_HostUpdateNotification:
lhh.handleHostUpdateNotification(n, vpnIp, w)
2021-03-18 19:37:24 -06:00
case NebulaMeta_HostMovedNotification:
case NebulaMeta_HostPunchNotification:
lhh.handleHostPunchNotification(n, vpnIp, w)
case NebulaMeta_HostUpdateNotificationAck:
// noop
}
}
func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, vpnIp iputil.VpnIp, addr *udp.Addr, w EncWriter) {
// Exit if we don't answer queries
if !lhh.lh.amLighthouse {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.Debugln("I don't answer queries, but received from: ", addr)
}
return
}
//TODO: we can DRY this further
reqVpnIp := n.Details.VpnIp
//TODO: Maybe instead of marshalling into n we marshal into a new `r` to not nuke our current request data
found, ln, err := lhh.lh.queryAndPrepMessage(iputil.VpnIp(n.Details.VpnIp), func(c *cache) (int, error) {
n = lhh.resetMeta()
n.Type = NebulaMeta_HostQueryReply
n.Details.VpnIp = reqVpnIp
lhh.coalesceAnswers(c, n)
return n.MarshalTo(lhh.pb)
})
if !found {
return
}
if err != nil {
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host query reply")
return
}
lhh.lh.metricTx(NebulaMeta_HostQueryReply, 1)
w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
// This signals the other side to punch some zero byte udp packets
found, ln, err = lhh.lh.queryAndPrepMessage(vpnIp, func(c *cache) (int, error) {
n = lhh.resetMeta()
n.Type = NebulaMeta_HostPunchNotification
n.Details.VpnIp = uint32(vpnIp)
lhh.coalesceAnswers(c, n)
return n.MarshalTo(lhh.pb)
})
if !found {
return
}
if err != nil {
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host was queried for")
return
}
lhh.lh.metricTx(NebulaMeta_HostPunchNotification, 1)
w.SendMessageToVpnIp(header.LightHouse, 0, iputil.VpnIp(reqVpnIp), lhh.pb[:ln], lhh.nb, lhh.out[:0])
}
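
// Net effect: one HostQuery from A about B yields a HostQueryReply back to A
// with B's addresses, and (via the second queryAndPrepMessage above) a
// HostPunchNotification to B carrying A's addresses so B can start punching
// toward A.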
func (lhh *LightHouseHandler) coalesceAnswers(c *cache, n *NebulaMeta) {
if c.v4 != nil {
if c.v4.learned != nil {
n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.learned)
}
if len(c.v4.reported) > 0 {
n.Details.Ip4AndPorts = append(n.Details.Ip4AndPorts, c.v4.reported...)
}
}
if c.v6 != nil {
if c.v6.learned != nil {
n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.learned)
}
if len(c.v6.reported) > 0 {
n.Details.Ip6AndPorts = append(n.Details.Ip6AndPorts, c.v6.reported...)
}
}
if c.relay != nil {
n.Details.RelayVpnIp = append(n.Details.RelayVpnIp, c.relay.relay...)
}
}
func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, vpnIp iputil.VpnIp) {
if !lhh.lh.IsLighthouseIP(vpnIp) {
return
}
lhh.lh.Lock()
am := lhh.lh.unlockedGetRemoteList(iputil.VpnIp(n.Details.VpnIp))
am.Lock()
lhh.lh.Unlock()
certVpnIp := iputil.VpnIp(n.Details.VpnIp)
am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
am.Unlock()
// Non-blocking attempt to trigger, skip if it would block
select {
case lhh.lh.handshakeTrigger <- iputil.VpnIp(n.Details.VpnIp):
default:
}
}
func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
if !lhh.lh.amLighthouse {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.Debugln("I am not a lighthouse, do not take host updates: ", vpnIp)
}
return
}
2021-03-31 16:32:02 -06:00
// Simple check that the host sent this about itself and not someone else
if n.Details.VpnIp != uint32(vpnIp) {
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.WithField("vpnIp", vpnIp).WithField("answer", iputil.VpnIp(n.Details.VpnIp)).Debugln("Host sent invalid update")
}
return
}
lhh.lh.Lock()
am := lhh.lh.unlockedGetRemoteList(vpnIp)
am.Lock()
lhh.lh.Unlock()
certVpnIp := iputil.VpnIp(n.Details.VpnIp)
am.unlockedSetV4(vpnIp, certVpnIp, n.Details.Ip4AndPorts, lhh.lh.unlockedShouldAddV4)
am.unlockedSetV6(vpnIp, certVpnIp, n.Details.Ip6AndPorts, lhh.lh.unlockedShouldAddV6)
am.unlockedSetRelay(vpnIp, certVpnIp, n.Details.RelayVpnIp)
am.Unlock()
n = lhh.resetMeta()
n.Type = NebulaMeta_HostUpdateNotificationAck
n.Details.VpnIp = uint32(vpnIp)
ln, err := n.MarshalTo(lhh.pb)
if err != nil {
lhh.l.WithError(err).WithField("vpnIp", vpnIp).Error("Failed to marshal lighthouse host update ack")
return
}
lhh.lh.metricTx(NebulaMeta_HostUpdateNotificationAck, 1)
w.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, lhh.pb[:ln], lhh.nb, lhh.out[:0])
}
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, vpnIp iputil.VpnIp, w EncWriter) {
if !lhh.lh.IsLighthouseIP(vpnIp) {
return
}
empty := []byte{0}
punch := func(vpnPeer *udp.Addr) {
if vpnPeer == nil {
return
}
go func() {
time.Sleep(lhh.lh.punchy.GetDelay())
lhh.lh.metricHolepunchTx.Inc(1)
lhh.lh.punchConn.WriteTo(empty, vpnPeer)
}()
if lhh.l.Level >= logrus.DebugLevel {
//TODO: lacking the ip we are actually punching on, old: l.Debugf("Punching %s on %d for %s", IntIp(a.Ip), a.Port, IntIp(n.Details.VpnIp))
lhh.l.Debugf("Punching on %d for %s", vpnPeer.Port, iputil.VpnIp(n.Details.VpnIp))
}
}
for _, a := range n.Details.Ip4AndPorts {
punch(NewUDPAddrFromLH4(a))
}
for _, a := range n.Details.Ip6AndPorts {
punch(NewUDPAddrFromLH6(a))
}
// This sends a nebula test packet to the host trying to contact us. In the case
// of a double nat or other difficult scenario, this may help establish
// a tunnel.
if lhh.lh.punchy.GetRespond() {
queryVpnIp := iputil.VpnIp(n.Details.VpnIp)
go func() {
time.Sleep(lhh.lh.punchy.GetRespondDelay())
if lhh.l.Level >= logrus.DebugLevel {
lhh.l.Debugf("Sending a nebula test packet to vpn ip %s", queryVpnIp)
}
//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
// for each punchBack packet. We should move this into a timerwheel or a single goroutine
// managed by a channel.
w.SendMessageToVpnIp(header.Test, header.TestRequest, queryVpnIp, []byte(""), make([]byte, 12), make([]byte, mtu))
}()
}
}
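
// The punchy knobs consulted above (GetDelay, GetRespond, GetRespondDelay)
// map to config along these lines (illustrative values):
//
//	punchy:
//	  punch: true
//	  respond: true
//	  delay: 1s
//	  respond_delay: 5s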
// ipMaskContains reports whether testIp falls within the network defined by ip and its mask.
// zeros is 32 minus the number of mask bits from net.IPMask.Size()
func ipMaskContains(ip iputil.VpnIp, zeros iputil.VpnIp, testIp iputil.VpnIp) bool {
return (testIp^ip)>>zeros == 0
}
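
// Worked example for a /24 (zeros = 32-24 = 8): ip=192.168.100.1 (0xC0A86401)
// and testIp=192.168.100.7 (0xC0A86407) xor to 0x06, which shifted right by 8
// is 0, so testIp is inside the network. 192.168.101.1 xors to 0x0100 and
// shifts to 0x01, so it is outside.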