2019-11-19 10:00:20 -07:00
|
|
|
package nebula
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/binary"
|
|
|
|
"fmt"
|
|
|
|
"net"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
2020-06-30 16:53:30 -06:00
|
|
|
|
|
|
|
"github.com/sirupsen/logrus"
|
|
|
|
"github.com/slackhq/nebula/sshd"
|
|
|
|
"gopkg.in/yaml.v2"
|
2019-11-19 10:00:20 -07:00
|
|
|
)
|
|
|
|
|
2020-06-30 12:48:58 -06:00
|
|
|
// The caller should provide a real logger, we have one just in case.
// Main replaces this with the caller-supplied logger; this default only
// exists so package-level code can log before Main runs.
var l = logrus.New()
|
|
|
|
|
|
|
|
// m is a shorthand for the loosely-typed key/value maps passed as the
// extra-fields argument to NewContextualError in this package.
type m map[string]interface{}
|
|
|
|
|
2020-09-18 08:20:09 -06:00
|
|
|
func Main(config *Config, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (*Control, error) {
|
2020-06-30 12:48:58 -06:00
|
|
|
l = logger
|
2019-11-19 10:00:20 -07:00
|
|
|
l.Formatter = &logrus.TextFormatter{
|
|
|
|
FullTimestamp: true,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Print the config if in test, the exit comes later
|
|
|
|
if configTest {
|
|
|
|
b, err := yaml.Marshal(config.Settings)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, err
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
2020-06-30 12:48:58 -06:00
|
|
|
|
|
|
|
// Print the final config
|
2019-11-19 10:00:20 -07:00
|
|
|
l.Println(string(b))
|
|
|
|
}
|
|
|
|
|
2020-06-30 12:48:58 -06:00
|
|
|
err := configLogger(config)
|
2019-11-19 10:00:20 -07:00
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to configure the logger", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
config.RegisterReloadCallback(func(c *Config) {
|
|
|
|
err := configLogger(c)
|
|
|
|
if err != nil {
|
|
|
|
l.WithError(err).Error("Failed to configure the logger")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// trustedCAs is currently a global, so loadCA operates on that global directly
|
|
|
|
trustedCAs, err = loadCAFromConfig(config)
|
|
|
|
if err != nil {
|
|
|
|
//The errors coming out of loadCA are already nicely formatted
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to load ca from config", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
l.WithField("fingerprints", trustedCAs.GetFingerprints()).Debug("Trusted CA fingerprints")
|
|
|
|
|
|
|
|
cs, err := NewCertStateFromConfig(config)
|
|
|
|
if err != nil {
|
|
|
|
//The errors coming out of NewCertStateFromConfig are already nicely formatted
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to load certificate from config", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
l.WithField("cert", cs.certificate).Debug("Client nebula certificate")
|
|
|
|
|
|
|
|
fw, err := NewFirewallFromConfig(cs.certificate, config)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Error while loading firewall rules", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
|
|
|
|
|
|
|
|
// TODO: make sure mask is 4 bytes
|
|
|
|
tunCidr := cs.certificate.Details.Ips[0]
|
|
|
|
routes, err := parseRoutes(config, tunCidr)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Could not parse tun.routes", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
2019-12-12 10:31:22 -07:00
|
|
|
unsafeRoutes, err := parseUnsafeRoutes(config, tunCidr)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Could not parse tun.unsafe_routes", nil, err)
|
2019-12-12 10:31:22 -07:00
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
|
|
|
|
ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
|
|
|
|
wireSSHReload(ssh, config)
|
|
|
|
if config.GetBool("sshd.enabled", false) {
|
|
|
|
err = configSSH(ssh, config)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Error while configuring the sshd", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// All non system modifying configuration consumption should live above this line
|
|
|
|
// tun config, listeners, anything modifying the computer should be below
|
|
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2021-02-25 13:01:14 -07:00
|
|
|
var routines int
|
|
|
|
|
|
|
|
// If `routines` is set, use that and ignore the specific values
|
|
|
|
if routines = config.GetInt("routines", 0); routines != 0 {
|
|
|
|
if routines < 1 {
|
|
|
|
routines = 1
|
|
|
|
}
|
|
|
|
if routines > 1 {
|
|
|
|
l.WithField("routines", routines).Info("Using multiple routines")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// deprecated and undocumented
|
|
|
|
tunQueues := config.GetInt("tun.routines", 1)
|
|
|
|
udpQueues := config.GetInt("listen.routines", 1)
|
|
|
|
if tunQueues > udpQueues {
|
|
|
|
routines = tunQueues
|
|
|
|
} else {
|
|
|
|
routines = udpQueues
|
|
|
|
}
|
|
|
|
if routines != 1 {
|
|
|
|
l.WithField("routines", routines).Warn("Setting tun.routines and listen.routines is deprecated. Use `routines` instead")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-10 07:15:55 -06:00
|
|
|
var tun Inside
|
2020-04-06 12:35:32 -06:00
|
|
|
if !configTest {
|
|
|
|
config.CatchHUP()
|
|
|
|
|
2020-08-10 07:15:55 -06:00
|
|
|
switch {
|
|
|
|
case config.GetBool("tun.disabled", false):
|
2021-03-01 09:09:41 -07:00
|
|
|
tun = newDisabledTun(tunCidr, config.GetInt("tun.tx_queue", 500), config.GetBool("stats.message_metrics", false), l)
|
2020-08-10 07:15:55 -06:00
|
|
|
case tunFd != nil:
|
2020-06-30 12:48:58 -06:00
|
|
|
tun, err = newTunFromFd(
|
|
|
|
*tunFd,
|
|
|
|
tunCidr,
|
|
|
|
config.GetInt("tun.mtu", DEFAULT_MTU),
|
|
|
|
routes,
|
|
|
|
unsafeRoutes,
|
|
|
|
config.GetInt("tun.tx_queue", 500),
|
|
|
|
)
|
2020-08-10 07:15:55 -06:00
|
|
|
default:
|
2020-06-30 12:48:58 -06:00
|
|
|
tun, err = newTun(
|
|
|
|
config.GetString("tun.dev", ""),
|
|
|
|
tunCidr,
|
|
|
|
config.GetInt("tun.mtu", DEFAULT_MTU),
|
|
|
|
routes,
|
|
|
|
unsafeRoutes,
|
|
|
|
config.GetInt("tun.tx_queue", 500),
|
2021-02-25 13:01:14 -07:00
|
|
|
routines > 1,
|
2020-06-30 12:48:58 -06:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to get a tun/tap device", nil, err)
|
2020-04-06 12:35:32 -06:00
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// set up our UDP listener
|
2021-02-25 13:01:14 -07:00
|
|
|
udpConns := make([]*udpConn, routines)
|
|
|
|
port := config.GetInt("listen.port", 0)
|
2020-04-06 12:35:32 -06:00
|
|
|
|
|
|
|
if !configTest {
|
2021-02-25 13:01:14 -07:00
|
|
|
for i := 0; i < routines; i++ {
|
|
|
|
udpServer, err := NewListener(config.GetString("listen.host", "0.0.0.0"), port, routines > 1)
|
|
|
|
if err != nil {
|
|
|
|
return nil, NewContextualError("Failed to open udp listener", m{"queue": i}, err)
|
|
|
|
}
|
|
|
|
udpServer.reloadConfig(config)
|
|
|
|
udpConns[i] = udpServer
|
|
|
|
|
|
|
|
// If port is dynamic, discover it
|
|
|
|
if port == 0 {
|
|
|
|
uPort, err := udpServer.LocalAddr()
|
|
|
|
if err != nil {
|
|
|
|
return nil, NewContextualError("Failed to get listening port", nil, err)
|
|
|
|
}
|
|
|
|
port = int(uPort.Port)
|
|
|
|
}
|
2020-04-06 12:35:32 -06:00
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set up my internal host map
|
|
|
|
var preferredRanges []*net.IPNet
|
|
|
|
rawPreferredRanges := config.GetStringSlice("preferred_ranges", []string{})
|
|
|
|
// First, check if 'preferred_ranges' is set and fallback to 'local_range'
|
|
|
|
if len(rawPreferredRanges) > 0 {
|
|
|
|
for _, rawPreferredRange := range rawPreferredRanges {
|
|
|
|
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to parse preferred ranges", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
preferredRanges = append(preferredRanges, preferredRange)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// local_range was superseded by preferred_ranges. If it is still present,
|
|
|
|
// merge the local_range setting into preferred_ranges. We will probably
|
|
|
|
// deprecate local_range and remove in the future.
|
|
|
|
rawLocalRange := config.GetString("local_range", "")
|
|
|
|
if rawLocalRange != "" {
|
|
|
|
_, localRange, err := net.ParseCIDR(rawLocalRange)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to parse local_range", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the entry for local_range was already specified in
|
|
|
|
// preferred_ranges. Don't put it into the slice twice if so.
|
|
|
|
var found bool
|
|
|
|
for _, r := range preferredRanges {
|
|
|
|
if r.String() == localRange.String() {
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !found {
|
|
|
|
preferredRanges = append(preferredRanges, localRange)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
hostMap := NewHostMap("main", tunCidr, preferredRanges)
|
|
|
|
hostMap.SetDefaultRoute(ip2int(net.ParseIP(config.GetString("default_route", "0.0.0.0"))))
|
2019-12-12 09:34:17 -07:00
|
|
|
hostMap.addUnsafeRoutes(&unsafeRoutes)
|
2020-06-26 11:45:48 -06:00
|
|
|
hostMap.metricsEnabled = config.GetBool("stats.message_metrics", false)
|
2019-12-12 09:34:17 -07:00
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
l.WithField("network", hostMap.vpnCIDR).WithField("preferredRanges", hostMap.preferredRanges).Info("Main HostMap created")
|
|
|
|
|
|
|
|
/*
|
|
|
|
config.SetDefault("promoter.interval", 10)
|
|
|
|
go hostMap.Promoter(config.GetInt("promoter.interval"))
|
|
|
|
*/
|
|
|
|
|
2020-03-27 12:26:39 -06:00
|
|
|
punchy := NewPunchyFromConfig(config)
|
2020-04-06 12:35:32 -06:00
|
|
|
if punchy.Punch && !configTest {
|
2019-11-19 10:00:20 -07:00
|
|
|
l.Info("UDP hole punching enabled")
|
2021-02-25 13:01:14 -07:00
|
|
|
go hostMap.Punchy(udpConns[0])
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
amLighthouse := config.GetBool("lighthouse.am_lighthouse", false)
|
2019-11-24 10:32:08 -07:00
|
|
|
|
|
|
|
// warn if am_lighthouse is enabled but upstream lighthouses exists
|
2019-12-09 17:53:56 -07:00
|
|
|
rawLighthouseHosts := config.GetStringSlice("lighthouse.hosts", []string{})
|
|
|
|
if amLighthouse && len(rawLighthouseHosts) != 0 {
|
2019-11-24 10:32:08 -07:00
|
|
|
l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
|
|
|
|
}
|
|
|
|
|
2019-12-09 17:53:56 -07:00
|
|
|
lighthouseHosts := make([]uint32, len(rawLighthouseHosts))
|
|
|
|
for i, host := range rawLighthouseHosts {
|
|
|
|
ip := net.ParseIP(host)
|
|
|
|
if ip == nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, nil)
|
2019-12-09 17:53:56 -07:00
|
|
|
}
|
2020-01-20 13:52:55 -07:00
|
|
|
if !tunCidr.Contains(ip) {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("lighthouse host is not in our subnet, invalid", m{"vpnIp": ip, "network": tunCidr.String()}, nil)
|
2020-01-20 13:52:55 -07:00
|
|
|
}
|
2019-12-09 17:53:56 -07:00
|
|
|
lighthouseHosts[i] = ip2int(ip)
|
|
|
|
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
lightHouse := NewLightHouse(
|
|
|
|
amLighthouse,
|
|
|
|
ip2int(tunCidr.IP),
|
2019-12-09 17:53:56 -07:00
|
|
|
lighthouseHosts,
|
2019-11-19 10:00:20 -07:00
|
|
|
//TODO: change to a duration
|
|
|
|
config.GetInt("lighthouse.interval", 10),
|
|
|
|
port,
|
2021-02-25 13:01:14 -07:00
|
|
|
udpConns[0],
|
2020-03-27 12:26:39 -06:00
|
|
|
punchy.Respond,
|
|
|
|
punchy.Delay,
|
2020-06-26 11:45:48 -06:00
|
|
|
config.GetBool("stats.lighthouse_metrics", false),
|
2019-11-19 10:00:20 -07:00
|
|
|
)
|
|
|
|
|
2020-04-08 14:20:12 -06:00
|
|
|
remoteAllowList, err := config.GetAllowList("lighthouse.remote_allow_list", false)
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Invalid lighthouse.remote_allow_list", nil, err)
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
}
|
|
|
|
lightHouse.SetRemoteAllowList(remoteAllowList)
|
|
|
|
|
2020-04-08 14:20:12 -06:00
|
|
|
localAllowList, err := config.GetAllowList("lighthouse.local_allow_list", true)
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Invalid lighthouse.local_allow_list", nil, err)
|
Add lighthouse.{remoteAllowList,localAllowList} (#217)
These settings make it possible to blacklist / whitelist IP addresses
that are used for remote connections.
`lighthouse.remoteAllowList` filters which remote IPs are allow when
fetching from the lighthouse (or, if you are the lighthouse, which IPs
you store and forward to querying hosts). By default, any remote IPs are
allowed. You can provide CIDRs here with `true` to allow and `false` to
deny. The most specific CIDR rule applies to each remote. If all rules
are "allow", the default will be "deny", and vice-versa. If both "allow"
and "deny" rules are present, then you MUST set a rule for "0.0.0.0/0"
as the default.
lighthouse:
remoteAllowList:
# Example to block IPs from this subnet from being used for remote IPs.
"172.16.0.0/12": false
# A more complicated example, allow public IPs but only private IPs from a specific subnet
"0.0.0.0/0": true
"10.0.0.0/8": false
"10.42.42.0/24": true
`lighthouse.localAllowList` has the same logic as above, but it applies
to the local addresses we advertise to the lighthouse. Additionally, you
can specify an `interfaces` map of regular expressions to match against
interface names. The regexp must match the entire name. All interface
rules must be either true or false (and the default rule will be the
inverse). CIDR rules are matched after interface name rules.
Default is all local IP addresses.
lighthouse:
localAllowList:
# Example to blacklist docker interfaces.
interfaces:
'docker.*': false
# Example to only advertise IPs in this subnet to the lighthouse.
"10.0.0.0/8": true
2020-04-08 13:36:43 -06:00
|
|
|
}
|
|
|
|
lightHouse.SetLocalAllowList(localAllowList)
|
|
|
|
|
2019-11-23 16:55:23 -07:00
|
|
|
//TODO: Move all of this inside functions in lighthouse.go
|
2019-11-19 10:00:20 -07:00
|
|
|
for k, v := range config.GetMap("static_host_map", map[interface{}]interface{}{}) {
|
|
|
|
vpnIp := net.ParseIP(fmt.Sprintf("%v", k))
|
2020-01-20 13:52:55 -07:00
|
|
|
if !tunCidr.Contains(vpnIp) {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("static_host_map key is not in our subnet, invalid", m{"vpnIp": vpnIp, "network": tunCidr.String()}, nil)
|
2020-01-20 13:52:55 -07:00
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
vals, ok := v.([]interface{})
|
|
|
|
if ok {
|
|
|
|
for _, v := range vals {
|
|
|
|
parts := strings.Split(fmt.Sprintf("%v", v), ":")
|
|
|
|
addr, err := net.ResolveIPAddr("ip", parts[0])
|
|
|
|
if err == nil {
|
|
|
|
ip := addr.IP
|
|
|
|
port, err := strconv.Atoi(parts[1])
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
lightHouse.AddRemote(ip2int(vpnIp), NewUDPAddr(ip2int(ip), uint16(port)), true)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
//TODO: make this all a helper
|
|
|
|
parts := strings.Split(fmt.Sprintf("%v", v), ":")
|
|
|
|
addr, err := net.ResolveIPAddr("ip", parts[0])
|
|
|
|
if err == nil {
|
|
|
|
ip := addr.IP
|
|
|
|
port, err := strconv.Atoi(parts[1])
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Static host address could not be parsed", m{"vpnIp": vpnIp}, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
lightHouse.AddRemote(ip2int(vpnIp), NewUDPAddr(ip2int(ip), uint16(port)), true)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-23 16:55:23 -07:00
|
|
|
err = lightHouse.ValidateLHStaticEntries()
|
2019-11-23 14:46:45 -07:00
|
|
|
if err != nil {
|
2019-11-23 16:55:23 -07:00
|
|
|
l.WithError(err).Error("Lighthouse unreachable")
|
2019-11-23 14:46:45 -07:00
|
|
|
}
|
|
|
|
|
2020-06-26 11:45:48 -06:00
|
|
|
var messageMetrics *MessageMetrics
|
|
|
|
if config.GetBool("stats.message_metrics", false) {
|
|
|
|
messageMetrics = newMessageMetrics()
|
|
|
|
} else {
|
|
|
|
messageMetrics = newMessageMetricsOnlyRecvError()
|
|
|
|
}
|
|
|
|
|
2020-02-21 14:25:11 -07:00
|
|
|
handshakeConfig := HandshakeConfig{
|
2020-07-22 08:35:10 -06:00
|
|
|
tryInterval: config.GetDuration("handshakes.try_interval", DefaultHandshakeTryInterval),
|
|
|
|
retries: config.GetInt("handshakes.retries", DefaultHandshakeRetries),
|
|
|
|
waitRotation: config.GetInt("handshakes.wait_rotation", DefaultHandshakeWaitRotation),
|
|
|
|
triggerBuffer: config.GetInt("handshakes.trigger_buffer", DefaultHandshakeTriggerBuffer),
|
2020-06-26 11:45:48 -06:00
|
|
|
|
|
|
|
messageMetrics: messageMetrics,
|
2020-02-21 14:25:11 -07:00
|
|
|
}
|
|
|
|
|
2021-02-25 13:01:14 -07:00
|
|
|
handshakeManager := NewHandshakeManager(tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], handshakeConfig)
|
2020-07-22 08:35:10 -06:00
|
|
|
lightHouse.handshakeTrigger = handshakeManager.trigger
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2019-11-23 09:50:36 -07:00
|
|
|
//TODO: These will be reused for psk
|
|
|
|
//handshakeMACKey := config.GetString("handshake_mac.key", "")
|
|
|
|
//handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{})
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2019-12-11 18:42:55 -07:00
|
|
|
serveDns := config.GetBool("lighthouse.serve_dns", false)
|
2019-11-19 10:00:20 -07:00
|
|
|
checkInterval := config.GetInt("timers.connection_alive_interval", 5)
|
|
|
|
pendingDeletionInterval := config.GetInt("timers.pending_deletion_interval", 10)
|
|
|
|
ifConfig := &InterfaceConfig{
|
2019-11-23 09:50:36 -07:00
|
|
|
HostMap: hostMap,
|
|
|
|
Inside: tun,
|
2021-02-25 13:01:14 -07:00
|
|
|
Outside: udpConns[0],
|
2019-11-23 09:50:36 -07:00
|
|
|
certState: cs,
|
|
|
|
Cipher: config.GetString("cipher", "aes"),
|
|
|
|
Firewall: fw,
|
|
|
|
ServeDns: serveDns,
|
|
|
|
HandshakeManager: handshakeManager,
|
|
|
|
lightHouse: lightHouse,
|
|
|
|
checkInterval: checkInterval,
|
|
|
|
pendingDeletionInterval: pendingDeletionInterval,
|
|
|
|
DropLocalBroadcast: config.GetBool("tun.drop_local_broadcast", false),
|
|
|
|
DropMulticast: config.GetBool("tun.drop_multicast", false),
|
|
|
|
UDPBatchSize: config.GetInt("listen.batch", 64),
|
2021-02-25 13:01:14 -07:00
|
|
|
routines: routines,
|
2020-06-26 11:45:48 -06:00
|
|
|
MessageMetrics: messageMetrics,
|
2020-09-18 08:20:09 -06:00
|
|
|
version: buildVersion,
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
switch ifConfig.Cipher {
|
|
|
|
case "aes":
|
2020-03-30 12:23:55 -06:00
|
|
|
noiseEndianness = binary.BigEndian
|
2019-11-19 10:00:20 -07:00
|
|
|
case "chachapoly":
|
2020-03-30 12:23:55 -06:00
|
|
|
noiseEndianness = binary.LittleEndian
|
2019-11-19 10:00:20 -07:00
|
|
|
default:
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, fmt.Errorf("unknown cipher: %v", ifConfig.Cipher)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
var ifce *Interface
|
|
|
|
if !configTest {
|
|
|
|
ifce, err = NewInterface(ifConfig)
|
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, fmt.Errorf("failed to initialize interface: %s", err)
|
2020-04-06 12:35:32 -06:00
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2021-02-25 13:01:14 -07:00
|
|
|
// TODO: Better way to attach these, probably want a new interface in InterfaceConfig
|
|
|
|
// I don't want to make this initial commit too far-reaching though
|
|
|
|
ifce.writers = udpConns
|
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
ifce.RegisterConfigChangeCallbacks(config)
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
go handshakeManager.Run(ifce)
|
|
|
|
go lightHouse.LhUpdateWorker(ifce)
|
|
|
|
}
|
2019-11-19 10:00:20 -07:00
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
err = startStats(config, configTest)
|
2019-11-19 10:00:20 -07:00
|
|
|
if err != nil {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, NewContextualError("Failed to start stats emitter", nil, err)
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|
|
|
|
|
2020-04-06 12:35:32 -06:00
|
|
|
if configTest {
|
2020-09-18 08:20:09 -06:00
|
|
|
return nil, nil
|
2020-04-06 12:35:32 -06:00
|
|
|
}
|
|
|
|
|
2019-11-19 10:00:20 -07:00
|
|
|
//TODO: check if we _should_ be emitting stats
|
|
|
|
go ifce.emitStats(config.GetDuration("stats.interval", time.Second*10))
|
|
|
|
|
|
|
|
attachCommands(ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
|
|
|
|
|
2019-12-11 18:42:55 -07:00
|
|
|
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
|
|
|
|
if amLighthouse && serveDns {
|
|
|
|
l.Debugln("Starting dns server")
|
|
|
|
go dnsMain(hostMap, config)
|
|
|
|
}
|
|
|
|
|
2020-09-18 08:20:09 -06:00
|
|
|
return &Control{ifce, l}, nil
|
2019-11-19 10:00:20 -07:00
|
|
|
}
|