package nebula

import (
	"encoding/binary"
	"errors"
	"fmt"
	"time"

	"github.com/flynn/noise"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/firewall"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
	"golang.org/x/net/ipv4"
	"google.golang.org/protobuf/proto"
)

const (
	minFwPacketLen = 4
)
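
// readOutsidePackets returns the udp.EncReader callback used by the outside
// listener; it hands each received packet to f.readOutsidePackets with no
// relay (via) sender information.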
func readOutsidePackets(f *Interface) udp.EncReader {
	return func(
		addr *udp.Addr,
		out []byte,
		packet []byte,
		header *header.H,
		fwPacket *firewall.Packet,
		lhh udp.LightHouseHandlerFunc,
		nb []byte,
		q int,
		localCache firewall.ConntrackCache,
	) {
		f.readOutsidePackets(addr, nil, out, packet, header, fwPacket, lhh, nb, q, localCache)
	}
}
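
// readOutsidePackets parses the nebula header of a packet received from the
// outside (directly over UDP, or through an established relay when via is set)
// and dispatches it to the appropriate handler: data, relay, lighthouse, test,
// handshake, recv error, close tunnel, or control.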
func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
	err := h.Parse(packet)
	if err != nil {
		// TODO: best if we return this and let caller log
		// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
		// Hole punch packets are 0 or 1 byte long, so let's not log parse errors for those
		if len(packet) > 1 {
			f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err)
		}
		return
	}

	//l.Error("in packet ", header, packet[HeaderLen:])
	if addr != nil {
		if ip4 := addr.IP.To4(); ip4 != nil {
			if ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, iputil.VpnIp(binary.BigEndian.Uint32(ip4))) {
				if f.l.Level >= logrus.DebugLevel {
					f.l.WithField("udpAddr", addr).Debug("Refusing to process double encrypted packet")
				}
				return
			}
		}
	}

	var hostinfo *HostInfo
	// Check whether we've seen this index before; otherwise respond to the handshake initiation
	if h.Type == header.Message && h.Subtype == header.MessageRelay {
		hostinfo = f.hostMap.QueryRelayIndex(h.RemoteIndex)
	} else {
		hostinfo = f.hostMap.QueryIndex(h.RemoteIndex)
	}

	var ci *ConnectionState
	if hostinfo != nil {
		ci = hostinfo.ConnectionState
	}

	switch h.Type {
	case header.Message:
		// TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case.
		if !f.handleEncrypted(ci, addr, h) {
			return
		}

		switch h.Subtype {
		case header.MessageNone:
			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) {
				return
			}
		case header.MessageRelay:
			// The entire body is sent as AD, not encrypted.
			// The packet consists of a 16-byte parsed Nebula header, an Associated Data-protected payload, and a trailing 16-byte AEAD signature value.
			// The packet is guaranteed to be at least 16 bytes at this point, because it got past the h.Parse() call above. If it's
			// otherwise malformed (meaning there is no trailing 16 byte AEAD value), then this will result in at worst a 0-length slice
			// which will gracefully fail in the DecryptDanger call.
			signedPayload := packet[:len(packet)-hostinfo.ConnectionState.dKey.Overhead()]
			signatureValue := packet[len(packet)-hostinfo.ConnectionState.dKey.Overhead():]
			out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, signedPayload, signatureValue, h.MessageCounter, nb)
			if err != nil {
				return
			}
			// Successfully validated the relayed packet; get rid of the Relay header.
			signedPayload = signedPayload[header.Len:]
			// Pull the Roaming parts up here, and return in all call paths.
			f.handleHostRoaming(hostinfo, addr)
			// Track usage of both the HostInfo and the Relay for the received & authenticated packet
			f.connectionManager.In(hostinfo.localIndexId)
			f.connectionManager.RelayUsed(h.RemoteIndex)

			relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
			if !ok {
				// The only way this happens is if the hostmap has an index to the correct HostInfo, but the HostInfo is missing
				// its internal mapping. This should never happen.
				hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnIp": hostinfo.vpnIp, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
				return
			}

			switch relay.Type {
			case TerminalType:
				// If I am the target of this relay, process the unwrapped packet
				// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
				f.readOutsidePackets(nil, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache)
				return
			case ForwardingType:
				// Find the target HostInfo relay object
				targetHI, targetRelay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relay.PeerIp)
				if err != nil {
					hostinfo.logger(f.l).WithField("relayTo", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip")
					return
				}

				// If that relay is Established, forward the payload through it
				if targetRelay.State == Established {
					switch targetRelay.Type {
					case ForwardingType:
						// Forward this packet through the relay tunnel
						// Find the target HostInfo
						f.SendVia(targetHI, targetRelay, signedPayload, nb, out, false)
						return
					case TerminalType:
						hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
					}
				} else {
					hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerIp, "relayFrom": hostinfo.vpnIp, "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
					return
				}
			}
		}

	case header.LightHouse:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}

		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt lighthouse packet")

			//TODO: maybe after build 64 is out? 06/14/2018 - NB
			//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
			return
		}

		lhf(addr, hostinfo.vpnIp, d)

		// Fallthrough to the bottom to record incoming traffic

	case header.Test:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}

		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt test packet")

			//TODO: maybe after build 64 is out? 06/14/2018 - NB
			//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
			return
		}

		if h.Subtype == header.TestRequest {
			// This testRequest might be from TryPromoteBest, so we should roam
			// to the new IP address before responding
			f.handleHostRoaming(hostinfo, addr)
			f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out)
		}

		// Fallthrough to the bottom to record incoming traffic

	// Non-encrypted messages are handled below here; they should not fall through to the
	// traffic tracking at the bottom because they are unauthenticated

	case header.Handshake:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		f.handshakeManager.HandleIncoming(addr, via, packet, h)
		return

	case header.RecvError:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		f.handleRecvError(addr, h)
		return

	case header.CloseTunnel:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}

		hostinfo.logger(f.l).WithField("udpAddr", addr).
			Info("Close tunnel received, tearing down.")

		f.closeTunnel(hostinfo)
		return

	case header.Control:
		if !f.handleEncrypted(ci, addr, h) {
			return
		}

		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt Control packet")
			return
		}
		m := &NebulaControl{}
		err = m.Unmarshal(d)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).Error("Failed to unmarshal control message")
			break
		}

		f.relayManager.HandleControlMsg(hostinfo, m, f)

	default:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", addr)
		return
	}

	f.handleHostRoaming(hostinfo, addr)

	f.connectionManager.In(hostinfo.localIndexId)
}

// closeTunnel closes a tunnel locally; it does not send a closeTunnel packet to the remote
func (f *Interface) closeTunnel(hostInfo *HostInfo) {
	final := f.hostMap.DeleteHostInfo(hostInfo)
	if final {
		// We no longer have any tunnels with this vpn ip; clear learned lighthouse state to lower memory usage
		f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
	}
}

// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
func (f *Interface) sendCloseTunnel(h *HostInfo) {
	f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
}
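
// handleHostRoaming moves a host to a new remote udp address when an
// authenticated packet arrives from one, subject to lighthouse.remote_allow_list
// and a short suppression window to avoid flapping back to the previous remote.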
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) {
	if addr != nil && !hostinfo.remote.Equals(addr) {
		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
			hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
			return
		}
		if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
			if f.l.Level >= logrus.DebugLevel {
				hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
					Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
			}
			return
		}

		hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
			Info("Host roamed to new udp ip/port.")
		hostinfo.lastRoam = time.Now()
		hostinfo.lastRoamRemote = hostinfo.remote
		hostinfo.SetRemote(addr)
	}

}
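
// handleEncrypted reports whether an encrypted packet should be processed: the
// connection state must exist and the replay window must accept the message
// counter. When it does not, a recv error may be sent back to the remote.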
func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udp.Addr, h *header.H) bool {
	// If the ConnectionState exists and the replay protector allows it, process the packet.
	// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
	if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
		if addr != nil {
			f.maybeSendRecvError(addr, h.RemoteIndex)
			return false
		} else {
			return false
		}
	}

	return true
}

// newPacket validates the IPv4 and sub-protocol headers and parses out the fields the
// firewall cares about (addresses, ports, protocol, fragment flag)
func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
	// Do we at least have an ipv4 header worth of data?
	if len(data) < ipv4.HeaderLen {
		return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen)
	}

	// Is it an ipv4 packet?
	if int((data[0]>>4)&0x0f) != 4 {
		return fmt.Errorf("packet is not ipv4, type: %v", int((data[0]>>4)&0x0f))
	}

	// Adjust our start position based on the advertised ip header length
	ihl := int(data[0]&0x0f) << 2

	// Well formed ip header length?
	if ihl < ipv4.HeaderLen {
		return fmt.Errorf("packet had an invalid header length: %v", ihl)
	}

	// Check if this is the second or further fragment of a fragmented packet.
	flagsfrags := binary.BigEndian.Uint16(data[6:8])
	fp.Fragment = (flagsfrags & 0x1FFF) != 0

	// Firewall handles protocol checks
	fp.Protocol = data[9]

	// Accounting for a variable header length, do we have enough data for our src/dst tuples?
	minLen := ihl
	if !fp.Fragment && fp.Protocol != firewall.ProtoICMP {
		minLen += minFwPacketLen
	}
	if len(data) < minLen {
		return fmt.Errorf("packet is less than %v bytes, ip header len: %v", minLen, ihl)
	}

	// Firewall packets are locally oriented
	if incoming {
		fp.RemoteIP = iputil.Ip2VpnIp(data[12:16])
		fp.LocalIP = iputil.Ip2VpnIp(data[16:20])
		if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
			fp.RemotePort = 0
			fp.LocalPort = 0
		} else {
			fp.RemotePort = binary.BigEndian.Uint16(data[ihl : ihl+2])
			fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
		}
	} else {
		fp.LocalIP = iputil.Ip2VpnIp(data[12:16])
		fp.RemoteIP = iputil.Ip2VpnIp(data[16:20])
		if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
			fp.RemotePort = 0
			fp.LocalPort = 0
		} else {
			fp.LocalPort = binary.BigEndian.Uint16(data[ihl : ihl+2])
			fp.RemotePort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
		}
	}

	return nil
}
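
// decrypt authenticates and decrypts a message body with the tunnel's receive
// key, then advances the replay window; it returns an error if decryption fails
// or the message counter is out of window.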
func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []byte, h *header.H, nb []byte) ([]byte, error) {
	var err error
	out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], mc, nb)
	if err != nil {
		return nil, err
	}

	if !hostinfo.ConnectionState.window.Update(f.l, mc) {
		hostinfo.logger(f.l).WithField("header", h).
			Debugln("dropping out of window packet")
		return nil, errors.New("out of window packet")
	}

	return out, nil
}
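
// decryptToTun decrypts an inbound data packet, validates it against the replay
// window and the firewall, and writes the resulting IP packet to the tun device.
// It returns false if the packet was dropped at any step.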
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) bool {
	var err error

	out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
	if err != nil {
		hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
		//TODO: maybe after build 64 is out? 06/14/2018 - NB
		//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
		return false
	}

	err = newPacket(out, true, fwPacket)
	if err != nil {
		hostinfo.logger(f.l).WithError(err).WithField("packet", out).
			Warnf("Error while validating inbound packet")
		return false
	}

	if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
		hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
			Debugln("dropping out of window packet")
		return false
	}

	dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
	if dropReason != nil {
		// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
		// This gives us a buffer to build the reject packet in
		f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, packet, q)
		if f.l.Level >= logrus.DebugLevel {
			hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
				WithField("reason", dropReason).
				Debugln("dropping inbound packet")
		}
		return false
	}

	f.connectionManager.In(hostinfo.localIndexId)
	_, err = f.readers[q].Write(out)
	if err != nil {
		f.l.WithError(err).Error("Failed to write to tun")
	}
	return true
}
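
// maybeSendRecvError sends a recv error to the endpoint only when the
// interface's sendRecvErrorConfig permits it for that IP.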
func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) {
	if f.sendRecvErrorConfig.ShouldSendRecvError(endpoint.IP) {
		f.sendRecvError(endpoint, index)
	}
}
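
// sendRecvError sends an unencrypted RecvError header for the given index back
// to the endpoint so the remote can tear down its stale tunnel state and
// re-handshake.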
func (f *Interface) sendRecvError(endpoint *udp.Addr, index uint32) {
	f.messageMetrics.Tx(header.RecvError, 0, 1)

	//TODO: this should be a signed message so we can trust that we should drop the index
	b := header.Encode(make([]byte, header.Len), header.Version, header.RecvError, 0, index, 0)
	f.outside.WriteTo(b, endpoint)
	if f.l.Level >= logrus.DebugLevel {
		f.l.WithField("index", index).
			WithField("udpAddr", endpoint).
			Debug("Recv error sent")
	}
}
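
// handleRecvError processes a RecvError from a remote: after rate limiting and
// a basic address spoofing check, it tears down the local tunnel and removes
// any pending handshake state to allow a fast reconnect.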
func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) {
	if f.l.Level >= logrus.DebugLevel {
		f.l.WithField("index", h.RemoteIndex).
			WithField("udpAddr", addr).
			Debug("Recv error received")
	}

	hostinfo := f.hostMap.QueryReverseIndex(h.RemoteIndex)
	if hostinfo == nil {
		f.l.WithField("remoteIndex", h.RemoteIndex).Debugln("Did not find remote index in main hostmap")
		return
	}

	if !hostinfo.RecvErrorExceeded() {
		return
	}

	if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) {
		f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
		return
	}

	f.closeTunnel(hostinfo)
	// We also delete it from pending hostmap to allow for fast reconnect.
	f.handshakeManager.DeleteHostInfo(hostinfo)
}

/*
func (f *Interface) sendMeta(ci *ConnectionState, endpoint *net.UDPAddr, meta *NebulaMeta) {
	if ci.eKey != nil {
		//TODO: log error?
		return
	}

	msg, err := proto.Marshal(meta)
	if err != nil {
		l.Debugln("failed to encode header")
	}

	c := ci.messageCounter
	b := HeaderEncode(nil, Version, uint8(metadata), 0, hostinfo.remoteIndexId, c)
	ci.messageCounter++

	msg := ci.eKey.EncryptDanger(b, nil, msg, c)
	//msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c)
	f.outside.WriteTo(msg, endpoint)
}
*/
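
// RecombineCertAndValidate reattaches the peer's static key from the noise
// handshake state to the received raw certificate bytes and verifies the
// resulting certificate against the provided CA pool.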
func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte, caPool *cert.NebulaCAPool) (*cert.NebulaCertificate, error) {
	pk := h.PeerStatic()

	if pk == nil {
		return nil, errors.New("no peer static key was present")
	}

	if rawCertBytes == nil {
		return nil, errors.New("provided payload was empty")
	}

	r := &cert.RawNebulaCertificate{}
	err := proto.Unmarshal(rawCertBytes, r)
	if err != nil {
		return nil, fmt.Errorf("error unmarshaling cert: %s", err)
	}

	// If the Details are nil, just exit to avoid crashing
	if r.Details == nil {
		return nil, fmt.Errorf("certificate did not contain any details")
	}

	r.Details.PublicKey = pk
	recombined, err := proto.Marshal(r)
	if err != nil {
		return nil, fmt.Errorf("error while recombining certificate: %s", err)
	}

	c, _ := cert.UnmarshalNebulaCertificate(recombined)
	isValid, err := c.Verify(time.Now(), caPool)
	if err != nil {
		return c, fmt.Errorf("certificate validation failed: %s", err)
	} else if !isValid {
		// This case should never happen but here's to defensive programming!
		return c, errors.New("certificate validation failed but did not return an error")
	}

	return c, nil
}