mirror of https://github.com/slackhq/nebula.git
Remove Vagrant example (#1129)
This commit is contained in:
parent 7e7d5e00ca
commit f7db0eb5cc

@@ -1,138 +0,0 @@
# Quickstart Guide

This guide brings up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.

## Creating the virtualenv for ansible

Within the `quickstart/` directory, do the following:

```
# make a virtual environment
virtualenv venv

# get into the virtualenv
source venv/bin/activate

# install ansible
pip install -r requirements.yml
```

## Bringing up the vagrant environment

The Vagrant environment relies on the `vagrant-hostmanager` plugin. To install it, run

```
vagrant plugin install vagrant-hostmanager
```

Bring up all hosts defined in the Vagrantfile with

`vagrant up`

Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running

`ansible-playbook playbook.yml -i inventory -u vagrant`
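
If the playbook fails to connect, a quick sanity check is an ad-hoc Ansible ping (the Ansible module, not ICMP) against the same inventory; this is standard ansible usage, sketched here assuming you are still in the `ansible/` directory:

```
# verify ansible can reach and authenticate to all three boxes
ansible all -i inventory -u vagrant -m ping
```

Each host should answer with `pong` before you bother debugging the playbook itself.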

## Testing within the vagrant env

Once the ansible run is done, hop onto a vagrant box with

`vagrant ssh generic1.vagrant`

or directly via

`ssh vagrant@<ip-address-in-vagrant-file>` (the password for the vagrant user on the boxes is `vagrant`)

A quick first test once the vagrant boxes are up is to ping from `generic1.vagrant` to `generic2.vagrant` using their respective nebula ip addresses:

```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```

You can further verify that the allowed nebula firewall rules work by ssh'ing from one generic box to the other:

`ssh vagrant@<nebula-ip-address>` (the password for the vagrant user on the boxes is `vagrant`)

See `/etc/nebula/config.yml` on a box for the firewall rules.

To see full handshakes and hostmaps, change the logging level in `/etc/nebula/config.yml` on the vagrant boxes from info to debug.

You can watch the nebula logs by running

```
sudo journalctl -fu nebula
```

Refer to the README in the nebula source directory for further instructions on configuring nebula.

## Troubleshooting

### Is nebula up and running?

Run

```
ifconfig
```

and verify that it shows an interface named `nebula1` that is up:

```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1300
        inet 10.168.91.210  netmask 255.128.0.0  destination 10.168.91.210
        inet6 fe80::aeaf:b105:e6dc:936c  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11  bytes 600 (600.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
```
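
If the interface is missing, it is worth checking the systemd service itself; these are stock systemd/iproute2 commands, not anything specific to this setup:

```
# is the nebula unit running, and what did it last log?
sudo systemctl status nebula

# iproute2 equivalent of the ifconfig check above
ip addr show nebula1
```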

### Connectivity

Are you able to ping other boxes on the private nebula network?

The following are the private nebula ip addresses of the vagrant env (each is derived from the box's Vagrant private ip by the `to_nebula_ip` ansible filter further down in this diff, which rewrites the first two octets to `10.168`):

```
generic1.vagrant    [nebula_ip] 10.168.91.210
generic2.vagrant    [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```

Try pinging generic1.vagrant to and from any other box using its nebula ip above.

If a specific port is involved, double check the nebula firewall rules under `/etc/nebula/config.yml` to make sure connectivity is allowed for your use-case:

```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
    - proto: any
      port: 53
      host: any

  outbound:
    - proto: any
      port: any
      host: any
```
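
To test a specific allowed rule rather than just ICMP, you can probe a port over the nebula address; a minimal sketch using netcat, assuming it is installed on the boxes:

```
# probe tcp/22 on generic2's nebula address (allowed by the inbound rules above)
vagrant@generic1:~$ nc -zv 10.168.91.220 22
```

A refused or timed-out connection while nebula is up usually points at the firewall section above.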

@@ -1,40 +0,0 @@

Vagrant.require_version ">= 2.2.6"

nodes = [
  { :hostname => 'generic1.vagrant',    :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1 },
  { :hostname => 'generic2.vagrant',    :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1 },
  { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1 },
]

Vagrant.configure("2") do |config|

  config.ssh.insert_key = false

  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.enable :apt
  else
    printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
  end

  if Vagrant.has_plugin?('vagrant-hostmanager')
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
    config.hostmanager.include_offline = true
  else
    config.vagrant.plugins = "vagrant-hostmanager"
  end

  nodes.each do |node|
    config.vm.define node[:hostname] do |node_config|
      node_config.vm.box      = node[:box]
      node_config.vm.hostname = node[:hostname]
      node_config.vm.network :private_network, ip: node[:ip]
      node_config.vm.provider :virtualbox do |vb|
        vb.memory = node[:ram]
        vb.cpus   = node[:cpus]
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
      end
    end
  end
end
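
After `vagrant up`, the three nodes defined above should all report as running; the output below is illustrative and will vary with your provider:

```
$ vagrant status
Current machine states:

generic1.vagrant          running (virtualbox)
generic2.vagrant          running (virtualbox)
lighthouse1.vagrant       running (virtualbox)
```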

@@ -1,4 +0,0 @@

[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes

@@ -1,21 +0,0 @@

#!/usr/bin/python


class FilterModule(object):
    def filters(self):
        return {
            'to_nebula_ip': self.to_nebula_ip,
            'map_to_nebula_ips': self.map_to_nebula_ips,
        }

    # Rewrite the first two octets of an ipv4 address to 10.168,
    # e.g. '172.11.91.210' -> '10.168.91.210'.
    def to_nebula_ip(self, ip_str):
        ip_list = list(map(int, ip_str.split(".")))
        ip_list[0] = 10
        ip_list[1] = 168
        ip = '.'.join(map(str, ip_list))
        return ip

    # Apply to_nebula_ip to a list of addresses and return them as a
    # comma-separated string.
    def map_to_nebula_ips(self, ip_strs):
        ip_list = [self.to_nebula_ip(ip_str) for ip_str in ip_strs]
        ips = ', '.join(ip_list)
        return ips
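
The mapping is easy to check outside of ansible; this one-liner simply re-implements the octet rewrite above for a single address (any recent python works):

```
$ python3 -c "print('.'.join(['10', '168'] + '172.11.91.210'.split('.')[2:]))"
10.168.91.210
```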

@@ -1,11 +0,0 @@

[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

[generic]
generic1.vagrant
generic2.vagrant

[lighthouse]
lighthouse1.vagrant

@@ -1,23 +0,0 @@

---
- name: test connection to vagrant boxes
  hosts: all
  tasks:
    - debug: msg=ok

- name: build nebula binaries locally
  connection: local
  hosts: localhost
  tasks:
    - command: chdir=../../../ make build/linux-amd64/"{{ item }}"
      with_items:
        - nebula
        - nebula-cert
      tags:
        - build-nebula

- name: install nebula on all vagrant hosts
  hosts: all
  become: yes
  gather_facts: yes
  roles:
    - nebula
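
Because the build play is tagged, the binaries can be rebuilt without re-running the whole deploy; `--tags` is standard ansible-playbook behavior:

```
ansible-playbook playbook.yml -i inventory -u vagrant --tags build-nebula
```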

@@ -1,3 +0,0 @@

---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"

@@ -1,14 +0,0 @@

[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
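
The role installs and starts this unit for you, but if you ever place it by hand the usual systemd steps apply (a sketch, nothing nebula-specific):

```
sudo cp systemd.nebula.service /etc/systemd/system/nebula.service
sudo systemctl daemon-reload
sudo systemctl enable --now nebula
```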

@@ -1,5 +0,0 @@

-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----

@@ -1,4 +0,0 @@

-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----

@@ -1,5 +0,0 @@

---
# handlers file for nebula

- name: restart nebula
  service: name=nebula state=restarted

@@ -1,62 +0,0 @@

---
# tasks file for nebula

- name: get the vagrant network interface and set fact
  set_fact:
    vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
  tags:
    - nebula-conf

- name: install built nebula binary
  copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755
  with_items:
    - nebula
    - nebula-cert

- name: create nebula config directory
  file: path="{{ nebula_config_directory }}" state=directory mode=0755

- name: temporarily copy over root.crt and root.key to sign
  copy: src={{ item }} dest=/opt/{{ item }}
  with_items:
    - vagrant-test-ca.key
    - vagrant-test-ca.crt

- name: remove previously signed host certificate
  file: dest=/etc/nebula/{{ item }} state=absent
  with_items:
    - host.crt
    - host.key

- name: sign using the root key
  command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key

- name: remove root.key used to sign
  file: dest=/opt/{{ item }} state=absent
  with_items:
    - vagrant-test-ca.key

- name: write the content of the trusted ca certificate
  copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
  notify: restart nebula

- name: Create config directory
  file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory

- name: nebula config
  template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
  notify: restart nebula
  tags:
    - nebula-conf

- name: nebula systemd
  copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
  register: addconf
  notify: restart nebula

- name: maybe reload systemd
  shell: systemctl daemon-reload
  when: addconf.changed

- name: nebula running
  service: name="nebula" state=started enabled=yes
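
Since the fact-gathering and templating tasks above carry the `nebula-conf` tag, config-only changes can be pushed without reinstalling binaries or re-signing certificates:

```
ansible-playbook playbook.yml -i inventory -u vagrant --tags nebula-conf
```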

@@ -1,85 +0,0 @@

pki:
  ca: /etc/nebula/vagrant-test-ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key

# Port Nebula will be listening on
listen:
  host: 0.0.0.0
  port: 4242

# sshd can expose informational and administrative functions via ssh
sshd:
  # Toggles the feature
  enabled: true
  # Host and port to listen on
  listen: 127.0.0.1:2222
  # A file containing the ssh host private key to use
  host_key: /etc/ssh/ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  authorized_users:
{% for user in nebula_users %}
    - user: {{ user.name }}
      keys:
{% for key in user.ssh_auth_keys %}
        - "{{ key }}"
{% endfor %}
{% endfor %}

local_range: 10.168.0.0/16

static_host_map:
  # lighthouse
  {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"]

default_route: "0.0.0.0"

lighthouse:
{% if 'lighthouse' in group_names %}
  am_lighthouse: true
  serve_dns: true
{% else %}
  am_lighthouse: false
{% endif %}
  interval: 60
{% if 'generic' in group_names %}
  hosts:
    - {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}
{% endif %}

# Configure the private interface
tun:
  dev: nebula1
  # Sets MTU of the tun dev.
  # MTU of the tun must be smaller than the MTU of the eth0 interface
  mtu: 1300

# TODO
# Configure logging level
logging:
  level: info
  format: json

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
{% if "lighthouse" in groups %}
    - proto: any
      port: 53
      host: any
{% endif %}

  outbound:
    - proto: any
      port: any
      host: any
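
For the vagrant IPs in this example, the rendered `static_host_map` on a generic host would come out roughly as follows (assuming the private-network interface is the one the `set_fact` task picks up):

```
vagrant@generic1:~$ grep -A2 static_host_map /etc/nebula/config.yml
static_host_map:
  # lighthouse
  10.168.91.230: ["172.11.91.230:4242"]
```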

@@ -1,7 +0,0 @@

---
# vars file for nebula

nebula_users:
  - name: user1
    ssh_auth_keys:
      - "ed25519 place-your-ssh-public-key-here"

@@ -1 +0,0 @@

ansible