FEATURE: userspace WireGuard

Add the possibility to use a userspace implementation of WireGuard, specifically the Rust implementation boringtun.
leonnicolas 2020-12-29 10:48:30 +01:00
parent 2d12d9ef81
commit e30cff5293
GPG Key ID: 088D0743E2B65C07
9 changed files with 605 additions and 7 deletions

View File

@@ -80,6 +80,7 @@ var (
func Main() error {
backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
cleanUpIface := flag.Bool("clean-up-interface", false, "Should Kilo delete its interface when it shuts down?")
+createIface := flag.Bool("create-interface", true, "Should kilo create an interface on startup?")
cni := flag.Bool("cni", true, "Should Kilo manage the node's CNI configuration?")
cniPath := flag.String("cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
compatibility := flag.String("compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
@@ -177,7 +178,7 @@ func Main() error {
return fmt.Errorf("backend %v unknown; possible values are: %s", *backend, availableBackends)
}
-m, err := mesh.New(b, enc, gr, *hostname, uint32(port), s, *local, *cni, *cniPath, *iface, *cleanUpIface, log.With(logger, "component", "kilo"))
+m, err := mesh.New(b, enc, gr, *hostname, uint32(port), s, *local, *cni, *cniPath, *iface, *cleanUpIface, *createIface, log.With(logger, "component", "kilo"))
if err != nil {
return fmt.Errorf("failed to create Kilo mesh: %v", err)
}

View File

@@ -27,6 +27,8 @@ Usage of bin/amd64/kg:
Path to CNI config. (default "/etc/cni/net.d/10-kilo.conflist")
-compatibility string
Should Kilo run in compatibility mode? Possible values: flannel
+-create-interface
+Should kilo create an interface on startup? (default true)
-encapsulate string
When should Kilo encapsulate packets within a location? Possible values: never, crosssubnet, always (default "always")
-hostname string

View File

@@ -14,7 +14,7 @@ Kilo will try to infer the location of the node using the [topology.kubernetes.i
Additionally, Kilo supports using a custom topology label by setting the command line flag `--topology-label=<label>`.
If this label is not set, then the [kilo.squat.ai/location](./annotations.md#location) node annotation can be used.
-For example, in order to join nodes in Google Cloud and AWS into a single cluster, an administrator could use the following snippet could to annotate all nodes with `GCP` in the name:
+For example, in order to join nodes in Google Cloud and AWS into a single cluster, an administrator could use the following snippet to annotate all nodes with `GCP` in the name:
```shell
for node in $(kubectl get nodes | grep -i gcp | awk '{print $1}'); do kubectl annotate node $node kilo.squat.ai/location="gcp"; done

View File

@@ -0,0 +1,33 @@
# Userspace WireGuard
It is possible to use a userspace implementation of WireGuard with Kilo.
This can make sense if:
* not all nodes in the cluster have WireGuard installed
* you do not want to install the DKMS WireGuard package on these nodes
## Homogeneous Cluster
With a homogeneous cluster (no node has the WireGuard kernel module), you can run a userspace WireGuard implementation as a DaemonSet.
This will create a WireGuard interface and Kilo will configure it.
In order to avoid a race condition, `kg` needs to be passed the `--create-interface=false` flag.
An example configuration for a k3s cluster with [boringtun](https://github.com/cloudflare/boringtun) can be applied with
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/Kilo/master/manifests/kilo-k3s-userspace.yaml
```
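To make the ordering concrete, here is a rough shell sketch of what the DaemonSet encodes: boringtun creates the interface first, and `kg` only configures it. The flags are taken from the manifest; the wait loop and the use of `$(hostname)` are illustrative assumptions only, since in the manifest the two programs run as separate containers in the same pod.
```shell
# Illustrative sketch only; in the manifest these run as separate containers.
# 1. boringtun creates the kilo0 interface (userspace WireGuard).
boringtun --disable-drop-privileges=true --foreground kilo0 &

# 2. Wait until the interface exists; kg will not create it itself.
until ip link show kilo0 > /dev/null 2>&1; do sleep 1; done

# 3. kg only configures the pre-existing interface.
kg --kubeconfig=/etc/kubernetes/kubeconfig \
  --hostname="$(hostname)" \
  --create-interface=false \
  --interface=kilo0
```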
__Note:__ Even if some nodes have the WireGuard kernel module, this configuration will still use the userspace implementation of WireGuard on all of them.
## Heterogeneous Cluster
If you have a heterogeneous cluster (some nodes are missing the WireGuard kernel module) and you wish to use the kernel module wherever it is available, you can apply this configuration to a k3s cluster:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/Kilo/master/manifests/kilo-k3s-userspace-heterogeneous.yaml
```
This config will apply [nkml](https://github.com/leonnicolas/nkml) as a DaemonSet to label all nodes according to the presence of the WireGuard kernel module.
It will apply two different DaemonSets with Kilo: `kilo` without userspace WireGuard and `kilo-userspace` with boringtun as a sidecar.
Because the Kilo DaemonSets depend on the labels applied by nkml, and nkml cannot itself depend on a CNI being ready, nkml must run on the host network and needs a kubeconfig in order to update the labels.
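After applying the manifest, you can check that the labels and node selectors line up. The label key and pod labels below are the ones used in the manifest; the exact output naturally depends on your cluster.
```shell
# Show the nkml label on every node; "true" means the kernel module is present.
kubectl get nodes -L nkml.squat.ai/wireguard

# kilo should land on "true" nodes, kilo-userspace (with the boringtun sidecar)
# on "false" nodes.
kubectl -n kube-system get pods -o wide \
  -l 'app.kubernetes.io/name in (kilo, kilo-userspace)'
```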

View File

@@ -0,0 +1,349 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"forceAddress":true,
"mtu": 1420,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- get
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
spec:
nodeSelector:
nkml.squat.ai/wireguard: "true"
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --interface=kilo0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
hostPath:
# Since kilo runs as a daemonset, it is recommended that you copy the
# k3s.yaml kubeconfig file from the master node to all worker nodes
# with the same path structure.
path: /etc/rancher/k3s/k3s.yaml
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo-userspace
namespace: kube-system
labels:
app.kubernetes.io/name: kilo-userspace
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo-userspace
template:
metadata:
labels:
app.kubernetes.io/name: kilo-userspace
spec:
nodeSelector:
nkml.squat.ai/wireguard: "false"
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --create-interface=false
- --interface=kilo0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
- name: boringtun
image: leonnicolas/boringtun
args:
- --disable-drop-privileges=true
- --foreground
- kilo0
securityContext:
privileged: true
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
hostPath:
# Since kilo runs as a daemonset, it is recommended that you copy the
# k3s.yaml kubeconfig file from the master node to all worker nodes
# with the same path structure.
path: /etc/rancher/k3s/k3s.yaml
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: wireguard
hostPath:
path: /var/run/wireguard
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: nkml
namespace: kube-system
labels:
app.kubernetes.io/name: nkml
spec:
selector:
matchLabels:
app.kubernetes.io/name: nkml
template:
metadata:
labels:
app.kubernetes.io/name: nkml
spec:
hostNetwork: true
containers:
- name: nkml
image: leonnicolas/nkml
args:
- --hostname=$(NODE_NAME)
- --label-mod=wireguard
- --kubeconfig=/etc/kubernetes/kubeconfig
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: http
containerPort: 8080
volumeMounts:
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
volumes:
- name: kubeconfig
hostPath:
# Since the above DaemonSets depend on the labels
# and nkml would need a CNI to start,
# it needs to run on the host network and use the kubeconfig
# to label the nodes.
path: /etc/rancher/k3s/k3s.yaml

View File

@@ -0,0 +1,199 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"forceAddress":true,
"mtu": 1420,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --create-interface=false
- --interface=kilo0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
- name: boringtun
image: leonnicolas/boringtun
args:
- --disable-drop-privileges=true
- --foreground
- kilo0
securityContext:
privileged: true
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
hostPath:
# Since kilo runs as a daemonset, it is recommended that you copy the
# k3s.yaml kubeconfig file from the master node to all worker nodes
# with the same path structure.
path: /etc/rancher/k3s/k3s.yaml
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: wireguard
hostPath:
path: /var/run/wireguard

View File

@@ -85,7 +85,7 @@ type Mesh struct {
}
// New returns a new Mesh instance.
-func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, logger log.Logger) (*Mesh, error) {
+func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, logger log.Logger) (*Mesh, error) {
if err := os.MkdirAll(kiloPath, 0700); err != nil {
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
}
@@ -117,9 +117,18 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
}
privIface := ifaces[0].Index
-kiloIface, _, err := wireguard.New(iface)
-if err != nil {
-return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
+var kiloIface int
+if createIface {
+kiloIface, _, err = wireguard.New(iface)
+if err != nil {
+return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
+}
+} else {
+link, err := netlink.LinkByName(iface)
+if err != nil {
+return nil, fmt.Errorf("failed to get interface index: %v", err)
+}
+kiloIface = link.Attrs().Index
}
if enc.Strategy() != encapsulation.Never {
if err := enc.Init(privIface); err != nil {

View File

@@ -0,0 +1,5 @@
---
id: userspace-wireguard
title: Userspace WireGuard
hide_title: true
---

View File

@@ -7,7 +7,7 @@ module.exports = {
{
type: 'category',
label: 'Guides',
-items: ['topology', 'vpn', 'vpn-server', 'multi-cluster-services', 'network-policies'],
+items: ['topology', 'vpn', 'vpn-server', 'multi-cluster-services', 'network-policies', 'userspace-wireguard'],
},
{
type: 'category',