Compare commits


1 Commit

Author: leonnicolas
SHA1: b749def837
Date: 2021-08-18 14:53:00 +02:00

Prepare move to kilo-io

This commit changes all package paths from squat/kilo to kilo-io/kilo
and the docker image name from squat/kilo to kiloio/kilo.
The API name and comments regarding the website kilo.squat.ai are
unchanged.

Signed-off-by: leonnicolas <leonloechner@gmx.de>
1593 changed files with 31447 additions and 161136 deletions
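
The move described above is largely mechanical. As a rough illustration only (the commands and variable names below are assumptions, not taken from this commit), a module-path rename like this one could be scripted along these lines before re-vendoring:

```shell
# Illustrative sketch of the rename; not the exact commands used in this commit.
OLD=github.com/squat/kilo
NEW=github.com/kilo-io/kilo

# Point go.mod at the new module path.
go mod edit -module "$NEW"

# Rewrite import paths in all Go source files.
grep -rl --include='*.go' "$OLD" . | xargs sed -i "s|$OLD|$NEW|g"

# Make go.mod, go.sum, and vendor/ agree with the new path.
go mod tidy
go mod vendor
```

The Docker image name is set separately through the IMAGE variable in the Makefile, which is why that file also appears in the diff below.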

View File

@ -1,3 +0,0 @@
**
!/bin/linux

View File

@ -6,25 +6,13 @@ on:
tags: tags:
- "*" - "*"
pull_request: pull_request:
branches: [ main ]
schedule: schedule:
- cron: '0 0 * * *' - cron: '0 0 * * *'
workflow_dispatch: workflow_dispatch:
jobs: jobs:
vendor:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
- name: Vendor
run: |
make vendor
git diff --exit-code
build: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -32,23 +20,10 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Build - name: Build
run: make run: make
docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
- name: Build docs
run: |
make gen-docs
git diff --exit-code
linux: linux:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -56,7 +31,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Build kg and kgctl for all Linux Architectures - name: Build kg and kgctl for all Linux Architectures
run: make all-build run: make all-build
@ -67,7 +42,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Build kgctl for Darwin amd64 - name: Build kgctl for Darwin amd64
run: make OS=darwin ARCH=amd64 run: make OS=darwin ARCH=amd64
- name: Build kgctl for Darwin arm64 - name: Build kgctl for Darwin arm64
@ -80,7 +55,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Build kgctl for Windows - name: Build kgctl for Windows
run: make OS=windows run: make OS=windows
@ -91,18 +66,19 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Run Unit Tests - name: Run Unit Tests
run: make unit run: make unit
e2e: e2e:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Run e2e Tests - name: Run e2e Tests
run: make e2e run: make e2e
@ -113,7 +89,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Lint Code - name: Lint Code
run: make lint run: make lint
@ -124,7 +100,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Enable Experimental Docker CLI - name: Enable Experimental Docker CLI
run: | run: |
echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json
@ -140,7 +116,6 @@ jobs:
push: push:
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
needs: needs:
- vendor
- build - build
- linux - linux
- darwin - darwin
@ -154,7 +129,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Enable Experimental Docker CLI - name: Enable Experimental Docker CLI
run: | run: |
echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json

View File

@ -3,15 +3,15 @@ on:
types: [created] types: [created]
name: Handle Release name: Handle Release
jobs: jobs:
kgctl: linux:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: 1.18 go-version: 1.16.5
- name: Build kgctl Binaries to Be Released - name: Make Directory with kgctl Binaries to Be Released
run: make release run: make release
- name: Publish Release - name: Publish Release
uses: skx/github-action-publish-binaries@master uses: skx/github-action-publish-binaries@master

View File

@ -1,7 +1,7 @@
ARG FROM=alpine ARG FROM=alpine
FROM $FROM AS cni FROM $FROM AS cni
ARG GOARCH=amd64 ARG GOARCH=amd64
ARG CNI_PLUGINS_VERSION=v1.1.1 ARG CNI_PLUGINS_VERSION=v0.9.1
RUN apk add --no-cache curl && \ RUN apk add --no-cache curl && \
curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-$GOARCH-$CNI_PLUGINS_VERSION.tgz && \ curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-$GOARCH-$CNI_PLUGINS_VERSION.tgz && \
tar -xf cni.tar.gz tar -xf cni.tar.gz
@ -11,9 +11,7 @@ ARG GOARCH
ARG ALPINE_VERSION=v3.12 ARG ALPINE_VERSION=v3.12
LABEL maintainer="squat <lserven@gmail.com>" LABEL maintainer="squat <lserven@gmail.com>"
RUN echo -e "https://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/main\nhttps://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/community" > /etc/apk/repositories && \ RUN echo -e "https://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/main\nhttps://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/community" > /etc/apk/repositories && \
apk add --no-cache ipset iptables ip6tables graphviz font-noto apk add --no-cache ipset iptables ip6tables wireguard-tools graphviz font-noto
COPY --from=cni bridge host-local loopback portmap /opt/cni/bin/ COPY --from=cni bridge host-local loopback portmap /opt/cni/bin/
ADD https://raw.githubusercontent.com/kubernetes-sigs/iptables-wrappers/e139a115350974aac8a82ec4b815d2845f86997e/iptables-wrapper-installer.sh /
RUN chmod 700 /iptables-wrapper-installer.sh && /iptables-wrapper-installer.sh --no-sanity-check
COPY bin/linux/$GOARCH/kg /opt/bin/ COPY bin/linux/$GOARCH/kg /opt/bin/
ENTRYPOINT ["/opt/bin/kg"] ENTRYPOINT ["/opt/bin/kg"]

View File

@ -12,9 +12,9 @@ else
endif endif
RELEASE_BINS := $(addprefix bin/release/kgctl-, $(addprefix linux-, $(ALL_ARCH)) darwin-amd64 darwin-arm64 windows-amd64) RELEASE_BINS := $(addprefix bin/release/kgctl-, $(addprefix linux-, $(ALL_ARCH)) darwin-amd64 darwin-arm64 windows-amd64)
PROJECT := kilo PROJECT := kilo
PKG := github.com/squat/$(PROJECT) PKG := github.com/kilo-io/$(PROJECT)
REGISTRY ?= index.docker.io REGISTRY ?= index.docker.io
IMAGE ?= squat/$(PROJECT) IMAGE ?= kiloio/$(PROJECT)
FULLY_QUALIFIED_IMAGE := $(REGISTRY)/$(IMAGE) FULLY_QUALIFIED_IMAGE := $(REGISTRY)/$(IMAGE)
TAG := $(shell git describe --abbrev=0 --tags HEAD 2>/dev/null) TAG := $(shell git describe --abbrev=0 --tags HEAD 2>/dev/null)
@ -38,15 +38,15 @@ DOCS_GEN_BINARY := bin/docs-gen
DEEPCOPY_GEN_BINARY := bin/deepcopy-gen DEEPCOPY_GEN_BINARY := bin/deepcopy-gen
INFORMER_GEN_BINARY := bin/informer-gen INFORMER_GEN_BINARY := bin/informer-gen
LISTER_GEN_BINARY := bin/lister-gen LISTER_GEN_BINARY := bin/lister-gen
STATICCHECK_BINARY := bin/staticcheck GOLINT_BINARY := bin/golint
EMBEDMD_BINARY := bin/embedmd EMBEDMD_BINARY := bin/embedmd
KIND_BINARY := $(shell pwd)/bin/kind KIND_BINARY := $(shell pwd)/bin/kind
KUBECTL_BINARY := $(shell pwd)/bin/kubectl KUBECTL_BINARY := $(shell pwd)/bin/kubectl
BASH_UNIT := $(shell pwd)/bin/bash_unit BASH_UNIT := $(shell pwd)/bin/bash_unit
BASH_UNIT_FLAGS := BASH_UNIT_FLAGS :=
BUILD_IMAGE ?= golang:1.18.0 BUILD_IMAGE ?= golang:1.16.5-alpine
BASE_IMAGE ?= alpine:3.15 BASE_IMAGE ?= alpine:3.13
build: $(BINS) build: $(BINS)
@ -81,7 +81,7 @@ crd: manifests/crds.yaml
manifests/crds.yaml: pkg/k8s/apis/kilo/v1alpha1/types.go $(CONTROLLER_GEN_BINARY) manifests/crds.yaml: pkg/k8s/apis/kilo/v1alpha1/types.go $(CONTROLLER_GEN_BINARY)
$(CONTROLLER_GEN_BINARY) crd \ $(CONTROLLER_GEN_BINARY) crd \
paths=./pkg/k8s/apis/kilo/... \ paths=./pkg/k8s/apis/kilo/... \
output:crd:stdout > $@ output:crd:stdout | tail -n +3 > $@
client: pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go client: pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go
pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(CLIENT_GEN_BINARY) pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(CLIENT_GEN_BINARY)
@ -139,7 +139,7 @@ pkg/k8s/listers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.
rm -r github.com || true rm -r github.com || true
go fmt ./pkg/k8s/listers/... go fmt ./pkg/k8s/listers/...
gen-docs: generate docs/api.md docs/kg.md gen-docs: generate docs/api.md
docs/api.md: pkg/k8s/apis/kilo/v1alpha1/types.go $(DOCS_GEN_BINARY) docs/api.md: pkg/k8s/apis/kilo/v1alpha1/types.go $(DOCS_GEN_BINARY)
$(DOCS_GEN_BINARY) $< > $@ $(DOCS_GEN_BINARY) $< > $@
@ -165,7 +165,7 @@ fmt:
@echo $(GO_PKGS) @echo $(GO_PKGS)
gofmt -w -s $(GO_FILES) gofmt -w -s $(GO_FILES)
lint: header $(STATICCHECK_BINARY) lint: header $(GOLINT_BINARY)
@echo 'go vet $(GO_PKGS)' @echo 'go vet $(GO_PKGS)'
@vet_res=$$(GO111MODULE=on go vet -mod=vendor $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \ @vet_res=$$(GO111MODULE=on go vet -mod=vendor $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \
echo ""; \ echo ""; \
@ -174,10 +174,10 @@ lint: header $(STATICCHECK_BINARY)
echo "$$vet_res"; \ echo "$$vet_res"; \
exit 1; \ exit 1; \
fi fi
@echo '$(STATICCHECK_BINARY) $(GO_PKGS)' @echo '$(GOLINT_BINARY) $(GO_PKGS)'
@lint_res=$$($(STATICCHECK_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \ @lint_res=$$($(GOLINT_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
echo ""; \ echo ""; \
echo "Staticcheck found style issues. Please check the reported issues"; \ echo "Golint found style issues. Please check the reported issues"; \
echo "and fix them if necessary before submitting the code for review:"; \ echo "and fix them if necessary before submitting the code for review:"; \
echo "$$lint_res"; \ echo "$$lint_res"; \
exit 1; \ exit 1; \
@ -209,7 +209,7 @@ $(BASH_UNIT):
chmod +x $@ chmod +x $@
e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl
KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/kgctl.sh ./e2e/teardown.sh KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/teardown.sh
header: .header header: .header
@HEADER=$$(cat .header); \ @HEADER=$$(cat .header); \
@ -242,7 +242,7 @@ website/docs/README.md: README.md
cat README.md >> $@ cat README.md >> $@
cp -r docs/graphs website/static/img/ cp -r docs/graphs website/static/img/
sed -i 's/\.\/docs\///g' $@ sed -i 's/\.\/docs\///g' $@
find $(@D) -type f -name '*.md' | xargs -I{} sed -i 's/\.\/\(.\+\.\(svg\|png\)\)/\/img\/\1/g' {} find $(@D) -type f -name '*.md' | xargs -I{} sed -i 's/\.\/\(.\+\.svg\)/\/img\/\1/g' {}
sed -i 's/graphs\//\/img\/graphs\//g' $@ sed -i 's/graphs\//\/img\/graphs\//g' $@
# The next line is a workaround until mdx, docusaurus' markdown parser, can parse links with preceding brackets. # The next line is a workaround until mdx, docusaurus' markdown parser, can parse links with preceding brackets.
sed -i 's/\[\]\(\[.*\](.*)\)/\&#91;\&#93;\1/g' website/docs/api.md sed -i 's/\[\]\(\[.*\](.*)\)/\&#91;\&#93;\1/g' website/docs/api.md
@ -358,8 +358,8 @@ $(LISTER_GEN_BINARY):
$(DOCS_GEN_BINARY): cmd/docs-gen/main.go $(DOCS_GEN_BINARY): cmd/docs-gen/main.go
go build -mod=vendor -o $@ ./cmd/docs-gen go build -mod=vendor -o $@ ./cmd/docs-gen
$(STATICCHECK_BINARY): $(GOLINT_BINARY):
go build -mod=vendor -o $@ honnef.co/go/tools/cmd/staticcheck go build -mod=vendor -o $@ golang.org/x/lint/golint
$(EMBEDMD_BINARY): $(EMBEDMD_BINARY):
go build -mod=vendor -o $@ github.com/campoy/embedmd go build -mod=vendor -o $@ github.com/campoy/embedmd

View File

@ -4,8 +4,8 @@
Kilo is a multi-cloud network overlay built on WireGuard and designed for Kubernetes. Kilo is a multi-cloud network overlay built on WireGuard and designed for Kubernetes.
[![Build Status](https://github.com/squat/kilo/workflows/CI/badge.svg)](https://github.com/squat/kilo/actions?query=workflow%3ACI) [![Build Status](https://github.com/kilo-io/kilo/workflows/CI/badge.svg)](https://github.com/kilo-io/kilo/actions?query=workflow%3ACI)
[![Go Report Card](https://goreportcard.com/badge/github.com/squat/kilo)](https://goreportcard.com/report/github.com/squat/kilo) [![Go Report Card](https://goreportcard.com/badge/github.com/kilo-io/kilo)](https://goreportcard.com/report/github.com/kilo-io/kilo)
[![Docker Pulls](https://img.shields.io/docker/pulls/squat/kilo)](https://hub.docker.com/r/squat/kilo) [![Docker Pulls](https://img.shields.io/docker/pulls/squat/kilo)](https://hub.docker.com/r/squat/kilo)
[![Slack](https://img.shields.io/badge/join%20slack-%23kilo-brightgreen.svg)](https://slack.k8s.io/) [![Slack](https://img.shields.io/badge/join%20slack-%23kilo-brightgreen.svg)](https://slack.k8s.io/)
@ -72,29 +72,29 @@ Kilo can be installed by deploying a DaemonSet to the cluster.
To run Kilo on kubeadm: To run Kilo on kubeadm:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-kubeadm.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-kubeadm.yaml
``` ```
To run Kilo on bootkube: To run Kilo on bootkube:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-bootkube.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-bootkube.yaml
``` ```
To run Kilo on Typhoon: To run Kilo on Typhoon:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-typhoon.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-typhoon.yaml
``` ```
To run Kilo on k3s: To run Kilo on k3s:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-k3s.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-k3s.yaml
``` ```
## Add-on Mode ## Add-on Mode
@ -106,11 +106,11 @@ Kilo currently supports running on top of Flannel.
For example, to run Kilo on a Typhoon cluster running Flannel: For example, to run Kilo on a Typhoon cluster running Flannel:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-typhoon-flannel.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-typhoon-flannel.yaml
``` ```
[See the manifests directory for more examples](https://github.com/squat/kilo/tree/main/manifests). [See the manifests directory for more examples](https://github.com/kilo-io/kilo/tree/main/manifests).
## VPN ## VPN

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -24,9 +24,7 @@ import (
"os" "os"
"os/exec" "os/exec"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/mesh"
) )
type graphHandler struct { type graphHandler struct {
@ -64,7 +62,7 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
peers[p.Name] = p peers[p.Name] = p
} }
} }
topo, err := mesh.NewTopology(nodes, peers, h.granularity, *h.hostname, 0, wgtypes.Key{}, h.subnet, nodes[*h.hostname].PersistentKeepalive, nil) topo, err := mesh.NewTopology(nodes, peers, h.granularity, *h.hostname, 0, []byte{}, h.subnet, nodes[*h.hostname].PersistentKeepalive, nil)
if err != nil { if err != nil {
http.Error(w, fmt.Sprintf("failed to create topology: %v", err), http.StatusInternalServerError) http.Error(w, fmt.Sprintf("failed to create topology: %v", err), http.StatusInternalServerError)
return return

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,7 +15,6 @@
package main package main
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"net" "net"
@ -28,21 +27,20 @@ import (
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"github.com/metalmatze/signal/internalserver"
"github.com/oklog/run" "github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra" flag "github.com/spf13/pflag"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"github.com/squat/kilo/pkg/encapsulation" "github.com/kilo-io/kilo/pkg/encapsulation"
"github.com/squat/kilo/pkg/k8s" "github.com/kilo-io/kilo/pkg/k8s"
kiloclient "github.com/squat/kilo/pkg/k8s/clientset/versioned" kiloclient "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/version" "github.com/kilo-io/kilo/pkg/version"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
const ( const (
@ -80,79 +78,52 @@ var (
}, ", ") }, ", ")
) )
var cmd = &cobra.Command{ // Main is the principal function for the binary, wrapped only by `main` for convenience.
Use: "kg", func Main() error {
Short: "kg is the Kilo agent", backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
Long: `kg is the Kilo agent. cleanUpIface := flag.Bool("clean-up-interface", false, "Should Kilo delete its interface when it shuts down?")
It runs on every node of a cluster, createIface := flag.Bool("create-interface", true, "Should kilo create an interface on startup?")
setting up the public and private keys for the VPN cni := flag.Bool("cni", true, "Should Kilo manage the node's CNI configuration?")
as well as the necessary rules to route packets between locations.`, cniPath := flag.String("cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
PreRunE: preRun, compatibility := flag.String("compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
RunE: runRoot, encapsulate := flag.String("encapsulate", string(encapsulation.Always), fmt.Sprintf("When should Kilo encapsulate packets within a location? Possible values: %s", availableEncapsulations))
SilenceUsage: true, granularity := flag.String("mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
SilenceErrors: true, kubeconfig := flag.String("kubeconfig", "", "Path to kubeconfig.")
} hostname := flag.String("hostname", "", "Hostname of the node on which this process is running.")
iface := flag.String("interface", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
listen := flag.String("listen", ":1107", "The address at which to listen for health and metrics.")
local := flag.Bool("local", true, "Should Kilo manage routes within a location?")
logLevel := flag.String("log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
master := flag.String("master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig).")
mtu := flag.Uint("mtu", wireguard.DefaultMTU, "The MTU of the WireGuard interface created by Kilo.")
topologyLabel := flag.String("topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
var port uint
flag.UintVar(&port, "port", mesh.DefaultKiloPort, "The port over which WireGuard peers should communicate.")
subnet := flag.String("subnet", mesh.DefaultKiloSubnet.String(), "CIDR from which to allocate addresses for WireGuard interfaces.")
resyncPeriod := flag.Duration("resync-period", 30*time.Second, "How often should the Kilo controllers reconcile?")
printVersion := flag.Bool("version", false, "Print version and exit")
flag.Parse()
var ( if *printVersion {
backend string fmt.Println(version.Version)
cleanUpIface bool return nil
createIface bool }
cni bool
cniPath string
compatibility string
encapsulate string
granularity string
hostname string
kubeconfig string
iface string
listen string
local bool
master string
mtu uint
topologyLabel string
port int
subnet string
resyncPeriod time.Duration
iptablesForwardRule bool
prioritisePrivateAddr bool
printVersion bool _, s, err := net.ParseCIDR(*subnet)
logLevel string if err != nil {
return fmt.Errorf("failed to parse %q as CIDR: %v", *subnet, err)
}
logger log.Logger if *hostname == "" {
registry *prometheus.Registry var err error
) *hostname, err = os.Hostname()
if *hostname == "" || err != nil {
return errors.New("failed to determine hostname")
}
}
func init() { logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
cmd.Flags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) switch *logLevel {
cmd.Flags().BoolVar(&cleanUpIface, "clean-up-interface", false, "Should Kilo delete its interface when it shuts down?")
cmd.Flags().BoolVar(&createIface, "create-interface", true, "Should kilo create an interface on startup?")
cmd.Flags().BoolVar(&cni, "cni", true, "Should Kilo manage the node's CNI configuration?")
cmd.Flags().StringVar(&cniPath, "cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
cmd.Flags().StringVar(&compatibility, "compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
cmd.Flags().StringVar(&encapsulate, "encapsulate", string(encapsulation.Always), fmt.Sprintf("When should Kilo encapsulate packets within a location? Possible values: %s", availableEncapsulations))
cmd.Flags().StringVar(&granularity, "mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
cmd.Flags().StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig.")
cmd.Flags().StringVar(&hostname, "hostname", "", "Hostname of the node on which this process is running.")
cmd.Flags().StringVar(&iface, "interface", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
cmd.Flags().StringVar(&listen, "listen", ":1107", "The address at which to listen for health and metrics.")
cmd.Flags().BoolVar(&local, "local", true, "Should Kilo manage routes within a location?")
cmd.Flags().StringVar(&master, "master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig).")
cmd.Flags().UintVar(&mtu, "mtu", wireguard.DefaultMTU, "The MTU of the WireGuard interface created by Kilo.")
cmd.Flags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
cmd.Flags().IntVar(&port, "port", mesh.DefaultKiloPort, "The port over which WireGuard peers should communicate.")
cmd.Flags().StringVar(&subnet, "subnet", mesh.DefaultKiloSubnet.String(), "CIDR from which to allocate addresses for WireGuard interfaces.")
cmd.Flags().DurationVar(&resyncPeriod, "resync-period", 30*time.Second, "How often should the Kilo controllers reconcile?")
cmd.Flags().BoolVar(&iptablesForwardRule, "iptables-forward-rules", false, "Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!")
cmd.Flags().BoolVar(&prioritisePrivateAddr, "prioritise-private-addresses", false, "Prefer to assign a private IP address to the node's endpoint.")
cmd.PersistentFlags().BoolVar(&printVersion, "version", false, "Print version and exit")
cmd.PersistentFlags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
}
func preRun(_ *cobra.Command, _ []string) error {
logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
switch logLevel {
case logLevelAll: case logLevelAll:
logger = level.NewFilter(logger, level.AllowAll()) logger = level.NewFilter(logger, level.AllowAll())
case logLevelDebug: case logLevelDebug:
@ -166,107 +137,77 @@ func preRun(_ *cobra.Command, _ []string) error {
case logLevelNone: case logLevelNone:
logger = level.NewFilter(logger, level.AllowNone()) logger = level.NewFilter(logger, level.AllowNone())
default: default:
return fmt.Errorf("log level %v unknown; possible values are: %s", logLevel, availableLogLevels) return fmt.Errorf("log level %v unknown; possible values are: %s", *logLevel, availableLogLevels)
} }
logger = log.With(logger, "ts", log.DefaultTimestampUTC) logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller) logger = log.With(logger, "caller", log.DefaultCaller)
registry = prometheus.NewRegistry() e := encapsulation.Strategy(*encapsulate)
registry.MustRegister(
collectors.NewGoCollector(),
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
)
return nil
}
// runRoot is the principal function for the binary.
func runRoot(_ *cobra.Command, _ []string) error {
if printVersion {
fmt.Println(version.Version)
return nil
}
_, s, err := net.ParseCIDR(subnet)
if err != nil {
return fmt.Errorf("failed to parse %q as CIDR: %v", subnet, err)
}
if hostname == "" {
var err error
hostname, err = os.Hostname()
if hostname == "" || err != nil {
return errors.New("failed to determine hostname")
}
}
e := encapsulation.Strategy(encapsulate)
switch e { switch e {
case encapsulation.Never: case encapsulation.Never:
case encapsulation.CrossSubnet: case encapsulation.CrossSubnet:
case encapsulation.Always: case encapsulation.Always:
default: default:
return fmt.Errorf("encapsulation %v unknown; possible values are: %s", encapsulate, availableEncapsulations) return fmt.Errorf("encapsulation %v unknown; possible values are: %s", *encapsulate, availableEncapsulations)
} }
var enc encapsulation.Encapsulator var enc encapsulation.Encapsulator
switch compatibility { switch *compatibility {
case "flannel": case "flannel":
enc = encapsulation.NewFlannel(e) enc = encapsulation.NewFlannel(e)
case "cilium":
enc = encapsulation.NewCilium(e)
default: default:
enc = encapsulation.NewIPIP(e) enc = encapsulation.NewIPIP(e)
} }
gr := mesh.Granularity(granularity) gr := mesh.Granularity(*granularity)
switch gr { switch gr {
case mesh.LogicalGranularity: case mesh.LogicalGranularity:
case mesh.FullGranularity: case mesh.FullGranularity:
default: default:
return fmt.Errorf("mesh granularity %v unknown; possible values are: %s", granularity, availableGranularities) return fmt.Errorf("mesh granularity %v unknown; possible values are: %s", *granularity, availableGranularities)
} }
var b mesh.Backend var b mesh.Backend
switch backend { switch *backend {
case k8s.Backend: case k8s.Backend:
config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig) config, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Kubernetes config: %v", err) return fmt.Errorf("failed to create Kubernetes config: %v", err)
} }
c := kubernetes.NewForConfigOrDie(config) c := kubernetes.NewForConfigOrDie(config)
kc := kiloclient.NewForConfigOrDie(config) kc := kiloclient.NewForConfigOrDie(config)
ec := apiextensions.NewForConfigOrDie(config) ec := apiextensions.NewForConfigOrDie(config)
b = k8s.New(c, kc, ec, topologyLabel, log.With(logger, "component", "k8s backend")) b = k8s.New(c, kc, ec, *topologyLabel)
default: default:
return fmt.Errorf("backend %v unknown; possible values are: %s", backend, availableBackends) return fmt.Errorf("backend %v unknown; possible values are: %s", *backend, availableBackends)
} }
if port < 1 || port > 1<<16-1 { m, err := mesh.New(b, enc, gr, *hostname, uint32(port), s, *local, *cni, *cniPath, *iface, *cleanUpIface, *createIface, *mtu, *resyncPeriod, log.With(logger, "component", "kilo"))
return fmt.Errorf("invalid port: port mus be in range [%d:%d], but got %d", 1, 1<<16-1, port)
}
m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, log.With(logger, "component", "kilo"), registry)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Kilo mesh: %v", err) return fmt.Errorf("failed to create Kilo mesh: %v", err)
} }
r := prometheus.NewRegistry()
r.MustRegister(
prometheus.NewGoCollector(),
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
)
m.RegisterMetrics(r)
var g run.Group var g run.Group
{ {
h := internalserver.NewHandler(
internalserver.WithName("Internal Kilo API"),
internalserver.WithPrometheusRegistry(registry),
internalserver.WithPProf(),
)
h.AddEndpoint("/health", "Exposes health checks", healthHandler)
h.AddEndpoint("/graph", "Exposes Kilo mesh topology graph", (&graphHandler{m, gr, &hostname, s}).ServeHTTP)
// Run the HTTP server. // Run the HTTP server.
l, err := net.Listen("tcp", listen) mux := http.NewServeMux()
mux.HandleFunc("/health", healthHandler)
mux.Handle("/graph", &graphHandler{m, gr, hostname, s})
mux.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
l, err := net.Listen("tcp", *listen)
if err != nil { if err != nil {
return fmt.Errorf("failed to listen on %s: %v", listen, err) return fmt.Errorf("failed to listen on %s: %v", *listen, err)
} }
g.Add(func() error { g.Add(func() error {
if err := http.Serve(l, h); err != nil && err != http.ErrServerClosed { if err := http.Serve(l, mux); err != nil && err != http.ErrServerClosed {
return fmt.Errorf("error: server exited unexpectedly: %v", err) return fmt.Errorf("error: server exited unexpectedly: %v", err)
} }
return nil return nil
@ -276,16 +217,15 @@ func runRoot(_ *cobra.Command, _ []string) error {
} }
{ {
ctx, cancel := context.WithCancel(context.Background())
// Start the mesh. // Start the mesh.
g.Add(func() error { g.Add(func() error {
logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version)) logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
if err := m.Run(ctx); err != nil { if err := m.Run(); err != nil {
return fmt.Errorf("error: Kilo exited unexpectedly: %v", err) return fmt.Errorf("error: Kilo exited unexpectedly: %v", err)
} }
return nil return nil
}, func(error) { }, func(error) {
cancel() m.Stop()
}) })
} }
@ -312,15 +252,8 @@ func runRoot(_ *cobra.Command, _ []string) error {
return g.Run() return g.Run()
} }
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version and exit.",
Run: func(_ *cobra.Command, _ []string) { fmt.Println(version.Version) },
}
func main() { func main() {
cmd.AddCommand(webhookCmd, versionCmd) if err := Main(); err != nil {
if err := cmd.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err) fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1) os.Exit(1)
} }

View File

@ -1,273 +0,0 @@
// Copyright 2021 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"syscall"
"time"
"github.com/go-kit/kit/log/level"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
v1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
kilo "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/squat/kilo/pkg/version"
)
var webhookCmd = &cobra.Command{
Use: "webhook",
PreRunE: func(c *cobra.Command, a []string) error {
if c.HasParent() {
return c.Parent().PreRunE(c, a)
}
return nil
},
Short: "webhook starts a HTTPS server to validate updates and creations of Kilo peers.",
RunE: webhook,
}
var (
certPath string
keyPath string
metricsAddr string
listenAddr string
)
func init() {
webhookCmd.Flags().StringVar(&certPath, "cert-file", "", "The path to a certificate file")
webhookCmd.Flags().StringVar(&keyPath, "key-file", "", "The path to a key file")
webhookCmd.Flags().StringVar(&metricsAddr, "listen-metrics", ":1107", "The metrics server will be listening to that address")
webhookCmd.Flags().StringVar(&listenAddr, "listen", ":8443", "The webhook server will be listening to that address")
}
var deserializer = serializer.NewCodecFactory(runtime.NewScheme()).UniversalDeserializer()
var (
validationCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "admission_requests_total",
Help: "The number of received admission reviews requests",
},
[]string{"operation", "response"},
)
requestCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "http_requests_total",
Help: "The number of received http requests",
},
[]string{"handler", "method"},
)
errorCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "errors_total",
Help: "The total number of errors",
},
)
)
func validationHandler(w http.ResponseWriter, r *http.Request) {
level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
body, err := ioutil.ReadAll(r.Body)
if err != nil {
errorCounter.Inc()
level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var admissionReview v1.AdmissionReview
contentType := r.Header.Get("Content-Type")
if contentType != "application/json" {
errorCounter.Inc()
msg := fmt.Sprintf("received Content-Type=%s, expected application/json", contentType)
level.Error(logger).Log("err", msg)
http.Error(w, msg, http.StatusBadRequest)
return
}
response := v1.AdmissionReview{}
_, gvk, err := deserializer.Decode(body, nil, &admissionReview)
if err != nil {
errorCounter.Inc()
msg := fmt.Sprintf("Request could not be decoded: %v", err)
level.Error(logger).Log("err", msg)
http.Error(w, msg, http.StatusBadRequest)
return
}
if *gvk != v1.SchemeGroupVersion.WithKind("AdmissionReview") {
errorCounter.Inc()
msg := "only API v1 is supported"
level.Error(logger).Log("err", msg)
http.Error(w, msg, http.StatusBadRequest)
return
}
response.SetGroupVersionKind(*gvk)
response.Response = &v1.AdmissionResponse{
UID: admissionReview.Request.UID,
}
rawExtension := admissionReview.Request.Object
var peer kilo.Peer
if err := json.Unmarshal(rawExtension.Raw, &peer); err != nil {
errorCounter.Inc()
msg := fmt.Sprintf("could not unmarshal extension to peer spec: %v:", err)
level.Error(logger).Log("err", msg)
http.Error(w, msg, http.StatusBadRequest)
return
}
if err := peer.Validate(); err == nil {
level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "allowed"}).Inc()
response.Response.Allowed = true
} else {
level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "denied"}).Inc()
response.Response.Result = &metav1.Status{
Message: err.Error(),
}
}
res, err := json.Marshal(response)
if err != nil {
errorCounter.Inc()
msg := fmt.Sprintf("failed to marshal response: %v", err)
level.Error(logger).Log("err", msg)
http.Error(w, msg, http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
if _, err := w.Write(res); err != nil {
level.Error(logger).Log("err", err, "msg", "failed to write response")
}
}
func metricsMiddleWare(path string, next func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
requestCounter.With(prometheus.Labels{"method": r.Method, "handler": path}).Inc()
next(w, r)
}
}
func webhook(_ *cobra.Command, _ []string) error {
if printVersion {
fmt.Println(version.Version)
os.Exit(0)
}
registry.MustRegister(
errorCounter,
validationCounter,
requestCounter,
)
ctx, cancel := context.WithCancel(context.Background())
defer func() {
cancel()
}()
var g run.Group
g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))
{
mm := http.NewServeMux()
mm.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
msrv := &http.Server{
Addr: metricsAddr,
Handler: mm,
}
g.Add(
func() error {
level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
err := msrv.ListenAndServe()
level.Info(logger).Log("msg", "metrics server exited", "err", err)
return err
},
func(err error) {
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
} else {
level.Error(logger).Log("msg", "received error", "err", err.Error())
}
level.Info(logger).Log("msg", "shutting down metrics server gracefully")
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer func() {
cancel()
}()
if err := msrv.Shutdown(ctx); err != nil {
level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
msrv.Close()
}
},
)
}
{
mux := http.NewServeMux()
mux.HandleFunc("/validate", metricsMiddleWare("/validate", validationHandler))
srv := &http.Server{
Addr: listenAddr,
Handler: mux,
}
g.Add(
func() error {
level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
err := srv.ListenAndServeTLS(certPath, keyPath)
level.Info(logger).Log("msg", "webhook server exited", "err", err)
return err
},
func(err error) {
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
} else {
level.Error(logger).Log("msg", "received error", "err", err.Error())
}
level.Info(logger).Log("msg", "shutting down webhook server gracefully")
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer func() {
cancel()
}()
if err := srv.Shutdown(ctx); err != nil {
level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
srv.Close()
}
},
)
}
err := g.Run()
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
return nil
}
return err
}

View File

@ -1,372 +0,0 @@
// Copyright 2022 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package main
import (
"context"
"errors"
"fmt"
"net"
"os"
"sort"
"strings"
"syscall"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/run"
"github.com/spf13/cobra"
"golang.zx2c4.com/wireguard/wgctrl"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/squat/kilo/pkg/iproute"
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/squat/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/route"
"github.com/squat/kilo/pkg/wireguard"
)
var (
logLevel string
connectOpts struct {
allowedIP net.IPNet
allowedIPs []net.IPNet
privateKey string
cleanUp bool
mtu uint
resyncPeriod time.Duration
interfaceName string
persistentKeepalive int
}
)
func takeIPNet(_ net.IP, i *net.IPNet, err error) *net.IPNet {
if err != nil {
panic(err)
}
return i
}
func connect() *cobra.Command {
cmd := &cobra.Command{
Use: "connect",
Args: cobra.ExactArgs(1),
RunE: runConnect,
Short: "connect to a Kilo cluster as a peer over WireGuard",
SilenceUsage: true,
}
cmd.Flags().IPNetVarP(&connectOpts.allowedIP, "allowed-ip", "a", *takeIPNet(net.ParseCIDR("10.10.10.10/32")), "Allowed IP of the peer.")
cmd.Flags().StringSliceVar(&allowedIPs, "allowed-ips", []string{}, "Additional allowed IPs of the cluster, e.g. the service CIDR.")
cmd.Flags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
cmd.Flags().StringVar(&connectOpts.privateKey, "private-key", "", "Path to an existing WireGuard private key file.")
cmd.Flags().BoolVar(&connectOpts.cleanUp, "clean-up", true, "Should Kilo clean up the routes and interface when it shuts down?")
cmd.Flags().UintVar(&connectOpts.mtu, "mtu", uint(1420), "The MTU for the WireGuard interface.")
cmd.Flags().DurationVar(&connectOpts.resyncPeriod, "resync-period", 30*time.Second, "How often should Kilo reconcile?")
cmd.Flags().StringVarP(&connectOpts.interfaceName, "interface", "i", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
cmd.Flags().IntVar(&connectOpts.persistentKeepalive, "persistent-keepalive", 10, "How often should WireGuard send keepalives? Setting to 0 will disable sending keepalives.")
availableLogLevels = strings.Join([]string{
logLevelAll,
logLevelDebug,
logLevelInfo,
logLevelWarn,
logLevelError,
logLevelNone,
}, ", ")
return cmd
}
func runConnect(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
switch logLevel {
case logLevelAll:
logger = level.NewFilter(logger, level.AllowAll())
case logLevelDebug:
logger = level.NewFilter(logger, level.AllowDebug())
case logLevelInfo:
logger = level.NewFilter(logger, level.AllowInfo())
case logLevelWarn:
logger = level.NewFilter(logger, level.AllowWarn())
case logLevelError:
logger = level.NewFilter(logger, level.AllowError())
case logLevelNone:
logger = level.NewFilter(logger, level.AllowNone())
default:
return fmt.Errorf("log level %s unknown; possible values are: %s", logLevel, availableLogLevels)
}
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, "caller", log.DefaultCaller)
peerName := args[0]
for i := range allowedIPs {
_, aip, err := net.ParseCIDR(allowedIPs[i])
if err != nil {
return err
}
connectOpts.allowedIPs = append(connectOpts.allowedIPs, *aip)
}
var privateKey wgtypes.Key
var err error
if connectOpts.privateKey == "" {
privateKey, err = wgtypes.GeneratePrivateKey()
if err != nil {
return fmt.Errorf("failed to generate private key: %w", err)
}
} else {
raw, err := os.ReadFile(connectOpts.privateKey)
if err != nil {
return fmt.Errorf("failed to read private key: %w", err)
}
privateKey, err = wgtypes.ParseKey(string(raw))
if err != nil {
return fmt.Errorf("failed to parse private key: %w", err)
}
}
publicKey := privateKey.PublicKey()
level.Info(logger).Log("msg", "generated public key", "key", publicKey)
if _, err := opts.kc.KiloV1alpha1().Peers().Get(ctx, peerName, metav1.GetOptions{}); apierrors.IsNotFound(err) {
peer := &v1alpha1.Peer{
ObjectMeta: metav1.ObjectMeta{
Name: peerName,
},
Spec: v1alpha1.PeerSpec{
AllowedIPs: []string{connectOpts.allowedIP.String()},
PersistentKeepalive: connectOpts.persistentKeepalive,
PublicKey: publicKey.String(),
},
}
if _, err := opts.kc.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("failed to create peer: %w", err)
}
level.Info(logger).Log("msg", "created peer", "peer", peerName)
if connectOpts.cleanUp {
defer func() {
ctxWithTimeout, cancelWithTimeout := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelWithTimeout()
if err := opts.kc.KiloV1alpha1().Peers().Delete(ctxWithTimeout, peerName, metav1.DeleteOptions{}); err != nil {
level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
} else {
level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
}
}()
}
} else if err != nil {
return fmt.Errorf("failed to get peer: %w", err)
}
iface, _, err := wireguard.New(connectOpts.interfaceName, connectOpts.mtu)
if err != nil {
return fmt.Errorf("failed to create wg interface: %w", err)
}
level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
table := route.NewTable()
if connectOpts.cleanUp {
defer cleanUp(iface, table, logger)
}
if err := iproute.SetAddress(iface, &connectOpts.allowedIP); err != nil {
return err
}
level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
if err := iproute.Set(iface, true); err != nil {
return err
}
var g run.Group
g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))
{
g.Add(
func() error {
errCh, err := table.Run(ctx.Done())
if err != nil {
return fmt.Errorf("failed to watch for route table updates: %w", err)
}
for {
select {
case err, ok := <-errCh:
if ok {
level.Error(logger).Log("err", err.Error())
} else {
return nil
}
case <-ctx.Done():
return nil
}
}
},
func(err error) {
cancel()
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
} else {
level.Error(logger).Log("msg", "received error", "err", err.Error())
}
},
)
}
{
g.Add(
func() error {
level.Info(logger).Log("msg", "starting syncer")
for {
if err := sync(table, peerName, privateKey, iface, logger); err != nil {
level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
}
select {
case <-time.After(connectOpts.resyncPeriod):
case <-ctx.Done():
return nil
}
}
}, func(err error) {
cancel()
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
} else {
level.Error(logger).Log("msg", "received error", "err", err.Error())
}
})
}
err = g.Run()
var serr run.SignalError
if ok := errors.As(err, &serr); ok {
return nil
}
return err
}
func cleanUp(iface int, t *route.Table, logger log.Logger) {
if err := iproute.Set(iface, false); err != nil {
level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
}
if err := iproute.RemoveInterface(iface); err != nil {
level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
}
if err := t.CleanUp(); err != nil {
level.Error(logger).Log("failed to clean up routes: %v", err)
}
}
func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int, logger log.Logger) error {
ns, err := opts.backend.Nodes().List()
if err != nil {
return fmt.Errorf("failed to list nodes: %w", err)
}
for _, n := range ns {
_, err := n.Endpoint.UDPAddr(true)
if err != nil {
return err
}
}
ps, err := opts.backend.Peers().List()
if err != nil {
return fmt.Errorf("failed to list peers: %w", err)
}
// Obtain the Granularity by looking at the annotation of the first node.
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
return fmt.Errorf("failed to determine granularity: %w", err)
}
var hostname string
var subnet *net.IPNet
nodes := make(map[string]*mesh.Node)
var nodeNames []string
for _, n := range ns {
if n.Ready() {
nodes[n.Name] = n
hostname = n.Name
nodeNames = append(nodeNames, n.Name)
}
if n.WireGuardIP != nil && subnet == nil {
subnet = n.WireGuardIP
}
}
if len(nodes) == 0 {
return errors.New("did not find any valid Kilo nodes in the cluster")
}
if subnet == nil {
return errors.New("did not find a valid Kilo subnet on any node")
}
subnet.IP = subnet.IP.Mask(subnet.Mask)
sort.Strings(nodeNames)
nodes[nodeNames[0]].AllowedLocationIPs = append(nodes[nodeNames[0]].AllowedLocationIPs, connectOpts.allowedIPs...)
peers := make(map[string]*mesh.Peer)
for _, p := range ps {
if p.Ready() {
peers[p.Name] = p
}
}
if _, ok := peers[peerName]; !ok {
return fmt.Errorf("did not find any peer named %q in the cluster", peerName)
}
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, wgtypes.Key{}, subnet, *peers[peerName].PersistentKeepaliveInterval, logger)
if err != nil {
return fmt.Errorf("failed to create topology: %w", err)
}
conf := t.PeerConf(peerName)
conf.PrivateKey = &privateKey
conf.ListenPort = &opts.port
wgClient, err := wgctrl.New()
if err != nil {
return err
}
defer wgClient.Close()
current, err := wgClient.Device(connectOpts.interfaceName)
if err != nil {
return err
}
var equal bool
var diff string
equal, diff = conf.Equal(current)
if !equal {
// If the key is empty, then it's the first time we are running
// so don't bother printing a diff.
if current.PrivateKey != [wgtypes.KeyLen]byte{} {
level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
}
level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
if err := wgClient.ConfigureDevice(connectOpts.interfaceName, conf.WGConfig()); err != nil {
return err
}
}
if err := table.Set(t.PeerRoutes(peerName, iface, connectOpts.allowedIPs)); err != nil {
return fmt.Errorf("failed to update route table: %w", err)
}
return nil
}

View File

@ -18,9 +18,8 @@ import (
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
) )
func graph() *cobra.Command { func graph() *cobra.Command {
@ -34,15 +33,15 @@ func graph() *cobra.Command {
func runGraph(_ *cobra.Command, _ []string) error { func runGraph(_ *cobra.Command, _ []string) error {
ns, err := opts.backend.Nodes().List() ns, err := opts.backend.Nodes().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list nodes: %w", err) return fmt.Errorf("failed to list nodes: %v", err)
} }
ps, err := opts.backend.Peers().List() ps, err := opts.backend.Peers().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list peers: %w", err) return fmt.Errorf("failed to list peers: %v", err)
} }
// Obtain the Granularity by looking at the annotation of the first node. // Obtain the Granularity by looking at the annotation of the first node.
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil { if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
return fmt.Errorf("failed to determine granularity: %w", err) return fmt.Errorf("failed to obtain granularity: %w", err)
} }
var hostname string var hostname string
@ -67,13 +66,13 @@ func runGraph(_ *cobra.Command, _ []string) error {
peers[p.Name] = p peers[p.Name] = p
} }
} }
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil) t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, []byte{}, subnet, nodes[hostname].PersistentKeepalive, nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create topology: %w", err) return fmt.Errorf("failed to create topology: %v", err)
} }
g, err := t.Dot() g, err := t.Dot()
if err != nil { if err != nil {
return fmt.Errorf("failed to generate graph: %w", err) return fmt.Errorf("failed to generate graph: %v", err)
} }
fmt.Println(g) fmt.Println(g)
return nil return nil

View File

@ -21,16 +21,15 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/go-kit/kit/log"
"github.com/spf13/cobra" "github.com/spf13/cobra"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd"
"github.com/squat/kilo/pkg/k8s" "github.com/kilo-io/kilo/pkg/k8s"
kiloclient "github.com/squat/kilo/pkg/k8s/clientset/versioned" kiloclient "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/version" "github.com/kilo-io/kilo/pkg/version"
) )
const ( const (
@ -62,8 +61,7 @@ var (
opts struct { opts struct {
backend mesh.Backend backend mesh.Backend
granularity mesh.Granularity granularity mesh.Granularity
kc kiloclient.Interface port uint32
port int
} }
backend string backend string
granularity string granularity string
@ -71,40 +69,36 @@ var (
topologyLabel string topologyLabel string
) )
func runRoot(c *cobra.Command, _ []string) error { func runRoot(_ *cobra.Command, _ []string) error {
if opts.port < 1 || opts.port > 1<<16-1 {
return fmt.Errorf("invalid port: port mus be in range [%d:%d], but got %d", 1, 1<<16-1, opts.port)
}
opts.granularity = mesh.Granularity(granularity) opts.granularity = mesh.Granularity(granularity)
switch opts.granularity { switch opts.granularity {
case mesh.LogicalGranularity: case mesh.LogicalGranularity:
case mesh.FullGranularity: case mesh.FullGranularity:
case mesh.AutoGranularity: case mesh.AutoGranularity:
default: default:
return fmt.Errorf("mesh granularity %s unknown; posible values are: %s", granularity, availableGranularities) return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", granularity, availableGranularities)
} }
switch backend { switch backend {
case k8s.Backend: case k8s.Backend:
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Kubernetes config: %w", err) return fmt.Errorf("failed to create Kubernetes config: %v", err)
} }
c := kubernetes.NewForConfigOrDie(config) c := kubernetes.NewForConfigOrDie(config)
opts.kc = kiloclient.NewForConfigOrDie(config) kc := kiloclient.NewForConfigOrDie(config)
ec := apiextensions.NewForConfigOrDie(config) ec := apiextensions.NewForConfigOrDie(config)
opts.backend = k8s.New(c, opts.kc, ec, topologyLabel, log.NewNopLogger()) opts.backend = k8s.New(c, kc, ec, topologyLabel)
default: default:
return fmt.Errorf("backend %s unknown; posible values are: %s", backend, availableBackends) return fmt.Errorf("backend %v unknown; posible values are: %s", backend, availableBackends)
} }
if err := opts.backend.Nodes().Init(c.Context()); err != nil { if err := opts.backend.Nodes().Init(make(chan struct{})); err != nil {
return fmt.Errorf("failed to initialize node backend: %w", err) return fmt.Errorf("failed to initialize node backend: %v", err)
} }
if err := opts.backend.Peers().Init(c.Context()); err != nil { if err := opts.backend.Peers().Init(make(chan struct{})); err != nil {
return fmt.Errorf("failed to initialize peer backend: %w", err) return fmt.Errorf("failed to initialize peer backend: %v", err)
} }
return nil return nil
} }
@ -116,7 +110,6 @@ func main() {
Long: "", Long: "",
PersistentPreRunE: runRoot, PersistentPreRunE: runRoot,
Version: version.Version, Version: version.Version,
SilenceErrors: true,
} }
cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities)) cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
@ -125,13 +118,12 @@ func main() {
defaultKubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config") defaultKubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config")
} }
cmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", defaultKubeconfig, "Path to kubeconfig.") cmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", defaultKubeconfig, "Path to kubeconfig.")
cmd.PersistentFlags().IntVar(&opts.port, "port", mesh.DefaultKiloPort, "The WireGuard port over which the nodes communicate.") cmd.PersistentFlags().Uint32Var(&opts.port, "port", mesh.DefaultKiloPort, "The WireGuard port over which the nodes communicate.")
cmd.PersistentFlags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.") cmd.PersistentFlags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
for _, subCmd := range []*cobra.Command{ for _, subCmd := range []*cobra.Command{
graph(), graph(),
showConf(), showConf(),
connect(),
} { } {
cmd.AddCommand(subCmd) cmd.AddCommand(subCmd)
} }
@ -142,7 +134,7 @@ func main() {
} }
} }
func determineGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) { func optainGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
if gr == mesh.AutoGranularity { if gr == mesh.AutoGranularity {
if len(ns) == 0 { if len(ns) == 0 {
return gr, errors.New("could not get any nodes") return gr, errors.New("could not get any nodes")
@ -152,7 +144,7 @@ func determineGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularit
case mesh.LogicalGranularity: case mesh.LogicalGranularity:
case mesh.FullGranularity: case mesh.FullGranularity:
default: default:
return ret, fmt.Errorf("mesh granularity %s is not supported", opts.granularity) return ret, fmt.Errorf("mesh granularity %v is not supported", opts.granularity)
} }
return ret, nil return ret, nil
} }

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -15,23 +15,22 @@
package main package main
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"os" "os"
"strings" "strings"
"time"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/json"
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
const ( const (
@ -48,7 +47,7 @@ var (
}, ", ") }, ", ")
allowedIPs []string allowedIPs []string
showConfOpts struct { showConfOpts struct {
allowedIPs []net.IPNet allowedIPs []*net.IPNet
serializer *json.Serializer serializer *json.Serializer
output string output string
asPeer bool asPeer bool
@ -83,14 +82,14 @@ func runShowConf(c *cobra.Command, args []string) error {
case outputFormatYAML: case outputFormatYAML:
showConfOpts.serializer = json.NewYAMLSerializer(json.DefaultMetaFactory, peerCreatorTyper{}, peerCreatorTyper{}) showConfOpts.serializer = json.NewYAMLSerializer(json.DefaultMetaFactory, peerCreatorTyper{}, peerCreatorTyper{})
default: default:
return fmt.Errorf("output format %s unknown; posible values are: %s", showConfOpts.output, availableOutputFormats) return fmt.Errorf("output format %v unknown; posible values are: %s", showConfOpts.output, availableOutputFormats)
} }
for i := range allowedIPs { for i := range allowedIPs {
_, aip, err := net.ParseCIDR(allowedIPs[i]) _, aip, err := net.ParseCIDR(allowedIPs[i])
if err != nil { if err != nil {
return fmt.Errorf("allowed-ips must contain only valid CIDRs; got %q", allowedIPs[i]) return fmt.Errorf("allowed-ips must contain only valid CIDRs; got %q", allowedIPs[i])
} }
showConfOpts.allowedIPs = append(showConfOpts.allowedIPs, *aip) showConfOpts.allowedIPs = append(showConfOpts.allowedIPs, aip)
} }
return runRoot(c, args) return runRoot(c, args)
} }
@ -116,15 +115,15 @@ func showConfPeer() *cobra.Command {
func runShowConfNode(_ *cobra.Command, args []string) error { func runShowConfNode(_ *cobra.Command, args []string) error {
ns, err := opts.backend.Nodes().List() ns, err := opts.backend.Nodes().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list nodes: %w", err) return fmt.Errorf("failed to list nodes: %v", err)
} }
ps, err := opts.backend.Peers().List() ps, err := opts.backend.Peers().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list peers: %w", err) return fmt.Errorf("failed to list peers: %v", err)
} }
// Obtain the Granularity by looking at the annotation of the first node. // Obtain the Granularity by looking at the annotation of the first node.
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil { if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
return fmt.Errorf("failed to determine granularity: %w", err) return fmt.Errorf("failed to obtain granularity: %w", err)
} }
hostname := args[0] hostname := args[0]
subnet := mesh.DefaultKiloSubnet subnet := mesh.DefaultKiloSubnet
@ -152,14 +151,14 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
} }
} }
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, int(opts.port), wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil) t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, []byte{}, subnet, nodes[hostname].PersistentKeepalive, nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create topology: %w", err) return fmt.Errorf("failed to create topology: %v", err)
} }
var found bool var found bool
for _, p := range t.PeerConf("").Peers { for _, p := range t.PeerConf("").Peers {
if p.PublicKey == nodes[hostname].Key { if bytes.Equal(p.PublicKey, nodes[hostname].Key) {
found = true found = true
break break
} }
@ -172,7 +171,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
if !showConfOpts.asPeer { if !showConfOpts.asPeer {
c, err := t.Conf().Bytes() c, err := t.Conf().Bytes()
if err != nil { if err != nil {
return fmt.Errorf("failed to generate configuration: %w", err) return fmt.Errorf("failed to generate configuration: %v", err)
} }
_, err = os.Stdout.Write(c) _, err = os.Stdout.Write(c)
return err return err
@ -183,9 +182,6 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
fallthrough fallthrough
case outputFormatYAML: case outputFormatYAML:
p := t.AsPeer() p := t.AsPeer()
if p == nil {
return errors.New("cannot generate config from nil peer")
}
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...) p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
p.DeduplicateIPs() p.DeduplicateIPs()
k8sp := translatePeer(p) k8sp := translatePeer(p)
@ -193,16 +189,13 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
return showConfOpts.serializer.Encode(k8sp, os.Stdout) return showConfOpts.serializer.Encode(k8sp, os.Stdout)
case outputFormatWireGuard: case outputFormatWireGuard:
p := t.AsPeer() p := t.AsPeer()
if p == nil {
return errors.New("cannot generate config from nil peer")
}
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...) p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
p.DeduplicateIPs() p.DeduplicateIPs()
c, err := (&wireguard.Conf{ c, err := (&wireguard.Conf{
Peers: []wireguard.Peer{*p}, Peers: []*wireguard.Peer{p},
}).Bytes() }).Bytes()
if err != nil { if err != nil {
return fmt.Errorf("failed to generate configuration: %w", err) return fmt.Errorf("failed to generate configuration: %v", err)
} }
_, err = os.Stdout.Write(c) _, err = os.Stdout.Write(c)
return err return err
@ -213,15 +206,15 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
func runShowConfPeer(_ *cobra.Command, args []string) error { func runShowConfPeer(_ *cobra.Command, args []string) error {
ns, err := opts.backend.Nodes().List() ns, err := opts.backend.Nodes().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list nodes: %w", err) return fmt.Errorf("failed to list nodes: %v", err)
} }
ps, err := opts.backend.Peers().List() ps, err := opts.backend.Peers().List()
if err != nil { if err != nil {
return fmt.Errorf("failed to list peers: %w", err) return fmt.Errorf("failed to list peers: %v", err)
} }
// Obtain the Granularity by looking at the annotation of the first node. // Obtain the Granularity by looking at the annotation of the first node.
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil { if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
return fmt.Errorf("failed to determine granularity: %w", err) return fmt.Errorf("failed to obtain granularity: %w", err)
} }
var hostname string var hostname string
subnet := mesh.DefaultKiloSubnet subnet := mesh.DefaultKiloSubnet
@ -251,18 +244,14 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
return fmt.Errorf("did not find any peer named %q in the cluster", peer) return fmt.Errorf("did not find any peer named %q in the cluster", peer)
} }
pka := time.Duration(0) t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, []byte{}, subnet, peers[peer].PersistentKeepalive, nil)
if p := peers[peer].PersistentKeepaliveInterval; p != nil {
pka = *p
}
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, wgtypes.Key{}, subnet, pka, nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create topology: %w", err) return fmt.Errorf("failed to create topology: %v", err)
} }
if !showConfOpts.asPeer { if !showConfOpts.asPeer {
c, err := t.PeerConf(peer).Bytes() c, err := t.PeerConf(peer).Bytes()
if err != nil { if err != nil {
return fmt.Errorf("failed to generate configuration: %w", err) return fmt.Errorf("failed to generate configuration: %v", err)
} }
_, err = os.Stdout.Write(c) _, err = os.Stdout.Write(c)
return err return err
@ -283,10 +272,10 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...) p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
p.DeduplicateIPs() p.DeduplicateIPs()
c, err := (&wireguard.Conf{ c, err := (&wireguard.Conf{
Peers: []wireguard.Peer{*p}, Peers: []*wireguard.Peer{p},
}).Bytes() }).Bytes()
if err != nil { if err != nil {
return fmt.Errorf("failed to generate configuration: %w", err) return fmt.Errorf("failed to generate configuration: %v", err)
} }
_, err = os.Stdout.Write(c) _, err = os.Stdout.Write(c)
return err return err
@ -295,7 +284,6 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
} }
// translatePeer translates a wireguard.Peer to a Peer CRD. // translatePeer translates a wireguard.Peer to a Peer CRD.
// TODO this function has many similarities to peerBackend.Set(name, peer)
func translatePeer(peer *wireguard.Peer) *v1alpha1.Peer { func translatePeer(peer *wireguard.Peer) *v1alpha1.Peer {
if peer == nil { if peer == nil {
return &v1alpha1.Peer{} return &v1alpha1.Peer{}
@ -303,33 +291,36 @@ func translatePeer(peer *wireguard.Peer) *v1alpha1.Peer {
var aips []string var aips []string
for _, aip := range peer.AllowedIPs { for _, aip := range peer.AllowedIPs {
// Skip any invalid IPs. // Skip any invalid IPs.
// TODO all IPs should be valid, so no need to skip here? if aip == nil {
if aip.String() == (&net.IPNet{}).String() {
continue continue
} }
aips = append(aips, aip.String()) aips = append(aips, aip.String())
} }
var endpoint *v1alpha1.PeerEndpoint var endpoint *v1alpha1.PeerEndpoint
if peer.Endpoint.Port() > 0 || !peer.Endpoint.HasDNS() { if peer.Endpoint != nil && peer.Endpoint.Port > 0 && (peer.Endpoint.IP != nil || peer.Endpoint.DNS != "") {
var ip string
if peer.Endpoint.IP != nil {
ip = peer.Endpoint.IP.String()
}
endpoint = &v1alpha1.PeerEndpoint{ endpoint = &v1alpha1.PeerEndpoint{
DNSOrIP: v1alpha1.DNSOrIP{ DNSOrIP: v1alpha1.DNSOrIP{
IP: peer.Endpoint.IP().String(), DNS: peer.Endpoint.DNS,
DNS: peer.Endpoint.DNS(), IP: ip,
}, },
Port: uint32(peer.Endpoint.Port()), Port: peer.Endpoint.Port,
} }
} }
var key string var key string
if peer.PublicKey != (wgtypes.Key{}) { if len(peer.PublicKey) > 0 {
key = peer.PublicKey.String() key = string(peer.PublicKey)
} }
var psk string var psk string
if peer.PresharedKey != nil { if len(peer.PresharedKey) > 0 {
psk = peer.PresharedKey.String() psk = string(peer.PresharedKey)
} }
var pka int var pka int
if peer.PersistentKeepaliveInterval != nil && *peer.PersistentKeepaliveInterval > time.Duration(0) { if peer.PersistentKeepalive > 0 {
pka = int(*peer.PersistentKeepaliveInterval) pka = peer.PersistentKeepalive
} }
return &v1alpha1.Peer{ return &v1alpha1.Peer{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{

View File

@ -14,7 +14,7 @@ To follow along, you need to install the following utilities:
Clone the Repository and `cd` into it. Clone the Repository and `cd` into it.
```shell ```shell
git clone https://github.com/squat/kilo.git git clone https://github.com/kilo-io/kilo.git
cd kilo cd kilo
``` ```

View File

@ -1,962 +0,0 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "7.5.4"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "stat",
"name": "Stat",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"unit": "Bps"
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"hiddenSeries": false,
"id": 12,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum by (pod) (rate(wireguard_received_bytes_total[1h])) + sum by (pod) (rate(wireguard_sent_bytes_total[1h]))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [
{
"$$hashKey": "object:64",
"colorMode": "background6",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.12)",
"line": false,
"lineColor": "rgba(237, 46, 24, 0.60)",
"op": "time"
}
],
"timeShift": null,
"title": "Throughput",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:42",
"format": "Bps",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:43",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"hiddenSeries": false,
"id": 10,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": false,
"expr": "(sum(rate(wireguard_sent_bytes_total[5m])) - sum(rate(wireguard_received_bytes_total[5m])))/(sum(rate(wireguard_sent_bytes_total[5m])) + sum(rate(wireguard_received_bytes_total[5m])))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Slip (send - received)",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:502",
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:503",
"format": "Bps",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"hiddenSeries": false,
"id": 16,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": false,
"expr": "sum by (public_key) (time() - (wireguard_latest_handshake_seconds!=0))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "latest handshake",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:219",
"format": "s",
"label": null,
"logBase": 1,
"max": "1000",
"min": "0",
"show": true
},
{
"$$hashKey": "object:220",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"hiddenSeries": false,
"id": 18,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum by (instance) (rate(kilo_reconciles_total[30m]))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "kilo reconciles",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:539",
"decimals": null,
"format": "hertz",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:540",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 4,
"x": 0,
"y": 16
},
"id": 4,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "7.5.4",
"targets": [
{
"exemplar": true,
"expr": "avg(kilo_peers)",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"title": "Kilo Peers",
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 4,
"x": 4,
"y": 16
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "7.5.4",
"targets": [
{
"exemplar": false,
"expr": "avg(kilo_nodes)",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Kilo Nodes",
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 4,
"x": 8,
"y": 16
},
"id": 8,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"text": {},
"textMode": "auto"
},
"pluginVersion": "7.5.4",
"targets": [
{
"exemplar": false,
"expr": "sum(kilo_leader)",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"title": "segments",
"type": "stat"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 16
},
"hiddenSeries": false,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": false,
"expr": "sum by (instance) (rate(kilo_errors_total[10m]))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Kilo Errors",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:446",
"format": "hertz",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:447",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 24
},
"hiddenSeries": false,
"id": 20,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": true,
"expr": "sum by (instance) (rate(process_cpu_seconds_total{pod=~\"kilo-.*\"}[1m]))",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "CPU usage",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:162",
"format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:163",
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"fieldConfig": {
"defaults": {},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"hiddenSeries": false,
"id": 22,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "7.5.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"exemplar": false,
"expr": "sum by (instance) (process_resident_memory_bytes{pod=~\"kilo-.*\"})",
"interval": "",
"legendFormat": "",
"queryType": "randomWalk",
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Memory Allocation",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"$$hashKey": "object:231",
"format": "decbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"$$hashKey": "object:232",
"format": "decmbytes",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"collapsed": false,
"datasource": null,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 32
},
"id": 14,
"panels": [],
"title": "Row title",
"type": "row"
}
],
"refresh": false,
"schemaVersion": 27,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Kilo",
"uid": "R8Lja3H7z",
"version": 11
}

Binary file not shown (image, 543 KiB).

View File

@ -8,7 +8,7 @@ It performs several key functions, including:
* maintaining routing table entries and iptables rules. * maintaining routing table entries and iptables rules.
`kg` is typically installed on all nodes of a Kubernetes cluster using a DaemonSet. `kg` is typically installed on all nodes of a Kubernetes cluster using a DaemonSet.
Example manifests can be found [in the manifests directory](https://github.com/squat/kilo/tree/main/manifests). Example manifests can be found [in the manifests directory](https://github.com/kilo-io/kilo/tree/main/manifests).
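For a concrete starting point, a minimal installation usually applies the CRDs followed by the DaemonSet manifest that matches the cluster; the `kilo-kubeadm.yaml` file name below is only an assumption for illustration, so pick the appropriate file from the linked manifests directory:

```shell
# Install the Kilo CRDs, then the DaemonSet manifest for your cluster flavor (kubeadm assumed here).
kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-kubeadm.yaml
```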
## Usage ## Usage
@ -16,45 +16,26 @@ The behavior of `kg` can be configured using the command line flags listed below
[embedmd]:# (../tmp/help.txt) [embedmd]:# (../tmp/help.txt)
```txt ```txt
kg is the Kilo agent. Usage of bin//linux/amd64/kg:
It runs on every node of a cluster, --backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes")
setting up the public and private keys for the VPN --clean-up-interface Should Kilo delete its interface when it shuts down?
as well as the necessary rules to route packets between locations. --cni Should Kilo manage the node's CNI configuration? (default true)
--cni-path string Path to CNI config. (default "/etc/cni/net.d/10-kilo.conflist")
Usage: --compatibility string Should Kilo run in compatibility mode? Possible values: flannel
kg [flags] --create-interface Should kilo create an interface on startup? (default true)
kg [command] --encapsulate string When should Kilo encapsulate packets within a location? Possible values: never, crosssubnet, always (default "always")
--hostname string Hostname of the node on which this process is running.
Available Commands: --interface string Name of the Kilo interface to use; if it does not exist, it will be created. (default "kilo0")
completion generate the autocompletion script for the specified shell --kubeconfig string Path to kubeconfig.
help Help about any command --listen string The address at which to listen for health and metrics. (default ":1107")
version Print the version and exit. --local Should Kilo manage routes within a location? (default true)
webhook webhook starts a HTTPS server to validate updates and creations of Kilo peers. --log-level string Log level to use. Possible values: all, debug, info, warn, error, none (default "info")
--master string The address of the Kubernetes API server (overrides any value in kubeconfig).
Flags: --mesh-granularity string The granularity of the network mesh to create. Possible values: location, full (default "location")
--backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes") --mtu uint The MTU of the WireGuard interface created by Kilo. (default 1420)
--clean-up-interface Should Kilo delete its interface when it shuts down? --port uint The port over which WireGuard peers should communicate. (default 51820)
--cni Should Kilo manage the node's CNI configuration? (default true) --resync-period duration How often should the Kilo controllers reconcile? (default 30s)
--cni-path string Path to CNI config. (default "/etc/cni/net.d/10-kilo.conflist") --subnet string CIDR from which to allocate addresses for WireGuard interfaces. (default "10.4.0.0/16")
--compatibility string Should Kilo run in compatibility mode? Possible values: flannel --topology-label string Kubernetes node label used to group nodes into logical locations. (default "topology.kubernetes.io/region")
--create-interface Should kilo create an interface on startup? (default true) --version Print version and exit
--encapsulate string When should Kilo encapsulate packets within a location? Possible values: never, crosssubnet, always (default "always")
-h, --help help for kg
--hostname string Hostname of the node on which this process is running.
--interface string Name of the Kilo interface to use; if it does not exist, it will be created. (default "kilo0")
--iptables-forward-rules Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!
--kubeconfig string Path to kubeconfig.
--listen string The address at which to listen for health and metrics. (default ":1107")
--local Should Kilo manage routes within a location? (default true)
--log-level string Log level to use. Possible values: all, debug, info, warn, error, none (default "info")
--master string The address of the Kubernetes API server (overrides any value in kubeconfig).
--mesh-granularity string The granularity of the network mesh to create. Possible values: location, full (default "location")
--mtu uint The MTU of the WireGuard interface created by Kilo. (default 1420)
--port int The port over which WireGuard peers should communicate. (default 51820)
--prioritise-private-addresses Prefer to assign a private IP address to the node's endpoint.
--resync-period duration How often should the Kilo controllers reconcile? (default 30s)
--subnet string CIDR from which to allocate addresses for WireGuard interfaces. (default "10.4.0.0/16")
--topology-label string Kubernetes node label used to group nodes into logical locations. (default "topology.kubernetes.io/region")
--version Print version and exit
``` ```

View File

@ -6,20 +6,20 @@ This tool can be used to understand a mesh's topology, get the WireGuard configu
## Installation ## Installation
The `kgctl` binary is automatically compiled for Linux, macOS, and Windows for every release of Kilo and can be downloaded from [the GitHub releases page](https://github.com/squat/kilo/releases/latest). The `kgctl` binary is automatically compiled for Linux, macOS, and Windows for every release of Kilo and can be downloaded from [the GitHub releases page](https://github.com/kilo-io/kilo/releases/latest).
### Building from Source ### Building from Source
Kilo is written in Golang and as a result the [Go toolchain must be installed](https://golang.org/doc/install) in order to build the `kgctl` binary. Kilo is written in Golang and as a result the [Go toolchain must be installed](https://golang.org/doc/install) in order to build the `kgctl` binary.
To download the Kilo source code and then build and install `kgctl` using the latest commit all with a single command, run: To download the Kilo source code and then build and install `kgctl` using the latest commit all with a single command, run:
```shell ```shell
go install github.com/squat/kilo/cmd/kgctl@latest go install github.com/kilo-io/kilo/cmd/kgctl@latest
``` ```
Alternatively, `kgctl` can be built and installed based on specific version of the code by specifying a Git tag or hash, e.g.: Alternatively, `kgctl` can be built and installed based on specific version of the code by specifying a Git tag or hash, e.g.:
```shell ```shell
go install github.com/squat/kilo/cmd/kgctl@0.2.0 go install github.com/kilo-io/kilo/cmd/kgctl@0.2.0
``` ```
When working on Kilo locally, it can be helpful to build and test the `kgctl` binary as part of the development cycle. When working on Kilo locally, it can be helpful to build and test the `kgctl` binary as part of the development cycle.
@ -31,70 +31,13 @@ make
This will produce a `kgctl` binary at `./bin/<your-os>/<your-architecture>/kgctl`. This will produce a `kgctl` binary at `./bin/<your-os>/<your-architecture>/kgctl`.
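As a quick sanity check, the freshly built binary can be run straight from that output directory; the `linux/amd64` path below is an assumption about the build host and should be adjusted accordingly:

```shell
# Invoke the locally built kgctl; substitute your OS and architecture in the path.
./bin/linux/amd64/kgctl --help
./bin/linux/amd64/kgctl graph --kubeconfig "$HOME/.kube/config"
```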
### Binary Packages
#### Arch Linux
Install `kgctl` from the Arch User Repository using an AUR helper like `paru` or `yay`:
```shell
paru -S kgctl-bin
```
#### Arkade
The [arkade](https://github.com/alexellis/arkade) CLI can be used to install `kgctl` on any OS and architecture:
```shell
arkade get kgctl
```
## Commands ## Commands
|Command|Syntax|Description| |Command|Syntax|Description|
|----|----|-------| |----|----|-------|
|[connect](#connect)|`kgctl connect <peer-name> [flags]`|Connect the host to the cluster, setting up the required interfaces, routes, and keys.|
|[graph](#graph)|`kgctl graph [flags]`|Produce a graph in GraphViz format representing the topology of the cluster.| |[graph](#graph)|`kgctl graph [flags]`|Produce a graph in GraphViz format representing the topology of the cluster.|
|[showconf](#showconf)|`kgctl showconf ( node \| peer ) <name> [flags]`|Show the WireGuard configuration for a node or peer in the mesh.| |[showconf](#showconf)|`kgctl showconf ( node \| peer ) NAME [flags]`|Show the WireGuard configuration for a node or peer in the mesh.|
### connect
The `connect` command configures the local host as a WireGuard Peer of the cluster and applies all of the necessary networking configuration to connect to the cluster.
As long as the process is running, it will watch the cluster for changes and automatically manage the configuration for new or updated Peers and Nodes.
If the given Peer name does not exist in the cluster, the command will register a new Peer and generate the necessary WireGuard keys.
When the command exits, all of the configuration, including newly registered Peers, is cleaned up.
Example:
```shell
PEER_NAME=laptop
SERVICECIDR=10.43.0.0/16
kgctl connect $PEER_NAME --allowed-ips $SERVICECIDR
```
The local host is now connected to the cluster and all IPs from the cluster and any registered Peers are fully routable.
When combined with the `--clean-up false` flag, the configuration produced by the command is persistent and will remain in effect even after the process is stopped.
With the service CIDR of the cluster routable from the local host, Kubernetes DNS names can now be resolved by the cluster DNS provider.
For example, the following snippet could be used to resolve the clusterIP of the Kubernetes API:
```shell
dig @$(kubectl get service -n kube-system kube-dns -o=jsonpath='{.spec.clusterIP}') kubernetes.default.svc.cluster.local +short
# > 10.43.0.1
```
For convenience, the cluster DNS provider's IP address can be configured as the local host's DNS server, making Kubernetes DNS names easily resolvable.
For example, if using `systemd-resolved`, the following snippet could be used:
```shell
systemd-resolve --interface kilo0 --set-dns $(kubectl get service -n kube-system kube-dns -o=jsonpath='{.spec.clusterIP}') --set-domain cluster.local
# Now all lookups for DNS names ending in `.cluster.local` will be routed over the `kilo0` interface to the cluster DNS provider.
dig kubernetes.default.svc.cluster.local +short
# > 10.43.0.1
```
> **Note**: The `connect` command is currently only supported on Linux.
> **Note**: The `connect` command requires the `CAP_NET_ADMIN` capability in order to configure the host's networking stack; unprivileged users will need to use `sudo` or similar tools.
### graph ### graph

View File

@ -1,100 +0,0 @@
# Monitoring
The following assumes that you have applied the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) monitoring stack onto your cluster.
## Kilo
Monitor the Kilo DaemonSet with:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/podmonitor.yaml
```
## WireGuard
Monitor the WireGuard interfaces with:
```shell
kubectl create ns kilo
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/wg-exporter.yaml
```
The manifest will deploy the [Prometheus WireGuard Exporter](https://github.com/MindFlavor/prometheus_wireguard_exporter) as a DaemonSet and a [PodMonitor](https://docs.openshift.com/container-platform/4.8/rest_api/monitoring_apis/podmonitor-monitoring-coreos-com-v1.html).
By default the kube-prometheus stack only monitors the `default`, `kube-system` and `monitoring` namespaces.
In order to allow Prometheus to monitor the `kilo` namespace, apply the Role and RoleBinding with:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/wg-exporter-role-kube-prometheus.yaml
```
## Metrics
### Kilo
Kilo exports some standard metrics with the Prometheus GoCollector and ProcessCollector.
It also exposes some Kilo-specific metrics.
```
# HELP kilo_errors_total Number of errors that occurred while administering the mesh.
# TYPE kilo_errors_total counter
# HELP kilo_leader Leadership status of the node.
# TYPE kilo_leader gauge
# HELP kilo_nodes Number of nodes in the mesh.
# TYPE kilo_nodes gauge
# HELP kilo_peers Number of peers in the mesh.
# TYPE kilo_peers gauge
# HELP kilo_reconciles_total Number of reconciliation attempts.
# TYPE kilo_reconciles_total counter
```
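Because `kg` serves these metrics on its health and metrics listener (`:1107` by default, as listed in the flags above), they can also be fetched ad hoc with a plain HTTP request; the node IP below is hypothetical and the standard Prometheus `/metrics` path is assumed:

```shell
# Fetch Kilo's metrics directly from a node's listen address and show only the Kilo-specific series.
curl -s http://10.0.0.10:1107/metrics | grep '^kilo_'
```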
### WireGuard
The [Prometheus WireGuard Exporter](https://github.com/MindFlavor/prometheus_wireguard_exporter) exports the following metrics:
```
# HELP wireguard_sent_bytes_total Bytes sent to the peer
# TYPE wireguard_sent_bytes_total counter
# HELP wireguard_received_bytes_total Bytes received from the peer
# TYPE wireguard_received_bytes_total counter
# HELP wireguard_latest_handshake_seconds Seconds from the last handshake
# TYPE wireguard_latest_handshake_seconds gauge
```
## Display some Metrics
If your laptop is a Kilo peer of the cluster you can access the Prometheus UI by navigating your browser directly to the cluster IP of the `prometheus-k8s` service.
Otherwise use `port-forward`:
```shell
kubectl -n monitoring port-forward svc/prometheus-k8s 9090
```
and navigate your browser to `localhost:9090`.
Check if you can see the PodMonitors for Kilo and the WireGuard Exporter under **Status** -> **Targets** in the Prometheus web UI.
If you don't see them, check the logs of the `prometheus-k8s` Pods; it may be that Prometheus doesn't have the permission to get Pods in the `kilo` namespace.
In this case, you need to apply the Role and RoleBinding from above.
Navigate to **Graph** and try to execute a simple query, e.g. type `kilo_nodes` and click on `execute`.
You should see some data.
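The same query can also be issued against the Prometheus HTTP API rather than the web UI, which is convenient for scripting; this sketch assumes the `port-forward` from above is still running and that `jq` is installed:

```shell
# Query kilo_nodes through the port-forward via the Prometheus HTTP API.
curl -s 'http://localhost:9090/api/v1/query?query=kilo_nodes' | jq '.data.result'
```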
## Using Grafana
Let's navigate to the Grafana dashboard.
Again, if your laptop is not a Kilo peer, use `port-forward`:
```shell
kubectl -n monitoring port-forward svc/grafana 3000
```
Now navigate your browser to `localhost:3000`.
The default username and password are `admin` and `admin`. The default username and password are `admin` and `admin`.
An example configuration for a dashboard displaying Kilo metrics can be found [here](https://raw.githubusercontent.com/squat/kilo/main/docs/grafana/kilo.json).
You can import this dashboard by hitting **+** -> **Import** on the Grafana dashboard.
The dashboard looks like this:
<img src="./graphs/kilo.png" />

View File

@ -10,7 +10,7 @@ Support for [Kubernetes network policies](https://kubernetes.io/docs/concepts/se
The following command adds network policy support by deploying kube-router to work alongside Kilo: The following command adds network policy support by deploying kube-router to work alongside Kilo:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kube-router.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kube-router.yaml
``` ```
## Examples ## Examples

View File

@ -9,14 +9,29 @@ Once such a configuration is applied, the Kubernetes API server will send an Adm
With regard to the [failure policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy), the API server will apply the requested changes to a resource if the request was answered with `"allowed": true`, or deny the changes if the answer was `"allowed": false`. With regard to the [failure policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy), the API server will apply the requested changes to a resource if the request was answered with `"allowed": true`, or deny the changes if the answer was `"allowed": false`.
In case of Kilo Peer Validation, the specified operations are `UPDATE` and `CREATE`, the resources are `Peers`, and the default `failurePolicy` is set to `Fail`. In case of Kilo Peer Validation, the specified operations are `UPDATE` and `CREATE`, the resources are `Peers`, and the default `failurePolicy` is set to `Fail`.
View the full ValidatingWebhookConfiguration [here](https://github.com/squat/kilo/blob/main/manifests/peer-validation.yaml). View the full ValidatingWebhookConfiguration [here](https://github.com/leonnicolas/kilo-peer-validation/blob/main/deployment-no-cabundle.yaml).
## Getting Started ## Getting Started
[Kilo-Peer-Validation](https://github.com/leonnicolas/kilo-peer-validation) is a webserver that rejects any AdmissionReviewRequest with a faulty Peer configuration.
Apply the Service, the Deployment of the actual webserver, and the ValidatingWebhookConfiguration with: Apply the Service, the Deployment of the actual webserver, and the ValidatingWebhookConfiguration with:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/blob/main/manifests/peer-validation.yaml kubectl apply -f https://raw.githubusercontent.com/leonnicolas/kilo-peer-validation/main/deployment-no-cabundle.yaml
``` ```
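After applying the manifest, the registered webhook and its failure policy can be inspected from the cluster itself; the object name used below is an assumption and should be replaced with the name from the applied manifest:

```shell
# List the registered validating webhooks and inspect the Kilo one (name assumed).
kubectl get validatingwebhookconfigurations
kubectl get validatingwebhookconfiguration kilo-peer-validation -o yaml
```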
The Kubernetes API server will only talk to webhook servers via TLS so the Kilo-Peer-Validation server must be given a valid TLS certificate and key, and the API server must be told what certificate authority (CA) to trust. The Kubernetes API server will only talk to webhook servers via TLS so the Kilo-Peer-Validation server must be given a valid TLS certificate and key, and the API server must be told what certificate authority (CA) to trust.
The above manifest will use [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen) to generate the required certificates and patch the [ValidatingWebhookConfiguration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#configure-admission-webhooks-on-the-fly). One way to do this is to use the [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen) project to create a Kubernetes Secret holding the TLS certificate and key for the webhook server and to make a certificate signing request to the Kubernetes API server.
The following snippet can be used to run kube-webhook-certgen in a Docker container to create a Secret and certificate signing request:
```shell
docker run -v /path/to/kubeconfig:/kubeconfig.yaml:ro jettech/kube-webhook-certgen:v1.5.2 --kubeconfig /kubeconfig.yaml create --namespace kilo --secret-name peer-validation-webhook-tls --host peer-validation,peer-validation.kilo.svc --key-name tls.key --cert-name tls.config
```
Now, the Kubernetes API server can be told what CA to trust by patching the ValidatingWebhookConfiguration with the newly created CA bundle:
```shell
docker run -v /path/to/kubeconfig:/kubeconfig.yaml:ro jettech/kube-webhook-certgen:v1.5.2 --kubeconfig /kubeconfig.yaml patch --webhook-name peer-validation.kilo.svc --secret-name peer-validation-webhook-tls --namespace kilo --patch-mutating=false
```
## Alternative Method
An alternative method to generate a ValidatingWebhookConfiguration manifest without using Kubernetes' Certificate Signing API is described in [Kilo-Peer-Validation](https://github.com/leonnicolas/kilo-peer-validation#use-the-set-up-script).

View File

@ -18,8 +18,8 @@ This DaemonSet creates a WireGuard interface that Kilo will manage.
An example configuration for a K3s cluster with [BoringTun] can be applied with: An example configuration for a K3s cluster with [BoringTun] can be applied with:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-k3s-userspace.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-k3s-userspace.yaml
``` ```
> **Note**: even if some nodes have the WireGuard kernel module, this configuration will cause all nodes to use the userspace implementation of WireGuard. > **Note**: even if some nodes have the WireGuard kernel module, this configuration will cause all nodes to use the userspace implementation of WireGuard.
@ -30,8 +30,8 @@ In a heterogeneous cluster where some nodes are missing the WireGuard kernel mod
An example of such a configuration for a K3s cluster can be applied with: An example of such a configuration for a K3s cluster can be applied with:
```shell ```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/crds.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kilo-k3s-userspace-heterogeneous.yaml kubectl apply -f https://raw.githubusercontent.com/kilo-io/kilo/main/manifests/kilo-k3s-userspace-heterogeneous.yaml
``` ```
This configuration will deploy [nkml](https://github.com/leonnicolas/nkml) as a DaemonSet to label all nodes according to the presence of the WireGuard kernel module. This configuration will deploy [nkml](https://github.com/leonnicolas/nkml) as a DaemonSet to label all nodes according to the presence of the WireGuard kernel module.
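To verify the result of the labeling, the label column can be printed for all nodes; the label key shown here is only a guess for illustration and should be taken from the nkml configuration in the manifest:

```shell
# Show which nodes carry the WireGuard kernel module label set by nkml (label key assumed).
kubectl get nodes -L nkml.squat.ai/wireguard
```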

View File

@ -18,7 +18,7 @@ test_full_mesh_connectivity() {
} }
test_full_mesh_peer() { test_full_mesh_peer() {
check_peer wg99 e2e 10.5.0.1/32 full check_peer wg1 e2e 10.5.0.1/32 full
} }
test_full_mesh_allowed_location_ips() { test_full_mesh_allowed_location_ips() {

View File

@ -1,17 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091
. lib.sh
setup_suite() {
# shellcheck disable=SC2016
block_until_ready_by_name kube-system kilo-userspace
_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
}
test_connect() {
local PEER=test
local ALLOWED_IP=10.5.0.1/32
docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$KGCTL_BINARY":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect "$PEER" --allowed-ip "$ALLOWED_IP"
assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
docker stop "$PEER"
}

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -101,7 +101,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:test image: kiloio/kilo:test
imagePullPolicy: Never imagePullPolicy: Never
args: args:
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -136,7 +136,7 @@ spec:
mountPath: /etc/kubernetes mountPath: /etc/kubernetes
readOnly: true readOnly: true
- name: boringtun - name: boringtun
image: leonnicolas/boringtun:cc19859 image: leonnicolas/boringtun:alpine
args: args:
- --disable-drop-privileges=true - --disable-drop-privileges=true
- --foreground - --foreground
@ -149,7 +149,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: install-cni - name: install-cni
image: squat/kilo:test image: kiloio/kilo:test
imagePullPolicy: Never imagePullPolicy: Never
command: command:
- /bin/sh - /bin/sh

View File

@ -4,7 +4,7 @@ KIND_CLUSTER="kind-cluster-kilo"
KIND_BINARY="${KIND_BINARY:-kind}" KIND_BINARY="${KIND_BINARY:-kind}"
KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}" KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"
KGCTL_BINARY="${KGCTL_BINARY:-kgctl}" KGCTL_BINARY="${KGCTL_BINARY:-kgctl}"
KILO_IMAGE="${KILO_IMAGE:-squat/kilo}" KILO_IMAGE="${KILO_IMAGE:-kiloio/kilo}"
retry() { retry() {
local COUNT="${1:-10}" local COUNT="${1:-10}"
@ -54,7 +54,7 @@ build_kind_config() {
export API_SERVER_PORT="${2:-6443}" export API_SERVER_PORT="${2:-6443}"
export POD_SUBNET="${3:-10.42.0.0/16}" export POD_SUBNET="${3:-10.42.0.0/16}"
export SERVICE_SUBNET="${4:-10.43.0.0/16}" export SERVICE_SUBNET="${4:-10.43.0.0/16}"
export WORKERS="" export WORKERS=""
local i=0 local i=0
while [ "$i" -lt "$WORKER_COUNT" ]; do while [ "$i" -lt "$WORKER_COUNT" ]; do
WORKERS="$(printf "%s\n- role: worker" "$WORKERS")" WORKERS="$(printf "%s\n- role: worker" "$WORKERS")"
@ -65,7 +65,7 @@ build_kind_config() {
} }
create_interface() { create_interface() {
docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun:cc19859 --foreground --disable-drop-privileges true "$1" docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
} }
delete_interface() { delete_interface() {
@ -118,15 +118,15 @@ create_cluster() {
# Create the kind cluster. # Create the kind cluster.
_kind create cluster --name $KIND_CLUSTER --config <(echo "$CONFIG") _kind create cluster --name $KIND_CLUSTER --config <(echo "$CONFIG")
# Load the Kilo image into kind. # Load the Kilo image into kind.
docker tag "$KILO_IMAGE" squat/kilo:test docker tag "$KILO_IMAGE" kiloio/kilo:test
# This command does not accept the --kubeconfig flag, so call the command directly. # This command does not accept the --kubeconfig flag, so call the command directly.
$KIND_BINARY load docker-image squat/kilo:test --name $KIND_CLUSTER $KIND_BINARY load docker-image kiloio/kilo:test --name $KIND_CLUSTER
# Create the kubeconfig secret. # Create the kubeconfig secret.
_kubectl create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system _kubectl create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
# Apply Kilo to the cluster. # Apply Kilo to the cluster.
_kubectl apply -f ../manifests/crds.yaml _kubectl apply -f ../manifests/crds.yaml
_kubectl apply -f kilo-kind-userspace.yaml _kubectl apply -f kilo-kind-userspace.yaml
block_until_ready_by_name kube-system kilo-userspace block_until_ready_by_name kube-system kilo-userspace
_kubectl wait nodes --all --for=condition=Ready _kubectl wait nodes --all --for=condition=Ready
# Wait for CoreDNS. # Wait for CoreDNS.
block_until_ready kube_system k8s-app=kube-dns block_until_ready kube_system k8s-app=kube-dns
@ -135,7 +135,7 @@ create_cluster() {
block_until_ready_by_name default curl block_until_ready_by_name default curl
_kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule- _kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
_kubectl apply -f https://raw.githubusercontent.com/kilo-io/adjacency/main/example.yaml _kubectl apply -f https://raw.githubusercontent.com/kilo-io/adjacency/main/example.yaml
block_until_ready_by_name default adjacency block_until_ready_by_name adjacency adjacency
} }
delete_cluster () { delete_cluster () {
@ -184,14 +184,14 @@ check_peer() {
local ALLOWED_IP=$3 local ALLOWED_IP=$3
local GRANULARITY=$4 local GRANULARITY=$4
create_interface "$INTERFACE" create_interface "$INTERFACE"
docker run --rm leonnicolas/wg-tools wg genkey > "$INTERFACE" docker run --rm --entrypoint=/usr/bin/wg "$KILO_IMAGE" genkey > "$INTERFACE"
assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key leonnicolas/wg-tools -c 'cat /key | wg pubkey')" "should be able to create Peer" assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" -c 'cat /key | wg pubkey')" "should be able to create Peer"
assert "_kgctl showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration" assert "_kgctl showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini leonnicolas/wg-tools setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl" assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini $KILO_IMAGE setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl"
docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key leonnicolas/wg-tools set "$INTERFACE" private-key /key docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" set "$INTERFACE" private-key /key
docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools address add "$ALLOWED_IP" dev "$INTERFACE" docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" address add "$ALLOWED_IP" dev "$INTERFACE"
docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools link set "$INTERFACE" up docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools route add 10.42/16 dev "$INTERFACE" docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host" assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
assert_equals "$(_kgctl showconf peer "$PEER")" "$(_kgctl showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity" assert_equals "$(_kgctl showconf peer "$PEER")" "$(_kgctl showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
rm "$INTERFACE" "$PEER".ini rm "$INTERFACE" "$PEER".ini

View File

@ -18,7 +18,7 @@ test_location_mesh_connectivity() {
} }
test_location_mesh_peer() { test_location_mesh_peer() {
check_peer wg99 e2e 10.5.0.1/32 location check_peer wg1 e2e 10.5.0.1/32 location
} }
test_mesh_granularity_auto_detect() { test_mesh_granularity_auto_detect() {

100
go.mod
View File

@ -1,88 +1,28 @@
module github.com/squat/kilo module github.com/kilo-io/kilo
go 1.18 go 1.15
require ( require (
github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310 github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310
github.com/campoy/embedmd v1.0.0 github.com/campoy/embedmd v1.0.0
github.com/containernetworking/cni v1.0.1 github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins v1.1.1 github.com/containernetworking/plugins v0.6.0
github.com/coreos/go-iptables v0.6.0 github.com/coreos/go-iptables v0.4.0
github.com/go-kit/kit v0.9.0 github.com/go-kit/kit v0.9.0
github.com/imdario/mergo v0.3.6 // indirect
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a github.com/oklog/run v1.0.0
github.com/oklog/run v1.1.0 github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_golang v1.11.0 github.com/spf13/cobra v1.1.3
github.com/spf13/cobra v1.2.1 github.com/spf13/pflag v1.0.5
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 github.com/vishvananda/netlink v1.0.0
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211124212657-dd7407c86d22 golang.org/x/lint v0.0.0-20200302205851-738671d3881b
honnef.co/go/tools v0.3.1 golang.org/x/sys v0.0.0-20210510120138-977fb7262007
k8s.io/api v0.23.6 k8s.io/api v0.21.1
k8s.io/apiextensions-apiserver v0.23.6 k8s.io/apiextensions-apiserver v0.21.1
k8s.io/apimachinery v0.23.6 k8s.io/apimachinery v0.21.1
k8s.io/client-go v0.23.6 k8s.io/client-go v0.21.1
k8s.io/code-generator v0.23.6 k8s.io/code-generator v0.21.1
sigs.k8s.io/controller-tools v0.8.0 sigs.k8s.io/controller-tools v0.6.0
)
require (
github.com/BurntSushi/toml v0.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.12.0 // indirect
github.com/go-logfmt/logfmt v0.5.0 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.2.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mdlayher/genetlink v1.0.0 // indirect
github.com/mdlayher/netlink v1.4.1 // indirect
github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
golang.zx2c4.com/wireguard v0.0.0-20211123210315-387f7c461a16 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
) )

590
go.sum

File diff suppressed because it is too large

View File

@ -1,9 +1,8 @@
---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
metadata: metadata:
annotations: annotations:
controller-gen.kubebuilder.io/version: v0.8.0 controller-gen.kubebuilder.io/version: v0.6.0
creationTimestamp: null creationTimestamp: null
name: peers.kilo.squat.ai name: peers.kilo.squat.ai
spec: spec:
@ -13,7 +12,7 @@ spec:
listKind: PeerList listKind: PeerList
plural: peers plural: peers
singular: peer singular: peer
scope: Cluster scope: Namespaced
versions: versions:
- name: v1alpha1 - name: v1alpha1
schema: schema:

View File

@ -67,7 +67,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -101,7 +101,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -131,7 +131,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c

View File

@ -1,176 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo-scripts
namespace: kube-system
data:
init.sh: |
#!/bin/sh
cat > /etc/kubernetes/kubeconfig <<EOF
apiVersion: v1
kind: Config
name: kilo
clusters:
- cluster:
server: $(sed -n 's/.*server: \(.*\)/\1/p' /var/lib/rancher/k3s/agent/kubelet.kubeconfig)
certificate-authority: /var/lib/rancher/k3s/agent/server-ca.crt
users:
- name: kilo
user:
token: $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
contexts:
- name: kilo
context:
cluster: kilo
namespace: ${NAMESPACE}
user: kilo
current-context: kilo
EOF
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo:0.5.0
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --cni=false
- --compatibility=cilium
- --local=false
- --encapsulate=crosssubnet
- --clean-up-interface=true
- --log-level=all
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 1107
name: metrics
securityContext:
privileged: true
volumeMounts:
- name: kilo-dir
mountPath: /var/lib/kilo
- name: kubeconfig
mountPath: /etc/kubernetes
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
initContainers:
- name: generate-kubeconfig
image: squat/kilo:0.5.0
command:
- /bin/sh
args:
- /scripts/init.sh
imagePullPolicy: Always
volumeMounts:
- name: kubeconfig
mountPath: /etc/kubernetes
- name: scripts
mountPath: /scripts/
readOnly: true
- name: k3s-agent
mountPath: /var/lib/rancher/k3s/agent/
readOnly: true
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
emptyDir: {}
- name: scripts
configMap:
name: kilo-scripts
- name: k3s-agent
hostPath:
path: /var/lib/rancher/k3s/agent
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@ -96,7 +96,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -127,7 +127,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -133,7 +133,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -164,7 +164,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:
@ -185,7 +185,7 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c
@ -264,7 +264,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -298,7 +298,7 @@ spec:
mountPath: /var/run/wireguard mountPath: /var/run/wireguard
readOnly: false readOnly: false
- name: boringtun - name: boringtun
image: leonnicolas/boringtun:cc19859 image: leonnicolas/boringtun
args: args:
- --disable-drop-privileges=true - --disable-drop-privileges=true
- --foreground - --foreground
@ -311,7 +311,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:
@ -332,7 +332,7 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c
@ -391,7 +391,7 @@ spec:
--- ---
kind: DaemonSet kind: DaemonSet
apiVersion: apps/v1 apiVersion: apps/v1
metadata: metadata:
name: nkml name: nkml
namespace: kube-system namespace: kube-system
labels: labels:
@ -410,7 +410,7 @@ spec:
containers: containers:
- name: nkml - name: nkml
image: leonnicolas/nkml image: leonnicolas/nkml
args: args:
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
- --label-mod=wireguard - --label-mod=wireguard
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
@ -428,7 +428,7 @@ spec:
readOnly: true readOnly: true
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -131,7 +131,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -165,7 +165,7 @@ spec:
mountPath: /var/run/wireguard mountPath: /var/run/wireguard
readOnly: false readOnly: false
- name: boringtun - name: boringtun
image: leonnicolas/boringtun:cc19859 image: leonnicolas/boringtun
args: args:
- --disable-drop-privileges=true - --disable-drop-privileges=true
- --foreground - --foreground
@ -178,7 +178,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:
@ -199,7 +199,7 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -130,7 +130,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -160,7 +160,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: generate-kubeconfig - name: generate-kubeconfig
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
args: args:
@ -181,7 +181,7 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c

View File

@ -1,142 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo:0.5.0
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --cni=false
- --compatibility=cilium
- --local=false
# additional, optional flags
- --encapsulate=crosssubnet
- --clean-up-interface=true
- --subnet=172.31.254.0/24
- --log-level=all
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 1107
name: metrics
securityContext:
privileged: true
volumeMounts:
- name: kilo-dir
mountPath: /var/lib/kilo
# with kube-proxy configmap
# - name: kubeconfig
# mountPath: /etc/kubernetes
# readOnly: true
# without kube-proxy host kubeconfig binding
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
subPath: admin.conf
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: kilo-dir
hostPath:
path: /var/lib/kilo
# with kube-proxy configmap
# - name: kubeconfig
# configMap:
# name: kube-proxy
# items:
# - key: kubeconfig.conf
# path: kubeconfig
# without kube-proxy host kubeconfig binding
- name: kubeconfig
hostPath:
path: /etc/kubernetes
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@ -1,142 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: boringtun
image: leonnicolas/boringtun:cc19859
args:
- --disable-drop-privileges=true
- --foreground
- kilo0
securityContext:
privileged: true
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
- name: kilo
image: squat/kilo:0.5.0
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --create-interface=false
- --interface=kilo0
- --cni=false
- --compatibility=flannel
- --local=false
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 1107
name: metrics
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
tolerations:
- operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
configMap:
name: kube-proxy
items:
- key: kubeconfig.conf
path: kubeconfig
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: wireguard
hostPath:
path: /var/run/wireguard

View File

@ -67,7 +67,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)

View File

@ -1,207 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.4.0",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"forceAddress":true,
"mtu": 1420,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo
app.kubernetes.io/part-of: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: boringtun
image: leonnicolas/boringtun:cc19859
imagePullPolicy: IfNotPresent
args:
- --disable-drop-privileges=true
- --foreground
- kilo0
securityContext:
privileged: true
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
- name: kilo
image: squat/kilo:0.5.0
imagePullPolicy: IfNotPresent
args:
- --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME)
- --create-interface=false
- --interface=kilo0
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 1107
name: metrics
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: kubeconfig
mountPath: /etc/kubernetes
readOnly: true
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
initContainers:
- name: install-cni
image: squat/kilo:0.5.0
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: kubeconfig
configMap:
name: kube-proxy
items:
- key: kubeconfig.conf
path: kubeconfig
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: wireguard
hostPath:
path: /var/run/wireguard

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -101,7 +101,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -131,7 +131,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c

View File

@ -67,7 +67,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)

View File

@ -8,7 +8,7 @@ metadata:
data: data:
cni-conf.json: | cni-conf.json: |
{ {
"cniVersion":"0.4.0", "cniVersion":"0.3.1",
"name":"kilo", "name":"kilo",
"plugins":[ "plugins":[
{ {
@ -101,7 +101,7 @@ spec:
hostNetwork: true hostNetwork: true
containers: containers:
- name: kilo - name: kilo
image: squat/kilo:0.5.0 image: squat/kilo
args: args:
- --kubeconfig=/etc/kubernetes/kubeconfig - --kubeconfig=/etc/kubernetes/kubeconfig
- --hostname=$(NODE_NAME) - --hostname=$(NODE_NAME)
@ -131,7 +131,7 @@ spec:
readOnly: false readOnly: false
initContainers: initContainers:
- name: install-cni - name: install-cni
image: squat/kilo:0.5.0 image: squat/kilo
command: command:
- /bin/sh - /bin/sh
- -c - -c

View File

@ -1,173 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kilo
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: "peers.kilo.squat.ai"
webhooks:
- name: "peers.kilo.squat.ai"
rules:
- apiGroups: ["kilo.squat.ai"]
apiVersions: ["v1alpha1"]
operations: ["CREATE","UPDATE"]
resources: ["peers"]
scope: "Cluster"
clientConfig:
service:
namespace: "kilo"
name: "peer-validation"
path: "/validate"
admissionReviewVersions: ["v1"]
sideEffects: None
timeoutSeconds: 5
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: peer-validation-server
namespace: kilo
labels:
app.kubernetes.io/name: peer-validation-server
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: peer-validation-server
template:
metadata:
labels:
app.kubernetes.io/name: peer-validation-server
spec:
securityContext:
runAsNonRoot: true
runAsUser: 1000
containers:
- name: server
image: squat/kilo:0.5.0
args:
- webhook
- --cert-file=/run/secrets/tls/tls.crt
- --key-file=/run/secrets/tls/tls.key
- --listen-metrics=:1107
- --listen=:8443
ports:
- containerPort: 8443
name: webhook
- containerPort: 1107
name: metrics
volumeMounts:
- name: tls
mountPath: /run/secrets/tls
readOnly: true
volumes:
- name: tls
secret:
secretName: peer-validation-webhook-tls
---
apiVersion: v1
kind: Service
metadata:
name: peer-validation
namespace: kilo
spec:
selector:
app.kubernetes.io/name: peer-validation-server
ports:
- port: 443
targetPort: webhook
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo-peer-validation
namespace: kilo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo-peer-validation
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
resourceNames:
- peers.kilo.squat.ai
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo-peer-validation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo-peer-validation
subjects:
- kind: ServiceAccount
namespace: kilo
name: kilo-peer-validation
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kilo-peer-validation
namespace: kilo
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kilo-peer-validation
namespace: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kilo-peer-validation
subjects:
- kind: ServiceAccount
namespace: kilo
name: kilo-peer-validation
---
apiVersion: batch/v1
kind: Job
metadata:
name: cert-gen
namespace: kilo
spec:
template:
spec:
serviceAccountName: kilo-peer-validation
initContainers:
- name: create
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0
args:
- create
- --namespace=kilo
- --secret-name=peer-validation-webhook-tls
- --host=peer-validation,peer-validation.kilo.svc
- --key-name=tls.key
- --cert-name=tls.crt
containers:
- name: patch
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0
args:
- patch
- --webhook-name=peers.kilo.squat.ai
- --secret-name=peer-validation-webhook-tls
- --namespace=kilo
- --patch-mutating=false
restartPolicy: OnFailure
backoffLimit: 4

View File

@ -1,56 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: prometheus
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.26.0
name: prometheus-k8s
namespace: kilo
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: prometheus
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.26.0
name: prometheus-k8s
namespace: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring

View File

@ -1,67 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
app.kubernetes.io/name: wg-exporter
app.kubernetes.io/part-of: kilo
name: wg-exporter
namespace: kilo
spec:
namespaceSelector:
matchNames:
- kilo
podMetricsEndpoints:
- interval: 15s
port: metrics
path: /metrics
selector:
matchLabels:
app.kubernetes.io/part-of: kilo
app.kubernetes.io/name: wg-exporter
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubernetes.io/name: wg-exporter
app.kubernetes.io/part-of: kilo
name: wg-exporter
namespace: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: wg-exporter
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: wg-exporter
app.kubernetes.io/part-of: kilo
spec:
containers:
- args:
- -a
- -i=kilo0
- -p=9586
image: mindflavor/prometheus-wireguard-exporter
name: wg-exporter
ports:
- containerPort: 9586
name: metrics
protocol: TCP
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
volumes:
- name: wireguard
hostPath:
path: /var/run/wireguard
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists

View File

@ -1,111 +0,0 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encapsulation
import (
"fmt"
"net"
"sync"
"github.com/vishvananda/netlink"
"github.com/squat/kilo/pkg/iptables"
)
const ciliumDeviceName = "cilium_host"
type cilium struct {
iface int
strategy Strategy
ch chan netlink.LinkUpdate
done chan struct{}
// mu guards updates to the iface field.
mu sync.Mutex
}
// NewCilium returns an encapsulator that uses Cilium.
func NewCilium(strategy Strategy) Encapsulator {
return &cilium{
ch: make(chan netlink.LinkUpdate),
done: make(chan struct{}),
strategy: strategy,
}
}
// CleanUp closes the done channel.
func (f *cilium) CleanUp() error {
close(f.done)
return nil
}
// Gw returns the correct gateway IP associated with the given node.
func (f *cilium) Gw(_, _ net.IP, subnet *net.IPNet) net.IP {
return subnet.IP
}
// Index returns the index of the Cilium interface.
func (f *cilium) Index() int {
f.mu.Lock()
defer f.mu.Unlock()
return f.iface
}
// Init finds the Cilium interface index.
func (f *cilium) Init(_ int) error {
if err := netlink.LinkSubscribe(f.ch, f.done); err != nil {
return fmt.Errorf("failed to subscribe to updates to %s: %v", ciliumDeviceName, err)
}
go func() {
var lu netlink.LinkUpdate
for {
select {
case lu = <-f.ch:
if lu.Attrs().Name == ciliumDeviceName {
f.mu.Lock()
f.iface = lu.Attrs().Index
f.mu.Unlock()
}
case <-f.done:
return
}
}
}()
i, err := netlink.LinkByName(ciliumDeviceName)
if _, ok := err.(netlink.LinkNotFoundError); ok {
return nil
}
if err != nil {
return fmt.Errorf("failed to query for Cilium interface: %v", err)
}
f.mu.Lock()
f.iface = i.Attrs().Index
f.mu.Unlock()
return nil
}
// Rules is a no-op.
func (f *cilium) Rules(_ []*net.IPNet) []iptables.Rule {
return nil
}
// Set is a no-op.
func (f *cilium) Set(_ *net.IPNet) error {
return nil
}
// Strategy returns the configured strategy for encapsulation.
func (f *cilium) Strategy() Strategy {
return f.strategy
}

View File

@ -17,7 +17,7 @@ package encapsulation
import ( import (
"net" "net"
"github.com/squat/kilo/pkg/iptables" "github.com/kilo-io/kilo/pkg/iptables"
) )
// Strategy identifies which packets within a location should // Strategy identifies which packets within a location should

View File

@ -19,8 +19,9 @@ import (
"net" "net"
"sync" "sync"
"github.com/squat/kilo/pkg/iptables"
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"github.com/kilo-io/kilo/pkg/iptables"
) )
const flannelDeviceName = "flannel.1" const flannelDeviceName = "flannel.1"
@ -56,8 +57,6 @@ func (f *flannel) Gw(_, _ net.IP, subnet *net.IPNet) net.IP {
// Index returns the index of the Flannel interface. // Index returns the index of the Flannel interface.
func (f *flannel) Index() int { func (f *flannel) Index() int {
f.mu.Lock()
defer f.mu.Unlock()
return f.iface return f.iface
} }

View File

@ -18,8 +18,8 @@ import (
"fmt" "fmt"
"net" "net"
"github.com/squat/kilo/pkg/iproute" "github.com/kilo-io/kilo/pkg/iproute"
"github.com/squat/kilo/pkg/iptables" "github.com/kilo-io/kilo/pkg/iptables"
) )
type ipip struct { type ipip struct {
@ -74,7 +74,7 @@ func (i *ipip) Rules(nodes []*net.IPNet) []iptables.Rule {
rules = append(rules, iptables.NewIPv6Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: jump to IPIP chain", "-j", "KILO-IPIP")) rules = append(rules, iptables.NewIPv6Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: jump to IPIP chain", "-j", "KILO-IPIP"))
for _, n := range nodes { for _, n := range nodes {
// Accept encapsulated traffic from peers. // Accept encapsulated traffic from peers.
rules = append(rules, iptables.NewRule(iptables.GetProtocol(n.IP), "filter", "KILO-IPIP", "-s", n.String(), "-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-j", "ACCEPT")) rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(n.IP)), "filter", "KILO-IPIP", "-s", n.String(), "-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-j", "ACCEPT"))
} }
// Drop all other IPIP traffic. // Drop all other IPIP traffic.
rules = append(rules, iptables.NewIPv4Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: reject other IPIP traffic", "-j", "DROP")) rules = append(rules, iptables.NewIPv4Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: reject other IPIP traffic", "-j", "DROP"))

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build cgo
// +build cgo // +build cgo
package encapsulation package encapsulation

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cgo
// +build !cgo // +build !cgo
package encapsulation package encapsulation

View File

@ -17,7 +17,7 @@ package encapsulation
import ( import (
"net" "net"
"github.com/squat/kilo/pkg/iptables" "github.com/kilo-io/kilo/pkg/iptables"
) )
// Noop is an encapsulation that does nothing. // Noop is an encapsulation that does nothing.

View File

@ -16,33 +16,15 @@ package iptables
import ( import (
"fmt" "fmt"
"io"
"net" "net"
"os"
"sync" "sync"
"time" "time"
"github.com/coreos/go-iptables/iptables" "github.com/coreos/go-iptables/iptables"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
) )
const ipv6ModuleDisabledPath = "/sys/module/ipv6/parameters/disable"
func ipv6Disabled() (bool, error) {
f, err := os.Open(ipv6ModuleDisabledPath)
if err != nil {
return false, err
}
defer f.Close()
disabled := make([]byte, 1)
if _, err = io.ReadFull(f, disabled); err != nil {
return false, err
}
return disabled[0] == '1', nil
}
// Protocol represents an IP protocol. // Protocol represents an IP protocol.
type Protocol byte type Protocol byte
@ -54,11 +36,11 @@ const (
) )
// GetProtocol will return a protocol from the length of an IP address. // GetProtocol will return a protocol from the length of an IP address.
func GetProtocol(ip net.IP) Protocol { func GetProtocol(length int) Protocol {
if len(ip) == net.IPv4len || ip.To4() != nil { if length == net.IPv6len {
return ProtocolIPv4 return ProtocolIPv6
} }
return ProtocolIPv6 return ProtocolIPv4
} }
// Client represents any type that can administer iptables rules. // Client represents any type that can administer iptables rules.
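For illustration only, not part of this change set: the two GetProtocol signatures above classify IPv4 addresses differently because net.ParseIP always returns a 16-byte slice. A small, self-contained Go sketch showing why a pure length check misreports plain IPv4 addresses while the To4-based check does not:

package main

import (
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("10.0.0.1")
	// net.ParseIP stores IPv4 addresses in their 16-byte IPv4-in-IPv6 form,
	// so a pure length comparison classifies this address as IPv6.
	fmt.Println(len(ip) == net.IPv6len) // true
	// To4 returns a non-nil 4-byte form for IPv4 addresses, which is what
	// the net.IP-based signature on the left relies on.
	fmt.Println(ip.To4() != nil) // true
}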
@ -221,7 +203,6 @@ type Controller struct {
errors chan error errors chan error
logger log.Logger logger log.Logger
resyncPeriod time.Duration resyncPeriod time.Duration
registerer prometheus.Registerer
sync.Mutex sync.Mutex
rules []Rule rules []Rule
@ -253,12 +234,6 @@ func WithClients(v4, v6 Client) ControllerOption {
} }
} }
func WithRegisterer(registerer prometheus.Registerer) ControllerOption {
return func(c *Controller) {
c.registerer = registerer
}
}
// New generates a new iptables rules controller. // New generates a new iptables rules controller.
// If no options are given, IPv4 and IPv6 clients // If no options are given, IPv4 and IPv6 clients
// will be instantiated using the regular iptables backend. // will be instantiated using the regular iptables backend.
@ -275,23 +250,14 @@ func New(opts ...ControllerOption) (*Controller, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create iptables IPv4 client: %v", err) return nil, fmt.Errorf("failed to create iptables IPv4 client: %v", err)
} }
c.v4 = wrapWithMetrics(v4, "IPv4", c.registerer) c.v4 = v4
} }
if c.v6 == nil { if c.v6 == nil {
disabled, err := ipv6Disabled() v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to check IPv6 status: %v", err) return nil, fmt.Errorf("failed to create iptables IPv6 client: %v", err)
}
if disabled {
level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
c.v6 = &fakeClient{}
} else {
v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return nil, fmt.Errorf("failed to create iptables IPv6 client: %v", err)
}
c.v6 = wrapWithMetrics(v6, "IPv6", c.registerer)
} }
c.v6 = v6
} }
return c, nil return c, nil
} }

View File

@ -1,115 +0,0 @@
// Copyright 2022 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"github.com/prometheus/client_golang/prometheus"
)
type metricsClientWrapper struct {
client Client
operationCounter *prometheus.CounterVec
}
func wrapWithMetrics(client Client, protocol string, registerer prometheus.Registerer) Client {
if registerer == nil {
return client
}
labelNames := []string{
"operation",
"table",
"chain",
}
counter := prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "kilo_iptables_operations_total",
Help: "Number of iptables operations.",
ConstLabels: prometheus.Labels{"protocol": protocol},
}, labelNames)
registerer.MustRegister(counter)
return &metricsClientWrapper{client, counter}
}
func (m *metricsClientWrapper) AppendUnique(table string, chain string, rule ...string) error {
m.operationCounter.With(prometheus.Labels{
"operation": "AppendUnique",
"table": table,
"chain": chain,
}).Inc()
return m.client.AppendUnique(table, chain, rule...)
}
func (m *metricsClientWrapper) Delete(table string, chain string, rule ...string) error {
m.operationCounter.With(prometheus.Labels{
"operation": "Delete",
"table": table,
"chain": chain,
}).Inc()
return m.client.Delete(table, chain, rule...)
}
func (m *metricsClientWrapper) Exists(table string, chain string, rule ...string) (bool, error) {
m.operationCounter.With(prometheus.Labels{
"operation": "Exists",
"table": table,
"chain": chain,
}).Inc()
return m.client.Exists(table, chain, rule...)
}
func (m *metricsClientWrapper) List(table string, chain string) ([]string, error) {
m.operationCounter.With(prometheus.Labels{
"operation": "List",
"table": table,
"chain": chain,
}).Inc()
return m.client.List(table, chain)
}
func (m *metricsClientWrapper) ClearChain(table string, chain string) error {
m.operationCounter.With(prometheus.Labels{
"operation": "ClearChain",
"table": table,
"chain": chain,
}).Inc()
return m.client.ClearChain(table, chain)
}
func (m *metricsClientWrapper) DeleteChain(table string, chain string) error {
m.operationCounter.With(prometheus.Labels{
"operation": "DeleteChain",
"table": table,
"chain": chain,
}).Inc()
return m.client.DeleteChain(table, chain)
}
func (m *metricsClientWrapper) NewChain(table string, chain string) error {
m.operationCounter.With(prometheus.Labels{
"operation": "NewChain",
"table": table,
"chain": chain,
}).Inc()
return m.client.NewChain(table, chain)
}
func (m *metricsClientWrapper) ListChains(table string) ([]string, error) {
m.operationCounter.With(prometheus.Labels{
"operation": "ListChains",
"table": table,
"chain": "*",
}).Inc()
return m.client.ListChains(table)
}
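
For illustration only, not part of this change set: a short usage sketch of the wrapper removed above, mirroring the call site shown earlier in the controller's New function (c.v4 = wrapWithMetrics(v4, "IPv4", c.registerer)). Because wrapWithMetrics is unexported, this would have to live in package iptables; the helper name below is invented for illustration.

package iptables

import (
	"fmt"

	"github.com/coreos/go-iptables/iptables"
	"github.com/prometheus/client_golang/prometheus"
)

// newInstrumentedIPv4Client wraps the raw go-iptables client so that every
// operation increments kilo_iptables_operations_total with protocol="IPv4".
func newInstrumentedIPv4Client(reg prometheus.Registerer) (Client, error) {
	v4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
	if err != nil {
		return nil, fmt.Errorf("failed to create iptables IPv4 client: %v", err)
	}
	// wrapWithMetrics returns the client unwrapped when reg is nil.
	return wrapWithMetrics(v4, "IPv4", reg), nil
}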

View File

@ -48,7 +48,6 @@ var PeerShortNames = []string{"peer"}
// +genclient:nonNamespaced // +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true // +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Cluster
// Peer is a WireGuard peer that should have access to the VPN. // Peer is a WireGuard peer that should have access to the VPN.
type Peer struct { type Peer struct {

View File

@ -1,7 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -25,26 +25,24 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/validation"
v1informers "k8s.io/client-go/informers/core/v1" v1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
v1listers "k8s.io/client-go/listers/core/v1" v1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
kiloclient "github.com/squat/kilo/pkg/k8s/clientset/versioned" kiloclient "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
v1alpha1informers "github.com/squat/kilo/pkg/k8s/informers/kilo/v1alpha1" v1alpha1informers "github.com/kilo-io/kilo/pkg/k8s/informers/kilo/v1alpha1"
v1alpha1listers "github.com/squat/kilo/pkg/k8s/listers/kilo/v1alpha1" v1alpha1listers "github.com/kilo-io/kilo/pkg/k8s/listers/kilo/v1alpha1"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
const ( const (
@ -69,8 +67,6 @@ const (
jsonRemovePatch = `{"op": "remove", "path": "%s"}` jsonRemovePatch = `{"op": "remove", "path": "%s"}`
) )
var logger = log.NewNopLogger()
type backend struct { type backend struct {
nodes *nodeBackend nodes *nodeBackend
peers *peerBackend peers *peerBackend
@ -103,12 +99,10 @@ type peerBackend struct {
} }
// New creates a new instance of a mesh.Backend. // New creates a new instance of a mesh.Backend.
func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string, l log.Logger) mesh.Backend { func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string) mesh.Backend {
ni := v1informers.NewNodeInformer(c, 5*time.Minute, nil) ni := v1informers.NewNodeInformer(c, 5*time.Minute, nil)
pi := v1alpha1informers.NewPeerInformer(kc, 5*time.Minute, nil) pi := v1alpha1informers.NewPeerInformer(kc, 5*time.Minute, nil)
logger = l
return &backend{ return &backend{
&nodeBackend{ &nodeBackend{
client: c, client: c,
@ -128,7 +122,7 @@ func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Inter
} }
// CleanUp removes configuration applied to the backend. // CleanUp removes configuration applied to the backend.
func (nb *nodeBackend) CleanUp(ctx context.Context, name string) error { func (nb *nodeBackend) CleanUp(name string) error {
patch := []byte("[" + strings.Join([]string{ patch := []byte("[" + strings.Join([]string{
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(endpointAnnotationKey, "/", jsonPatchSlash, 1))), fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(endpointAnnotationKey, "/", jsonPatchSlash, 1))),
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(internalIPAnnotationKey, "/", jsonPatchSlash, 1))), fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(internalIPAnnotationKey, "/", jsonPatchSlash, 1))),
@ -138,7 +132,7 @@ func (nb *nodeBackend) CleanUp(ctx context.Context, name string) error {
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(discoveredEndpointsKey, "/", jsonPatchSlash, 1))), fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(discoveredEndpointsKey, "/", jsonPatchSlash, 1))),
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(granularityKey, "/", jsonPatchSlash, 1))), fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(granularityKey, "/", jsonPatchSlash, 1))),
}, ",") + "]") }, ",") + "]")
if _, err := nb.client.CoreV1().Nodes().Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil { if _, err := nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to patch node: %v", err) return fmt.Errorf("failed to patch node: %v", err)
} }
return nil return nil
@ -155,9 +149,9 @@ func (nb *nodeBackend) Get(name string) (*mesh.Node, error) {
// Init initializes the backend; for this backend that means // Init initializes the backend; for this backend that means
// syncing the informer cache. // syncing the informer cache.
func (nb *nodeBackend) Init(ctx context.Context) error { func (nb *nodeBackend) Init(stop <-chan struct{}) error {
go nb.informer.Run(ctx.Done()) go nb.informer.Run(stop)
if ok := cache.WaitForCacheSync(ctx.Done(), func() bool { if ok := cache.WaitForCacheSync(stop, func() bool {
return nb.informer.HasSynced() return nb.informer.HasSynced()
}); !ok { }); !ok {
return errors.New("failed to sync node cache") return errors.New("failed to sync node cache")
@ -212,7 +206,7 @@ func (nb *nodeBackend) List() ([]*mesh.Node, error) {
} }
// Set sets the fields of a node. // Set sets the fields of a node.
func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) error { func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
old, err := nb.lister.Get(name) old, err := nb.lister.Get(name)
if err != nil { if err != nil {
return fmt.Errorf("failed to find node: %v", err) return fmt.Errorf("failed to find node: %v", err)
@ -224,7 +218,7 @@ func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) er
} else { } else {
n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String() n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
} }
n.ObjectMeta.Annotations[keyAnnotationKey] = node.Key.String() n.ObjectMeta.Annotations[keyAnnotationKey] = string(node.Key)
n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10) n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
if node.WireGuardIP == nil { if node.WireGuardIP == nil {
n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = "" n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = ""
@ -253,7 +247,7 @@ func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) er
if err != nil { if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", n.Name, err) return fmt.Errorf("failed to create patch for node %q: %v", n.Name, err)
} }
if _, err = nb.client.CoreV1().Nodes().Patch(ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil { if _, err = nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to patch node: %v", err) return fmt.Errorf("failed to patch node: %v", err)
} }
return nil return nil
@ -282,9 +276,9 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
location = node.ObjectMeta.Labels[topologyLabel] location = node.ObjectMeta.Labels[topologyLabel]
} }
// Allow the endpoint to be overridden. // Allow the endpoint to be overridden.
endpoint := wireguard.ParseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey]) endpoint := parseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
if endpoint == nil { if endpoint == nil {
endpoint = wireguard.ParseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey]) endpoint = parseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
} }
// Allow the internal IP to be overridden. // Allow the internal IP to be overridden.
internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey]) internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey])
@ -298,11 +292,13 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
internalIP = nil internalIP = nil
} }
// Set the WireGuard PersistentKeepalive for the node. // Set the WireGuard PersistentKeepalive for the node.
var persistentKeepalive time.Duration var persistentKeepalive int64
if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; ok { if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; !ok {
// We can ignore the error, because p will be set to 0 if an error occurs. persistentKeepalive = 0
p, _ := strconv.ParseInt(keepAlive, 10, 64) } else {
persistentKeepalive = time.Duration(p) * time.Second if persistentKeepalive, err = strconv.ParseInt(keepAlive, 10, 64); err != nil {
persistentKeepalive = 0
}
} }
var lastSeen int64 var lastSeen int64
if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok { if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok {
@ -312,7 +308,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
lastSeen = 0 lastSeen = 0
} }
} }
var discoveredEndpoints map[string]*net.UDPAddr var discoveredEndpoints map[string]*wireguard.Endpoint
if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok { if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok {
err := json.Unmarshal([]byte(de), &discoveredEndpoints) err := json.Unmarshal([]byte(de), &discoveredEndpoints)
if err != nil { if err != nil {
@ -320,11 +316,11 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
} }
} }
// Set allowed IPs for a location. // Set allowed IPs for a location.
var allowedLocationIPs []net.IPNet var allowedLocationIPs []*net.IPNet
if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok { if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok {
for _, ip := range strings.Split(str, ",") { for _, ip := range strings.Split(str, ",") {
if ipnet := normalizeIP(ip); ipnet != nil { if ipnet := normalizeIP(ip); ipnet != nil {
allowedLocationIPs = append(allowedLocationIPs, *ipnet) allowedLocationIPs = append(allowedLocationIPs, ipnet)
} }
} }
} }
@ -339,9 +335,6 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
} }
} }
// TODO log some error or warning.
key, _ := wgtypes.ParseKey(node.ObjectMeta.Annotations[keyAnnotationKey])
return &mesh.Node{ return &mesh.Node{
// Endpoint and InternalIP should only ever fail to parse if the // Endpoint and InternalIP should only ever fail to parse if the
// remote node's agent has not yet set its IP address; // remote node's agent has not yet set its IP address;
@ -352,12 +345,12 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
Endpoint: endpoint, Endpoint: endpoint,
NoInternalIP: noInternalIP, NoInternalIP: noInternalIP,
InternalIP: internalIP, InternalIP: internalIP,
Key: key, Key: []byte(node.ObjectMeta.Annotations[keyAnnotationKey]),
LastSeen: lastSeen, LastSeen: lastSeen,
Leader: leader, Leader: leader,
Location: location, Location: location,
Name: node.Name, Name: node.Name,
PersistentKeepalive: persistentKeepalive, PersistentKeepalive: int(persistentKeepalive),
Subnet: subnet, Subnet: subnet,
// WireGuardIP can fail to parse if the node is not a leader or if // WireGuardIP can fail to parse if the node is not a leader or if
// the node's agent has not yet reconciled. In either case, the IP // the node's agent has not yet reconciled. In either case, the IP
@ -374,14 +367,14 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
if peer == nil { if peer == nil {
return nil return nil
} }
var aips []net.IPNet var aips []*net.IPNet
for _, aip := range peer.Spec.AllowedIPs { for _, aip := range peer.Spec.AllowedIPs {
aip := normalizeIP(aip) aip := normalizeIP(aip)
// Skip any invalid IPs. // Skip any invalid IPs.
if aip == nil { if aip == nil {
continue continue
} }
aips = append(aips, *aip) aips = append(aips, aip)
} }
var endpoint *wireguard.Endpoint var endpoint *wireguard.Endpoint
if peer.Spec.Endpoint != nil { if peer.Spec.Endpoint != nil {
@ -391,47 +384,42 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
} else { } else {
ip = ip.To16() ip = ip.To16()
} }
if peer.Spec.Endpoint.Port > 0 { if peer.Spec.Endpoint.Port > 0 && (ip != nil || peer.Spec.Endpoint.DNS != "") {
if ip != nil { endpoint = &wireguard.Endpoint{
endpoint = wireguard.NewEndpoint(ip, int(peer.Spec.Endpoint.Port)) DNSOrIP: wireguard.DNSOrIP{
} DNS: peer.Spec.Endpoint.DNS,
if peer.Spec.Endpoint.DNS != "" { IP: ip,
endpoint = wireguard.ParseEndpoint(fmt.Sprintf("%s:%d", peer.Spec.Endpoint.DNS, peer.Spec.Endpoint.Port)) },
Port: peer.Spec.Endpoint.Port,
} }
} }
} }
var key []byte
key, err := wgtypes.ParseKey(peer.Spec.PublicKey) if len(peer.Spec.PublicKey) > 0 {
if err != nil { key = []byte(peer.Spec.PublicKey)
level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
} }
var psk *wgtypes.Key var psk []byte
if k, err := wgtypes.ParseKey(peer.Spec.PresharedKey); err != nil { if len(peer.Spec.PresharedKey) > 0 {
// Set key to nil to avoid setting a key to the zero value wgtypes.Key{} psk = []byte(peer.Spec.PresharedKey)
psk = nil
} else {
psk = &k
} }
var pka time.Duration var pka int
if peer.Spec.PersistentKeepalive > 0 { if peer.Spec.PersistentKeepalive > 0 {
pka = time.Duration(peer.Spec.PersistentKeepalive) * time.Second pka = peer.Spec.PersistentKeepalive
} }
return &mesh.Peer{ return &mesh.Peer{
Name: peer.Name, Name: peer.Name,
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: aips,
AllowedIPs: aips, Endpoint: endpoint,
PersistentKeepaliveInterval: &pka, PersistentKeepalive: pka,
PresharedKey: psk, PresharedKey: psk,
PublicKey: key, PublicKey: key,
},
Endpoint: endpoint,
}, },
} }
} }
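Aside: the squat/kilo column of this hunk handles peer keys as typed wgtypes values (wgtypes.ParseKey, *wgtypes.Key for the preshared key) rather than raw []byte. Below is a minimal, self-contained sketch of that API, assuming only golang.zx2c4.com/wireguard/wgctrl/wgtypes; the zero-value comparison at the end is the same check used by Node.Ready further down in this diff.

package main

import (
	"fmt"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	// Generate a private key and derive the corresponding public key.
	priv, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	pub := priv.PublicKey()

	// ParseKey rejects strings that are not valid base64-encoded 32-byte keys,
	// which is why annotation values like "foo" no longer survive translation.
	if _, err := wgtypes.ParseKey("foo"); err != nil {
		fmt.Println("invalid key rejected:", err)
	}

	parsed, err := wgtypes.ParseKey(pub.String())
	if err != nil {
		panic(err)
	}
	// Keys are comparable arrays, so an unset key is simply the zero value.
	fmt.Println("key is set:", parsed != wgtypes.Key{})
}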
// CleanUp removes configuration applied to the backend. // CleanUp removes configuration applied to the backend.
func (pb *peerBackend) CleanUp(_ context.Context, _ string) error { func (pb *peerBackend) CleanUp(name string) error {
return nil return nil
} }
@ -446,14 +434,14 @@ func (pb *peerBackend) Get(name string) (*mesh.Peer, error) {
// Init initializes the backend; for this backend that means // Init initializes the backend; for this backend that means
// syncing the informer cache. // syncing the informer cache.
func (pb *peerBackend) Init(ctx context.Context) error { func (pb *peerBackend) Init(stop <-chan struct{}) error {
// Check the presence of the CRD peers.kilo.squat.ai. // Check the presence of the CRD peers.kilo.squat.ai.
if _, err := pb.extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, strings.Join([]string{v1alpha1.PeerPlural, v1alpha1.GroupName}, "."), metav1.GetOptions{}); err != nil { if _, err := pb.extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), strings.Join([]string{v1alpha1.PeerPlural, v1alpha1.GroupName}, "."), metav1.GetOptions{}); err != nil {
return fmt.Errorf("CRD is not present: %v", err) return fmt.Errorf("CRD is not present: %v", err)
} }
go pb.informer.Run(ctx.Done()) go pb.informer.Run(stop)
if ok := cache.WaitForCacheSync(ctx.Done(), func() bool { if ok := cache.WaitForCacheSync(stop, func() bool {
return pb.informer.HasSynced() return pb.informer.HasSynced()
}); !ok { }); !ok {
return errors.New("failed to sync peer cache") return errors.New("failed to sync peer cache")
@ -512,7 +500,7 @@ func (pb *peerBackend) List() ([]*mesh.Peer, error) {
} }
// Set sets the fields of a peer. // Set sets the fields of a peer.
func (pb *peerBackend) Set(ctx context.Context, name string, peer *mesh.Peer) error { func (pb *peerBackend) Set(name string, peer *mesh.Peer) error {
old, err := pb.lister.Get(name) old, err := pb.lister.Get(name)
if err != nil { if err != nil {
return fmt.Errorf("failed to find peer: %v", err) return fmt.Errorf("failed to find peer: %v", err)
@ -523,26 +511,22 @@ func (pb *peerBackend) Set(ctx context.Context, name string, peer *mesh.Peer) er
p.Spec.AllowedIPs[i] = peer.AllowedIPs[i].String() p.Spec.AllowedIPs[i] = peer.AllowedIPs[i].String()
} }
if peer.Endpoint != nil { if peer.Endpoint != nil {
var ip string
if peer.Endpoint.IP != nil {
ip = peer.Endpoint.IP.String()
}
p.Spec.Endpoint = &v1alpha1.PeerEndpoint{ p.Spec.Endpoint = &v1alpha1.PeerEndpoint{
DNSOrIP: v1alpha1.DNSOrIP{ DNSOrIP: v1alpha1.DNSOrIP{
IP: peer.Endpoint.IP().String(), IP: ip,
DNS: peer.Endpoint.DNS(), DNS: peer.Endpoint.DNS,
}, },
Port: uint32(peer.Endpoint.Port()), Port: peer.Endpoint.Port,
} }
} }
if peer.PersistentKeepaliveInterval == nil { p.Spec.PersistentKeepalive = peer.PersistentKeepalive
p.Spec.PersistentKeepalive = 0 p.Spec.PresharedKey = string(peer.PresharedKey)
} else { p.Spec.PublicKey = string(peer.PublicKey)
p.Spec.PersistentKeepalive = int(*peer.PersistentKeepaliveInterval / time.Second) if _, err = pb.client.KiloV1alpha1().Peers().Update(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
}
if peer.PresharedKey == nil {
p.Spec.PresharedKey = ""
} else {
p.Spec.PresharedKey = peer.PresharedKey.String()
}
p.Spec.PublicKey = peer.PublicKey.String()
if _, err = pb.client.KiloV1alpha1().Peers().Update(ctx, p, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update peer: %v", err) return fmt.Errorf("failed to update peer: %v", err)
} }
return nil return nil
@ -565,3 +549,35 @@ func normalizeIP(ip string) *net.IPNet {
ipNet.IP = i.To16() ipNet.IP = i.To16()
return ipNet return ipNet
} }
func parseEndpoint(endpoint string) *wireguard.Endpoint {
if len(endpoint) == 0 {
return nil
}
parts := strings.Split(endpoint, ":")
if len(parts) < 2 {
return nil
}
portRaw := parts[len(parts)-1]
hostRaw := strings.Trim(strings.Join(parts[:len(parts)-1], ":"), "[]")
port, err := strconv.ParseUint(portRaw, 10, 32)
if err != nil {
return nil
}
if len(validation.IsValidPortNum(int(port))) != 0 {
return nil
}
ip := net.ParseIP(hostRaw)
if ip == nil {
if len(validation.IsDNS1123Subdomain(hostRaw)) == 0 {
return &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{DNS: hostRaw}, Port: uint32(port)}
}
return nil
}
if ip4 := ip.To4(); ip4 != nil {
ip = ip4
} else {
ip = ip.To16()
}
return &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: ip}, Port: uint32(port)}
}
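Aside: for readers skimming the parseEndpoint helper above, here is a self-contained sketch of the same split-on-the-last-colon approach (bracketed IPv6 hosts, numeric port). The helper name splitHostPort and the hard-coded inputs are illustrative only, and the DNS-label validation done via k8s.io/apimachinery is omitted.

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort mirrors the approach of parseEndpoint above: the port is
// everything after the last colon, and IPv6 hosts may be wrapped in brackets.
// Hostname validation is intentionally left out of this sketch.
func splitHostPort(endpoint string) (string, uint64, bool) {
	parts := strings.Split(endpoint, ":")
	if len(parts) < 2 {
		return "", 0, false
	}
	host := strings.Trim(strings.Join(parts[:len(parts)-1], ":"), "[]")
	port, err := strconv.ParseUint(parts[len(parts)-1], 10, 32)
	if err != nil || port < 1 || port > 65535 {
		return "", 0, false
	}
	return host, port, true
}

func main() {
	for _, e := range []string{"10.0.0.1:51820", "[ff02::114]:51820", "example.com:51821", "10.0.0.1:100000000"} {
		host, port, ok := splitHostPort(e)
		switch {
		case !ok:
			fmt.Printf("%q: invalid endpoint\n", e)
		case net.ParseIP(host) != nil:
			fmt.Printf("%q: IP %s, port %d\n", e, host, port)
		default:
			fmt.Printf("%q: DNS name %s, port %d\n", e, host, port)
		}
	}
}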

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,39 +17,13 @@ package k8s
import ( import (
"net" "net"
"testing" "testing"
"time"
"github.com/kylelemons/godebug/pretty" "github.com/kylelemons/godebug/pretty"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/squat/kilo/pkg/mesh" "github.com/kilo-io/kilo/pkg/mesh"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
)
func mustKey() (k wgtypes.Key) {
var err error
if k, err = wgtypes.GeneratePrivateKey(); err != nil {
panic(err.Error())
}
return
}
func mustPSKKey() (key *wgtypes.Key) {
if k, err := wgtypes.GenerateKey(); err != nil {
panic(err.Error())
} else {
key = &k
}
return
}
var (
fooKey = mustKey()
pskKey = mustPSKKey()
second = time.Second
zero = time.Duration(0)
) )
func TestTranslateNode(t *testing.T) { func TestTranslateNode(t *testing.T) {
@ -80,19 +54,8 @@ func TestTranslateNode(t *testing.T) {
internalIPAnnotationKey: "10.0.0.2/32", internalIPAnnotationKey: "10.0.0.2/32",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2").To4(), Mask: net.CIDRMask(32, 32)}, InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
},
},
{
name: "valid ips with ipv6",
annotations: map[string]string{
endpointAnnotationKey: "[ff10::10]:51820",
internalIPAnnotationKey: "ff60::10/64",
},
out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("ff10::10").To16(), mesh.DefaultKiloPort),
InternalIP: &net.IPNet{IP: net.ParseIP("ff60::10").To16(), Mask: net.CIDRMask(64, 128)},
}, },
}, },
{ {
@ -105,7 +68,7 @@ func TestTranslateNode(t *testing.T) {
name: "normalize subnet", name: "normalize subnet",
annotations: map[string]string{}, annotations: map[string]string{},
out: &mesh.Node{ out: &mesh.Node{
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0").To4(), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(24, 32)},
}, },
subnet: "10.2.0.1/24", subnet: "10.2.0.1/24",
}, },
@ -113,7 +76,7 @@ func TestTranslateNode(t *testing.T) {
name: "valid subnet", name: "valid subnet",
annotations: map[string]string{}, annotations: map[string]string{},
out: &mesh.Node{ out: &mesh.Node{
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
}, },
subnet: "10.2.1.0/24", subnet: "10.2.1.0/24",
}, },
@ -145,7 +108,7 @@ func TestTranslateNode(t *testing.T) {
forceEndpointAnnotationKey: "-10.0.0.2:51821", forceEndpointAnnotationKey: "-10.0.0.2:51821",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
}, },
}, },
{ {
@ -155,7 +118,7 @@ func TestTranslateNode(t *testing.T) {
forceEndpointAnnotationKey: "10.0.0.2:51821", forceEndpointAnnotationKey: "10.0.0.2:51821",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.2")}, Port: 51821},
}, },
}, },
{ {
@ -164,7 +127,7 @@ func TestTranslateNode(t *testing.T) {
persistentKeepaliveKey: "25", persistentKeepaliveKey: "25",
}, },
out: &mesh.Node{ out: &mesh.Node{
PersistentKeepalive: 25 * time.Second, PersistentKeepalive: 25,
}, },
}, },
{ {
@ -174,7 +137,7 @@ func TestTranslateNode(t *testing.T) {
forceInternalIPAnnotationKey: "-10.1.0.2/24", forceInternalIPAnnotationKey: "-10.1.0.2/24",
}, },
out: &mesh.Node{ out: &mesh.Node{
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(24, 32)}, InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1"), Mask: net.CIDRMask(24, 32)},
NoInternalIP: false, NoInternalIP: false,
}, },
}, },
@ -185,7 +148,7 @@ func TestTranslateNode(t *testing.T) {
forceInternalIPAnnotationKey: "10.1.0.2/24", forceInternalIPAnnotationKey: "10.1.0.2/24",
}, },
out: &mesh.Node{ out: &mesh.Node{
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(24, 32)}, InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(24, 32)},
NoInternalIP: false, NoInternalIP: false,
}, },
}, },
@ -203,7 +166,7 @@ func TestTranslateNode(t *testing.T) {
forceEndpointAnnotationKey: "10.0.0.2:51821", forceEndpointAnnotationKey: "10.0.0.2:51821",
forceInternalIPAnnotationKey: "10.1.0.2/32", forceInternalIPAnnotationKey: "10.1.0.2/32",
internalIPAnnotationKey: "10.1.0.1/32", internalIPAnnotationKey: "10.1.0.1/32",
keyAnnotationKey: fooKey.String(), keyAnnotationKey: "foo",
lastSeenAnnotationKey: "1000000000", lastSeenAnnotationKey: "1000000000",
leaderAnnotationKey: "", leaderAnnotationKey: "",
locationAnnotationKey: "b", locationAnnotationKey: "b",
@ -214,45 +177,14 @@ func TestTranslateNode(t *testing.T) {
RegionLabelKey: "a", RegionLabelKey: "a",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.2")}, Port: 51821},
NoInternalIP: false,
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(32, 32)},
Key: fooKey,
LastSeen: 1000000000,
Leader: true,
Location: "b",
PersistentKeepalive: 25 * time.Second,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)},
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1").To4(), Mask: net.CIDRMask(16, 32)},
},
subnet: "10.2.1.0/24",
},
{
name: "complete with ipv6",
annotations: map[string]string{
endpointAnnotationKey: "10.0.0.1:51820",
forceEndpointAnnotationKey: "[1100::10]:51821",
forceInternalIPAnnotationKey: "10.1.0.2/32",
internalIPAnnotationKey: "10.1.0.1/32",
keyAnnotationKey: fooKey.String(),
lastSeenAnnotationKey: "1000000000",
leaderAnnotationKey: "",
locationAnnotationKey: "b",
persistentKeepaliveKey: "25",
wireGuardIPAnnotationKey: "10.4.0.1/16",
},
labels: map[string]string{
RegionLabelKey: "a",
},
out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("1100::10"), 51821),
NoInternalIP: false, NoInternalIP: false,
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(32, 32)}, InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(32, 32)},
Key: fooKey, Key: []byte("foo"),
LastSeen: 1000000000, LastSeen: 1000000000,
Leader: true, Leader: true,
Location: "b", Location: "b",
PersistentKeepalive: 25 * time.Second, PersistentKeepalive: 25,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)}, WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
}, },
@ -263,7 +195,7 @@ func TestTranslateNode(t *testing.T) {
annotations: map[string]string{ annotations: map[string]string{
endpointAnnotationKey: "10.0.0.1:51820", endpointAnnotationKey: "10.0.0.1:51820",
internalIPAnnotationKey: "", internalIPAnnotationKey: "",
keyAnnotationKey: fooKey.String(), keyAnnotationKey: "foo",
lastSeenAnnotationKey: "1000000000", lastSeenAnnotationKey: "1000000000",
locationAnnotationKey: "b", locationAnnotationKey: "b",
persistentKeepaliveKey: "25", persistentKeepaliveKey: "25",
@ -273,13 +205,13 @@ func TestTranslateNode(t *testing.T) {
RegionLabelKey: "a", RegionLabelKey: "a",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: 51820},
InternalIP: nil, InternalIP: nil,
Key: fooKey, Key: []byte("foo"),
LastSeen: 1000000000, LastSeen: 1000000000,
Leader: false, Leader: false,
Location: "b", Location: "b",
PersistentKeepalive: 25 * time.Second, PersistentKeepalive: 25,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)}, WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
}, },
@ -291,7 +223,7 @@ func TestTranslateNode(t *testing.T) {
endpointAnnotationKey: "10.0.0.1:51820", endpointAnnotationKey: "10.0.0.1:51820",
internalIPAnnotationKey: "10.1.0.1/32", internalIPAnnotationKey: "10.1.0.1/32",
forceInternalIPAnnotationKey: "", forceInternalIPAnnotationKey: "",
keyAnnotationKey: fooKey.String(), keyAnnotationKey: "foo",
lastSeenAnnotationKey: "1000000000", lastSeenAnnotationKey: "1000000000",
locationAnnotationKey: "b", locationAnnotationKey: "b",
persistentKeepaliveKey: "25", persistentKeepaliveKey: "25",
@ -301,14 +233,14 @@ func TestTranslateNode(t *testing.T) {
RegionLabelKey: "a", RegionLabelKey: "a",
}, },
out: &mesh.Node{ out: &mesh.Node{
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: 51820},
NoInternalIP: true, NoInternalIP: true,
InternalIP: nil, InternalIP: nil,
Key: fooKey, Key: []byte("foo"),
LastSeen: 1000000000, LastSeen: 1000000000,
Leader: false, Leader: false,
Location: "b", Location: "b",
PersistentKeepalive: 25 * time.Second, PersistentKeepalive: 25,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)}, WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
}, },
@ -334,13 +266,7 @@ func TestTranslatePeer(t *testing.T) {
}{ }{
{ {
name: "empty", name: "empty",
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "invalid ips", name: "invalid ips",
@ -350,13 +276,7 @@ func TestTranslatePeer(t *testing.T) {
"foo", "foo",
}, },
}, },
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "valid ips", name: "valid ips",
@ -368,12 +288,9 @@ func TestTranslatePeer(t *testing.T) {
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
},
PersistentKeepaliveInterval: &zero,
}, },
}, },
}, },
@ -388,13 +305,7 @@ func TestTranslatePeer(t *testing.T) {
Port: mesh.DefaultKiloPort, Port: mesh.DefaultKiloPort,
}, },
}, },
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "only endpoint port", name: "only endpoint port",
@ -403,13 +314,7 @@ func TestTranslatePeer(t *testing.T) {
Port: mesh.DefaultKiloPort, Port: mesh.DefaultKiloPort,
}, },
}, },
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "valid endpoint ip", name: "valid endpoint ip",
@ -423,29 +328,10 @@ func TestTranslatePeer(t *testing.T) {
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ Endpoint: &wireguard.Endpoint{
PersistentKeepaliveInterval: &zero, DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")},
Port: mesh.DefaultKiloPort,
}, },
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort),
},
},
},
{
name: "valid endpoint ipv6",
spec: v1alpha1.PeerSpec{
Endpoint: &v1alpha1.PeerEndpoint{
DNSOrIP: v1alpha1.DNSOrIP{
IP: "ff60::2",
},
Port: mesh.DefaultKiloPort,
},
},
out: &mesh.Peer{
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
Endpoint: wireguard.NewEndpoint(net.ParseIP("ff60::2").To16(), mesh.DefaultKiloPort),
}, },
}, },
}, },
@ -461,9 +347,9 @@ func TestTranslatePeer(t *testing.T) {
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
Endpoint: wireguard.ParseEndpoint("example.com:51820"), Endpoint: &wireguard.Endpoint{
PeerConfig: wgtypes.PeerConfig{ DNSOrIP: wireguard.DNSOrIP{DNS: "example.com"},
PersistentKeepaliveInterval: &zero, Port: mesh.DefaultKiloPort,
}, },
}, },
}, },
@ -473,25 +359,16 @@ func TestTranslatePeer(t *testing.T) {
spec: v1alpha1.PeerSpec{ spec: v1alpha1.PeerSpec{
PublicKey: "", PublicKey: "",
}, },
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "valid key", name: "valid key",
spec: v1alpha1.PeerSpec{ spec: v1alpha1.PeerSpec{
PublicKey: fooKey.String(), PublicKey: "foo",
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("foo"),
PublicKey: fooKey,
PersistentKeepaliveInterval: &zero,
},
}, },
}, },
}, },
@ -500,13 +377,7 @@ func TestTranslatePeer(t *testing.T) {
spec: v1alpha1.PeerSpec{ spec: v1alpha1.PeerSpec{
PersistentKeepalive: -1, PersistentKeepalive: -1,
}, },
out: &mesh.Peer{ out: &mesh.Peer{},
Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{
PersistentKeepaliveInterval: &zero,
},
},
},
}, },
{ {
name: "valid keepalive", name: "valid keepalive",
@ -515,23 +386,18 @@ func TestTranslatePeer(t *testing.T) {
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PersistentKeepalive: 1,
PersistentKeepaliveInterval: &second,
},
}, },
}, },
}, },
{ {
name: "valid preshared key", name: "valid preshared key",
spec: v1alpha1.PeerSpec{ spec: v1alpha1.PeerSpec{
PresharedKey: pskKey.String(), PresharedKey: "psk",
}, },
out: &mesh.Peer{ out: &mesh.Peer{
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PresharedKey: []byte("psk"),
PersistentKeepaliveInterval: &zero,
PresharedKey: pskKey,
},
}, },
}, },
}, },
@ -544,3 +410,52 @@ func TestTranslatePeer(t *testing.T) {
} }
} }
} }
func TestParseEndpoint(t *testing.T) {
for _, tc := range []struct {
name string
endpoint string
out *wireguard.Endpoint
}{
{
name: "empty",
endpoint: "",
out: nil,
},
{
name: "invalid IP",
endpoint: "10.0.0.:51820",
out: nil,
},
{
name: "invalid hostname",
endpoint: "foo-:51820",
out: nil,
},
{
name: "invalid port",
endpoint: "10.0.0.1:100000000",
out: nil,
},
{
name: "valid IP",
endpoint: "10.0.0.1:51820",
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
},
{
name: "valid IPv6",
endpoint: "[ff02::114]:51820",
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("ff02::114")}, Port: mesh.DefaultKiloPort},
},
{
name: "valid hostname",
endpoint: "foo:51821",
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{DNS: "foo"}, Port: 51821},
},
} {
endpoint := parseEndpoint(tc.endpoint)
if diff := pretty.Compare(endpoint, tc.out); diff != "" {
t.Errorf("test case %q: got diff: %v", tc.name, diff)
}
}
}

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,9 +18,8 @@ package versioned
import ( import (
"fmt" "fmt"
"net/http"
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1" kilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1"
discovery "k8s.io/client-go/discovery" discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest" rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol" flowcontrol "k8s.io/client-go/util/flowcontrol"
@ -54,45 +53,22 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
// NewForConfig creates a new Clientset for the given config. // NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable, // If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy. // NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c configShallowCopy := *c
if configShallowCopy.UserAgent == "" {
configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
}
// share the transport between all clients
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&configShallowCopy, httpClient)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 { if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
} }
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
} }
var cs Clientset var cs Clientset
var err error var err error
cs.kiloV1alpha1, err = kilov1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) cs.kiloV1alpha1, err = kilov1alpha1.NewForConfig(&configShallowCopy)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -102,11 +78,11 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
// NewForConfigOrDie creates a new Clientset for the given config and // NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config. // panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset { func NewForConfigOrDie(c *rest.Config) *Clientset {
cs, err := NewForConfig(c) var cs Clientset
if err != nil { cs.kiloV1alpha1 = kilov1alpha1.NewForConfigOrDie(c)
panic(err)
} cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return cs return &cs
} }
// New creates a new Clientset for the given RESTClient. // New creates a new Clientset for the given RESTClient.
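Aside: a hedged sketch of how these generated constructors are typically consumed — building a rest.Config from a kubeconfig and listing Peer resources. The kubeconfig path is illustrative, and the import path follows the squat/kilo column (the commit renames it to github.com/kilo-io/kilo).

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/squat/kilo/pkg/k8s/clientset/versioned"
)

func main() {
	// Load a rest.Config from a kubeconfig file; the path is illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig wires up the typed KiloV1alpha1 client and the discovery client.
	client, err := versioned.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	peers, err := client.KiloV1alpha1().Peers().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range peers.Items {
		fmt.Println("peer:", p.Name)
	}
}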

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,9 +17,9 @@
package fake package fake
import ( import (
clientset "github.com/squat/kilo/pkg/k8s/clientset/versioned" clientset "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1" kilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1"
fakekilov1alpha1 "github.com/squat/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake" fakekilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery" "k8s.io/client-go/discovery"
@ -72,10 +72,7 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker return c.tracker
} }
var ( var _ clientset.Interface = &Clientset{}
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)
// KiloV1alpha1 retrieves the KiloV1alpha1Client // KiloV1alpha1 retrieves the KiloV1alpha1Client
func (c *Clientset) KiloV1alpha1() kilov1alpha1.KiloV1alpha1Interface { func (c *Clientset) KiloV1alpha1() kilov1alpha1.KiloV1alpha1Interface {

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
package fake package fake
import ( import (
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" kilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
package scheme package scheme
import ( import (
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" kilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
package fake package fake
import ( import (
v1alpha1 "github.com/squat/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1"
rest "k8s.io/client-go/rest" rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing" testing "k8s.io/client-go/testing"
) )

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,7 +19,7 @@ package fake
import ( import (
"context" "context"
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels" labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"
@ -97,7 +97,7 @@ func (c *FakePeers) Update(ctx context.Context, peer *v1alpha1.Peer, opts v1.Upd
// Delete takes name of the peer and deletes it. Returns an error if one occurs. // Delete takes name of the peer and deletes it. Returns an error if one occurs.
func (c *FakePeers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { func (c *FakePeers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake. _, err := c.Fake.
Invokes(testing.NewRootDeleteActionWithOptions(peersResource, name, opts), &v1alpha1.Peer{}) Invokes(testing.NewRootDeleteAction(peersResource, name), &v1alpha1.Peer{})
return err return err
} }

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,10 +17,8 @@
package v1alpha1 package v1alpha1
import ( import (
"net/http" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/scheme"
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
"github.com/squat/kilo/pkg/k8s/clientset/versioned/scheme"
rest "k8s.io/client-go/rest" rest "k8s.io/client-go/rest"
) )
@ -39,28 +37,12 @@ func (c *KiloV1alpha1Client) Peers() PeerInterface {
} }
// NewForConfig creates a new KiloV1alpha1Client for the given config. // NewForConfig creates a new KiloV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*KiloV1alpha1Client, error) { func NewForConfig(c *rest.Config) (*KiloV1alpha1Client, error) {
config := *c config := *c
if err := setConfigDefaults(&config); err != nil { if err := setConfigDefaults(&config); err != nil {
return nil, err return nil, err
} }
httpClient, err := rest.HTTPClientFor(&config) client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new KiloV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*KiloV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -20,8 +20,8 @@ import (
"context" "context"
"time" "time"
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
scheme "github.com/squat/kilo/pkg/k8s/clientset/versioned/scheme" scheme "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types" types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch" watch "k8s.io/apimachinery/pkg/watch"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -21,9 +21,9 @@ import (
sync "sync" sync "sync"
time "time" time "time"
versioned "github.com/squat/kilo/pkg/k8s/clientset/versioned" versioned "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
internalinterfaces "github.com/squat/kilo/pkg/k8s/informers/internalinterfaces" internalinterfaces "github.com/kilo-io/kilo/pkg/k8s/informers/internalinterfaces"
kilo "github.com/squat/kilo/pkg/k8s/informers/kilo" kilo "github.com/kilo-io/kilo/pkg/k8s/informers/kilo"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,7 +19,7 @@ package informers
import ( import (
"fmt" "fmt"
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache" cache "k8s.io/client-go/tools/cache"
) )

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -19,7 +19,7 @@ package internalinterfaces
import ( import (
time "time" time "time"
versioned "github.com/squat/kilo/pkg/k8s/clientset/versioned" versioned "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache" cache "k8s.io/client-go/tools/cache"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,8 +17,8 @@
package kilo package kilo
import ( import (
internalinterfaces "github.com/squat/kilo/pkg/k8s/informers/internalinterfaces" internalinterfaces "github.com/kilo-io/kilo/pkg/k8s/informers/internalinterfaces"
v1alpha1 "github.com/squat/kilo/pkg/k8s/informers/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/informers/kilo/v1alpha1"
) )
// Interface provides access to each of this group's versions. // Interface provides access to each of this group's versions.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
package v1alpha1 package v1alpha1
import ( import (
internalinterfaces "github.com/squat/kilo/pkg/k8s/informers/internalinterfaces" internalinterfaces "github.com/kilo-io/kilo/pkg/k8s/informers/internalinterfaces"
) )
// Interface provides access to all the informers in this group version. // Interface provides access to all the informers in this group version.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -20,10 +20,10 @@ import (
"context" "context"
time "time" time "time"
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" kilov1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
versioned "github.com/squat/kilo/pkg/k8s/clientset/versioned" versioned "github.com/kilo-io/kilo/pkg/k8s/clientset/versioned"
internalinterfaces "github.com/squat/kilo/pkg/k8s/informers/internalinterfaces" internalinterfaces "github.com/kilo-io/kilo/pkg/k8s/informers/internalinterfaces"
v1alpha1 "github.com/squat/kilo/pkg/k8s/listers/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/listers/kilo/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch" watch "k8s.io/apimachinery/pkg/watch"

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2022 the Kilo authors // Copyright 2021 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -17,7 +17,7 @@
package v1alpha1 package v1alpha1
import ( import (
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1" v1alpha1 "github.com/kilo-io/kilo/pkg/k8s/apis/kilo/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"

View File

@ -15,13 +15,10 @@
package mesh package mesh
import ( import (
"context"
"net" "net"
"time" "time"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/kilo-io/kilo/pkg/wireguard"
"github.com/squat/kilo/pkg/wireguard"
) )
const ( const (
@ -58,7 +55,7 @@ const (
// Node represents a node in the network. // Node represents a node in the network.
type Node struct { type Node struct {
Endpoint *wireguard.Endpoint Endpoint *wireguard.Endpoint
Key wgtypes.Key Key []byte
NoInternalIP bool NoInternalIP bool
InternalIP *net.IPNet InternalIP *net.IPNet
// LastSeen is a Unix time for the last time // LastSeen is a Unix time for the last time
@ -69,23 +66,18 @@ type Node struct {
Leader bool Leader bool
Location string Location string
Name string Name string
PersistentKeepalive time.Duration PersistentKeepalive int
Subnet *net.IPNet Subnet *net.IPNet
WireGuardIP *net.IPNet WireGuardIP *net.IPNet
// DiscoveredEndpoints cannot be DNS endpoints, only net.UDPAddr. DiscoveredEndpoints map[string]*wireguard.Endpoint
DiscoveredEndpoints map[string]*net.UDPAddr AllowedLocationIPs []*net.IPNet
AllowedLocationIPs []net.IPNet
Granularity Granularity Granularity Granularity
} }
// Ready indicates whether or not the node is ready. // Ready indicates whether or not the node is ready.
func (n *Node) Ready() bool { func (n *Node) Ready() bool {
// Nodes that are not leaders will not have WireGuardIPs, so it is not required. // Nodes that are not leaders will not have WireGuardIPs, so it is not required.
return n != nil && return n != nil && n.Endpoint != nil && !(n.Endpoint.IP == nil && n.Endpoint.DNS == "") && n.Endpoint.Port != 0 && n.Key != nil && n.Subnet != nil && time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second)
n.Endpoint.Ready() &&
n.Key != wgtypes.Key{} &&
n.Subnet != nil &&
time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second)
} }
// Peer represents a peer in the network. // Peer represents a peer in the network.
@ -100,10 +92,7 @@ type Peer struct {
// will not declare their endpoint and instead allow it to be // will not declare their endpoint and instead allow it to be
// discovered. // discovered.
func (p *Peer) Ready() bool { func (p *Peer) Ready() bool {
return p != nil && return p != nil && p.AllowedIPs != nil && len(p.AllowedIPs) != 0 && p.PublicKey != nil
p.AllowedIPs != nil &&
len(p.AllowedIPs) != 0 &&
p.PublicKey != wgtypes.Key{} // If Key was not set, it will be wgtypes.Key{}.
} }
// EventType describes what kind of an action an event represents. // EventType describes what kind of an action an event represents.
@ -147,11 +136,11 @@ type Backend interface {
// clean up any changes applied to the backend, // clean up any changes applied to the backend,
// and watch for changes to nodes. // and watch for changes to nodes.
type NodeBackend interface { type NodeBackend interface {
CleanUp(context.Context, string) error CleanUp(string) error
Get(string) (*Node, error) Get(string) (*Node, error)
Init(context.Context) error Init(<-chan struct{}) error
List() ([]*Node, error) List() ([]*Node, error)
Set(context.Context, string, *Node) error Set(string, *Node) error
Watch() <-chan *NodeEvent Watch() <-chan *NodeEvent
} }
@ -161,10 +150,10 @@ type NodeBackend interface {
// clean up any changes applied to the backend, // clean up any changes applied to the backend,
// and watch for changes to peers. // and watch for changes to peers.
type PeerBackend interface { type PeerBackend interface {
CleanUp(context.Context, string) error CleanUp(string) error
Get(string) (*Peer, error) Get(string) (*Peer, error)
Init(context.Context) error Init(<-chan struct{}) error
List() ([]*Peer, error) List() ([]*Peer, error)
Set(context.Context, string, *Peer) error Set(string, *Peer) error
Watch() <-chan *PeerEvent Watch() <-chan *PeerEvent
} }
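Aside: to make the interface change concrete, here is a minimal, hypothetical in-memory NodeBackend written against the context-aware variant shown in the squat/kilo column. It is a sketch only — the real backends are the Kubernetes implementations earlier in this diff — and it imports the mesh package from the squat/kilo path.

package memorybackend

import (
	"context"
	"sync"

	"github.com/squat/kilo/pkg/mesh"
)

// nodeBackend is a non-persistent NodeBackend used only to illustrate the
// context-aware interface; it is not part of the Kilo tree.
type nodeBackend struct {
	mu     sync.RWMutex
	nodes  map[string]*mesh.Node
	events chan *mesh.NodeEvent
}

// New returns the sketch backend as a mesh.NodeBackend.
func New() mesh.NodeBackend {
	return &nodeBackend{
		nodes:  make(map[string]*mesh.Node),
		events: make(chan *mesh.NodeEvent),
	}
}

func (b *nodeBackend) CleanUp(_ context.Context, name string) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	delete(b.nodes, name)
	return nil
}

func (b *nodeBackend) Get(name string) (*mesh.Node, error) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.nodes[name], nil
}

// Init would normally sync an informer cache; there is nothing to sync here.
func (b *nodeBackend) Init(_ context.Context) error { return nil }

func (b *nodeBackend) List() ([]*mesh.Node, error) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	nodes := make([]*mesh.Node, 0, len(b.nodes))
	for _, n := range b.nodes {
		nodes = append(nodes, n)
	}
	return nodes, nil
}

func (b *nodeBackend) Set(_ context.Context, name string, node *mesh.Node) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.nodes[name] = node
	return nil
}

func (b *nodeBackend) Watch() <-chan *mesh.NodeEvent { return b.events }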

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux
// +build linux // +build linux
package mesh package mesh

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux
// +build linux // +build linux
package mesh package mesh
@ -60,7 +59,6 @@ func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error)
ignore[oneAddressCIDR(ip.IP).String()] = struct{}{} ignore[oneAddressCIDR(ip.IP).String()] = struct{}{}
} }
} }
var hostPriv, hostPub []*net.IPNet var hostPriv, hostPub []*net.IPNet
{ {
// Check IPs to which hostname resolves first. // Check IPs to which hostname resolves first.
@ -73,9 +71,6 @@ func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error)
if !ok { if !ok {
continue continue
} }
if isLocal(ip.IP) {
continue
}
ip.Mask = mask ip.Mask = mask
if isPublic(ip.IP) { if isPublic(ip.IP) {
hostPub = append(hostPub, ip) hostPub = append(hostPub, ip)

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -21,7 +21,7 @@ import (
"github.com/awalterschulze/gographviz" "github.com/awalterschulze/gographviz"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
// Dot generates a Graphviz graph of the Topology in DOT format. // Dot generates a Graphviz graph of the Topology in DOT format.
@ -167,9 +167,8 @@ func nodeLabel(location, name string, cidr *net.IPNet, priv, wgIP net.IP, endpoi
if wgIP != nil { if wgIP != nil {
label = append(label, wgIP.String()) label = append(label, wgIP.String())
} }
str := endpoint.String() if endpoint != nil {
if str != "" { label = append(label, endpoint.String())
label = append(label, str)
} }
return graphEscape(strings.Join(label, "\\n")) return graphEscape(strings.Join(label, "\\n"))
} }
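Aside: the Dot and nodeLabel code above builds on github.com/awalterschulze/gographviz. A tiny, hypothetical sketch of that API with made-up node names follows — the real method derives the graph structure and labels from the Topology.

package main

import (
	"fmt"

	"github.com/awalterschulze/gographviz"
)

func main() {
	g := gographviz.NewGraph()
	if err := g.SetName("kilo"); err != nil {
		panic(err)
	}
	if err := g.SetDir(true); err != nil {
		panic(err)
	}
	// Illustrative nodes and edge; Kilo derives these from the Topology.
	if err := g.AddNode("kilo", "nodeA", nil); err != nil {
		panic(err)
	}
	if err := g.AddNode("kilo", "nodeB", nil); err != nil {
		panic(err)
	}
	if err := g.AddEdge("nodeA", "nodeB", true, nil); err != nil {
		panic(err)
	}
	fmt.Println(g.String()) // prints the graph in DOT format
}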

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux
// +build linux // +build linux
package mesh package mesh
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net" "net"
@ -31,14 +29,12 @@ import (
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"golang.zx2c4.com/wireguard/wgctrl"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"github.com/squat/kilo/pkg/encapsulation" "github.com/kilo-io/kilo/pkg/encapsulation"
"github.com/squat/kilo/pkg/iproute" "github.com/kilo-io/kilo/pkg/iproute"
"github.com/squat/kilo/pkg/iptables" "github.com/kilo-io/kilo/pkg/iptables"
"github.com/squat/kilo/pkg/route" "github.com/kilo-io/kilo/pkg/route"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
const ( const (
@ -46,32 +42,34 @@ const (
kiloPath = "/var/lib/kilo" kiloPath = "/var/lib/kilo"
// privateKeyPath is the filepath where the WireGuard private key is stored. // privateKeyPath is the filepath where the WireGuard private key is stored.
privateKeyPath = kiloPath + "/key" privateKeyPath = kiloPath + "/key"
// confPath is the filepath where the WireGuard configuration is stored.
confPath = kiloPath + "/conf"
) )
// Mesh is able to create Kilo network meshes. // Mesh is able to create Kilo network meshes.
type Mesh struct { type Mesh struct {
Backend Backend
cleanUpIface bool cleanUpIface bool
cni bool cni bool
cniPath string cniPath string
enc encapsulation.Encapsulator enc encapsulation.Encapsulator
externalIP *net.IPNet externalIP *net.IPNet
granularity Granularity granularity Granularity
hostname string hostname string
internalIP *net.IPNet internalIP *net.IPNet
ipTables *iptables.Controller ipTables *iptables.Controller
kiloIface int kiloIface int
kiloIfaceName string key []byte
local bool local bool
port int port uint32
priv wgtypes.Key priv []byte
privIface int privIface int
pub wgtypes.Key pub []byte
resyncPeriod time.Duration resyncPeriod time.Duration
iptablesForwardRule bool stop chan struct{}
subnet *net.IPNet subnet *net.IPNet
table *route.Table table *route.Table
wireGuardIP *net.IPNet wireGuardIP *net.IPNet
// nodes and peers are mutable fields in the struct // nodes and peers are mutable fields in the struct
// and need to be guarded. // and need to be guarded.
@ -88,27 +86,23 @@ type Mesh struct {
} }
// New returns a new Mesh instance. // New returns a new Mesh instance.
func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) { func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, logger log.Logger) (*Mesh, error) {
if err := os.MkdirAll(kiloPath, 0700); err != nil { if err := os.MkdirAll(kiloPath, 0700); err != nil {
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err) return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
} }
privateB, err := ioutil.ReadFile(privateKeyPath) private, err := ioutil.ReadFile(privateKeyPath)
if err != nil && !os.IsNotExist(err) { private = bytes.Trim(private, "\n")
return nil, fmt.Errorf("failed to read private key file: %v", err)
}
privateB = bytes.Trim(privateB, "\n")
private, err := wgtypes.ParseKey(string(privateB))
if err != nil { if err != nil {
level.Warn(logger).Log("msg", "no private key found on disk; generating one now") level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
if private, err = wgtypes.GeneratePrivateKey(); err != nil { if private, err = wireguard.GenKey(); err != nil {
return nil, err return nil, err
} }
} }
public := private.PublicKey() public, err := wireguard.PubKey(private)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := ioutil.WriteFile(privateKeyPath, []byte(private.String()), 0600); err != nil { if err := ioutil.WriteFile(privateKeyPath, private, 0600); err != nil {
return nil, fmt.Errorf("failed to write private key to disk: %v", err) return nil, fmt.Errorf("failed to write private key to disk: %v", err)
} }
cniIndex, err := cniDeviceIndex() cniIndex, err := cniDeviceIndex()
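As a minimal sketch of the load-or-generate key handling above (assuming the wgtypes-based variant shown on the left; the helper name and path argument are illustrative):

package example

import (
	"bytes"
	"io/ioutil"
	"os"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// loadOrGenerateKey mirrors the logic above: read the private key from disk,
// generate a fresh one if nothing usable is found, and persist it with 0600
// permissions so the node keeps a stable identity across restarts.
func loadOrGenerateKey(path string) (wgtypes.Key, error) {
	b, readErr := ioutil.ReadFile(path)
	if readErr != nil && !os.IsNotExist(readErr) {
		return wgtypes.Key{}, readErr
	}
	key, parseErr := wgtypes.ParseKey(string(bytes.TrimSpace(b)))
	if readErr != nil || parseErr != nil {
		var genErr error
		// No usable key on disk; generate a fresh one.
		if key, genErr = wgtypes.GeneratePrivateKey(); genErr != nil {
			return wgtypes.Key{}, genErr
		}
	}
	if err := ioutil.WriteFile(path, []byte(key.String()), 0600); err != nil {
		return wgtypes.Key{}, err
	}
	return key, nil
}

The corresponding public key is then derived with key.PublicKey(), as in the constructor above, and published to the backend.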
@ -149,41 +143,34 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
enc = encapsulation.Noop(enc.Strategy()) enc = encapsulation.Noop(enc.Strategy())
level.Debug(logger).Log("msg", "running without a private IP address") level.Debug(logger).Log("msg", "running without a private IP address")
} }
var externalIP *net.IPNet
if prioritisePrivateAddr && privateIP != nil {
externalIP = privateIP
} else {
externalIP = publicIP
}
level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String())) level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
ipTables, err := iptables.New(iptables.WithRegisterer(registerer), iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod)) ipTables, err := iptables.New(iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to IP tables controller: %v", err) return nil, fmt.Errorf("failed to IP tables controller: %v", err)
} }
mesh := Mesh{ return &Mesh{
Backend: backend, Backend: backend,
cleanUpIface: cleanUpIface, cleanUpIface: cleanUpIface,
cni: cni, cni: cni,
cniPath: cniPath, cniPath: cniPath,
enc: enc, enc: enc,
externalIP: externalIP, externalIP: publicIP,
granularity: granularity, granularity: granularity,
hostname: hostname, hostname: hostname,
internalIP: privateIP, internalIP: privateIP,
ipTables: ipTables, ipTables: ipTables,
kiloIface: kiloIface, kiloIface: kiloIface,
kiloIfaceName: iface, nodes: make(map[string]*Node),
nodes: make(map[string]*Node), peers: make(map[string]*Peer),
peers: make(map[string]*Peer), port: port,
port: port, priv: private,
priv: private, privIface: privIface,
privIface: privIface, pub: public,
pub: public, resyncPeriod: resyncPeriod,
resyncPeriod: resyncPeriod, local: local,
iptablesForwardRule: iptablesForwardRule, stop: make(chan struct{}),
local: local, subnet: subnet,
subnet: subnet, table: route.NewTable(),
table: route.NewTable(),
errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "kilo_errors_total", Name: "kilo_errors_total",
Help: "Number of errors that occurred while administering the mesh.", Help: "Number of errors that occurred while administering the mesh.",
@ -205,20 +192,12 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
Help: "Number of reconciliation attempts.", Help: "Number of reconciliation attempts.",
}), }),
logger: logger, logger: logger,
} }, nil
registerer.MustRegister(
mesh.errorCounter,
mesh.leaderGuage,
mesh.nodesGuage,
mesh.peersGuage,
mesh.reconcileCounter,
)
return &mesh, nil
} }
// Run starts the mesh. // Run starts the mesh.
func (m *Mesh) Run(ctx context.Context) error { func (m *Mesh) Run() error {
if err := m.Nodes().Init(ctx); err != nil { if err := m.Nodes().Init(m.stop); err != nil {
return fmt.Errorf("failed to initialize node backend: %v", err) return fmt.Errorf("failed to initialize node backend: %v", err)
} }
// Try to set the CNI config quickly. // Try to set the CNI config quickly.
@ -230,14 +209,14 @@ func (m *Mesh) Run(ctx context.Context) error {
level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err)) level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
} }
} }
if err := m.Peers().Init(ctx); err != nil { if err := m.Peers().Init(m.stop); err != nil {
return fmt.Errorf("failed to initialize peer backend: %v", err) return fmt.Errorf("failed to initialize peer backend: %v", err)
} }
ipTablesErrors, err := m.ipTables.Run(ctx.Done()) ipTablesErrors, err := m.ipTables.Run(m.stop)
if err != nil { if err != nil {
return fmt.Errorf("failed to watch for IP tables updates: %v", err) return fmt.Errorf("failed to watch for IP tables updates: %v", err)
} }
routeErrors, err := m.table.Run(ctx.Done()) routeErrors, err := m.table.Run(m.stop)
if err != nil { if err != nil {
return fmt.Errorf("failed to watch for route table updates: %v", err) return fmt.Errorf("failed to watch for route table updates: %v", err)
} }
@ -247,7 +226,7 @@ func (m *Mesh) Run(ctx context.Context) error {
select { select {
case err = <-ipTablesErrors: case err = <-ipTablesErrors:
case err = <-routeErrors: case err = <-routeErrors:
case <-ctx.Done(): case <-m.stop:
return return
} }
if err != nil { if err != nil {
@ -266,11 +245,11 @@ func (m *Mesh) Run(ctx context.Context) error {
for { for {
select { select {
case ne = <-nw: case ne = <-nw:
m.syncNodes(ctx, ne) m.syncNodes(ne)
case pe = <-pw: case pe = <-pw:
m.syncPeers(pe) m.syncPeers(pe)
case <-checkIn.C: case <-checkIn.C:
m.checkIn(ctx) m.checkIn()
checkIn.Reset(checkInPeriod) checkIn.Reset(checkInPeriod)
case <-resync.C: case <-resync.C:
if m.cni { if m.cni {
@ -278,18 +257,18 @@ func (m *Mesh) Run(ctx context.Context) error {
} }
m.applyTopology() m.applyTopology()
resync.Reset(m.resyncPeriod) resync.Reset(m.resyncPeriod)
case <-ctx.Done(): case <-m.stop:
return nil return nil
} }
} }
} }
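A caller-side sketch of the context-driven lifecycle used by the variant on the left: the mesh runs until the context is cancelled, for example by SIGINT or SIGTERM, instead of relying on a separate stop channel. The constructor is elided and the runner interface is only a stand-in for the mesh type.

package example

import (
	"context"
	"os/signal"
	"syscall"
)

// runner is a stand-in for the mesh type; only the Run signature matters here.
type runner interface {
	Run(context.Context) error
}

// run blocks until the signal-bound context is cancelled or Run returns an error.
func run(m runner) error {
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()
	return m.Run(ctx)
}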
func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) { func (m *Mesh) syncNodes(e *NodeEvent) {
logger := log.With(m.logger, "event", e.Type) logger := log.With(m.logger, "event", e.Type)
level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type) level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
if isSelf(m.hostname, e.Node) { if isSelf(m.hostname, e.Node) {
level.Debug(logger).Log("msg", "processing local node", "node", e.Node) level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
m.handleLocal(ctx, e.Node) m.handleLocal(e.Node)
return return
} }
var diff bool var diff bool
@ -326,7 +305,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
var diff bool var diff bool
m.mu.Lock() m.mu.Lock()
// Peers are indexed by public key. // Peers are indexed by public key.
key := e.Peer.PublicKey.String() key := string(e.Peer.PublicKey)
if !e.Peer.Ready() { if !e.Peer.Ready() {
// Trace non-ready peers by their presence in the mesh. // Trace non-ready peers by their presence in the mesh.
_, ok := m.peers[key] _, ok := m.peers[key]
@ -336,8 +315,8 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
case AddEvent: case AddEvent:
fallthrough fallthrough
case UpdateEvent: case UpdateEvent:
if e.Old != nil && key != e.Old.PublicKey.String() { if e.Old != nil && key != string(e.Old.PublicKey) {
delete(m.peers, e.Old.PublicKey.String()) delete(m.peers, string(e.Old.PublicKey))
diff = true diff = true
} }
if !peersAreEqual(m.peers[key], e.Peer) { if !peersAreEqual(m.peers[key], e.Peer) {
@ -357,7 +336,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
// checkIn will try to update the local node's LastSeen timestamp // checkIn will try to update the local node's LastSeen timestamp
// in the backend. // in the backend.
func (m *Mesh) checkIn(ctx context.Context) { func (m *Mesh) checkIn() {
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()
n := m.nodes[m.hostname] n := m.nodes[m.hostname]
@ -367,7 +346,7 @@ func (m *Mesh) checkIn(ctx context.Context) {
} }
oldTime := n.LastSeen oldTime := n.LastSeen
n.LastSeen = time.Now().Unix() n.LastSeen = time.Now().Unix()
if err := m.Nodes().Set(ctx, m.hostname, n); err != nil { if err := m.Nodes().Set(m.hostname, n); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n) level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
m.errorCounter.WithLabelValues("checkin").Inc() m.errorCounter.WithLabelValues("checkin").Inc()
// Revert time. // Revert time.
@ -377,12 +356,10 @@ func (m *Mesh) checkIn(ctx context.Context) {
level.Debug(m.logger).Log("msg", "successfully checked in local node in backend") level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
} }
func (m *Mesh) handleLocal(ctx context.Context, n *Node) { func (m *Mesh) handleLocal(n *Node) {
// Allow the IPs to be overridden. // Allow the IPs to be overridden.
if !n.Endpoint.Ready() { if n.Endpoint == nil || (n.Endpoint.DNS == "" && n.Endpoint.IP == nil) {
e := wireguard.NewEndpoint(m.externalIP.IP, m.port) n.Endpoint = &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: m.externalIP.IP}, Port: m.port}
level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
n.Endpoint = e
} }
if n.InternalIP == nil && !n.NoInternalIP { if n.InternalIP == nil && !n.NoInternalIP {
n.InternalIP = m.internalIP n.InternalIP = m.internalIP
@ -408,7 +385,7 @@ func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
} }
if !nodesAreEqual(n, local) { if !nodesAreEqual(n, local) {
level.Debug(m.logger).Log("msg", "local node differs from backend") level.Debug(m.logger).Log("msg", "local node differs from backend")
if err := m.Nodes().Set(ctx, m.hostname, local); err != nil { if err := m.Nodes().Set(m.hostname, local); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local) level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
m.errorCounter.WithLabelValues("local").Inc() m.errorCounter.WithLabelValues("local").Inc()
return return
@ -476,26 +453,22 @@ func (m *Mesh) applyTopology() {
m.errorCounter.WithLabelValues("apply").Inc() m.errorCounter.WithLabelValues("apply").Inc()
return return
} }
// Find the old configuration.
wgClient, err := wgctrl.New() oldConfDump, err := wireguard.ShowDump(link.Attrs().Name)
if err != nil { if err != nil {
level.Error(m.logger).Log("error", err) level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc() m.errorCounter.WithLabelValues("apply").Inc()
return return
} }
defer wgClient.Close() oldConf, err := wireguard.ParseDump(oldConfDump)
// wgDevice is the current configuration of the wg interface.
wgDevice, err := wgClient.Device(m.kiloIfaceName)
if err != nil { if err != nil {
level.Error(m.logger).Log("error", err) level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc() m.errorCounter.WithLabelValues("apply").Inc()
return return
} }
natEndpoints := discoverNATEndpoints(nodes, peers, oldConf, m.logger)
natEndpoints := discoverNATEndpoints(nodes, peers, wgDevice, m.logger)
nodes[m.hostname].DiscoveredEndpoints = natEndpoints nodes[m.hostname].DiscoveredEndpoints = natEndpoints
t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port(), m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive, m.logger) t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive, m.logger)
if err != nil { if err != nil {
level.Error(m.logger).Log("error", err) level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc() m.errorCounter.WithLabelValues("apply").Inc()
@ -507,8 +480,19 @@ func (m *Mesh) applyTopology() {
} else { } else {
m.wireGuardIP = nil m.wireGuardIP = nil
} }
ipRules := t.Rules(m.cni, m.iptablesForwardRule) conf := t.Conf()
buf, err := conf.Bytes()
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
if err := ioutil.WriteFile(confPath, buf, 0600); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
ipRules := t.Rules(m.cni)
// If we are handling local routes, ensure the local // If we are handling local routes, ensure the local
// tunnel has an IP address and IPIP traffic is allowed. // tunnel has an IP address and IPIP traffic is allowed.
if m.enc.Strategy() != encapsulation.Never && m.local { if m.enc.Strategy() != encapsulation.Never && m.local {
@ -524,9 +508,7 @@ func (m *Mesh) applyTopology() {
break break
} }
} }
ipRules = append(ipRules, m.enc.Rules(cidrs)...)
ipRules = append(m.enc.Rules(cidrs), ipRules...)
// If we are handling local routes, ensure the local // If we are handling local routes, ensure the local
// tunnel has an IP address. // tunnel has an IP address.
if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil { if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
@ -549,12 +531,10 @@ func (m *Mesh) applyTopology() {
} }
// Setting the WireGuard configuration interrupts existing connections // Setting the WireGuard configuration interrupts existing connections
// so only set the configuration if it has changed. // so only set the configuration if it has changed.
conf := t.Conf() equal := conf.Equal(oldConf)
equal, diff := conf.Equal(wgDevice)
if !equal { if !equal {
level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff) level.Info(m.logger).Log("msg", "WireGuard configurations are different")
level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig()) if err := wireguard.SetConf(link.Attrs().Name, confPath); err != nil {
if err := wgClient.ConfigureDevice(m.kiloIfaceName, conf.WGConfig()); err != nil {
level.Error(m.logger).Log("error", err) level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc() m.errorCounter.WithLabelValues("apply").Inc()
return return
@ -583,6 +563,23 @@ func (m *Mesh) applyTopology() {
} }
} }
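A minimal sketch of the compare-then-apply step above, assuming the wgctrl-based variant: read the live device state and only reconfigure when the desired configuration differs, because reconfiguring interrupts existing WireGuard sessions. The equal callback stands in for Kilo's own Conf.Equal comparison.

package example

import (
	"golang.zx2c4.com/wireguard/wgctrl"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// applyIfChanged reconfigures the interface only when the desired
// configuration differs from what the kernel currently reports.
func applyIfChanged(iface string, desired wgtypes.Config, equal func(*wgtypes.Device, wgtypes.Config) bool) error {
	client, err := wgctrl.New()
	if err != nil {
		return err
	}
	defer client.Close()

	device, err := client.Device(iface)
	if err != nil {
		return err
	}
	if equal(device, desired) {
		// Nothing to do; leave existing sessions untouched.
		return nil
	}
	return client.ConfigureDevice(iface, desired)
}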
// RegisterMetrics registers Prometheus metrics on the given Prometheus
// registerer.
func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
r.MustRegister(
m.errorCounter,
m.leaderGuage,
m.nodesGuage,
m.peersGuage,
m.reconcileCounter,
)
}
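A usage sketch for RegisterMetrics: register the mesh's collectors on a dedicated Prometheus registry and expose them over HTTP. The metricsRegisterer interface is only a stand-in for the mesh type.

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// metricsRegisterer is a stand-in for the mesh type; only RegisterMetrics matters here.
type metricsRegisterer interface {
	RegisterMetrics(prometheus.Registerer)
}

// serveMetrics registers the collectors on a fresh registry and serves them.
func serveMetrics(m metricsRegisterer, addr string) error {
	registry := prometheus.NewRegistry()
	m.RegisterMetrics(registry)
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	return http.ListenAndServe(addr, mux)
}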
// Stop stops the mesh.
func (m *Mesh) Stop() {
close(m.stop)
}
func (m *Mesh) cleanUp() { func (m *Mesh) cleanUp() {
if err := m.ipTables.CleanUp(); err != nil { if err := m.ipTables.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err)) level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
@ -592,27 +589,23 @@ func (m *Mesh) cleanUp() {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err)) level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc() m.errorCounter.WithLabelValues("cleanUp").Inc()
} }
if err := os.Remove(confPath); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if m.cleanUpIface { if m.cleanUpIface {
if err := iproute.RemoveInterface(m.kiloIface); err != nil { if err := iproute.RemoveInterface(m.kiloIface); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err)) level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc() m.errorCounter.WithLabelValues("cleanUp").Inc()
} }
} }
{ if err := m.Nodes().CleanUp(m.hostname); err != nil {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
defer cancel() m.errorCounter.WithLabelValues("cleanUp").Inc()
if err := m.Nodes().CleanUp(ctx, m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
} }
{ if err := m.Peers().CleanUp(m.hostname); err != nil {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
defer cancel() m.errorCounter.WithLabelValues("cleanUp").Inc()
if err := m.Peers().CleanUp(ctx, m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
} }
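The bounded clean-up calls on the left follow a small pattern that can be sketched as follows (the helper is illustrative): each backend call gets its own short-lived context so a hung API server cannot stall shutdown indefinitely.

package example

import (
	"context"
	"time"
)

// withTimeout runs fn with its own bounded context; the code above uses a
// 10-second deadline per clean-up call.
func withTimeout(d time.Duration, fn func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return fn(ctx)
}

For example, the node clean-up above becomes withTimeout(10*time.Second, func(ctx context.Context) error { return m.Nodes().CleanUp(ctx, m.hostname) }).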
if err := m.enc.CleanUp(); err != nil { if err := m.enc.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err)) level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
@ -627,8 +620,12 @@ func (m *Mesh) resolveEndpoints() error {
if !m.nodes[k].Ready() { if !m.nodes[k].Ready() {
continue continue
} }
// Resolve the Endpoint // If the node is ready, then the endpoint is not nil
if _, err := m.nodes[k].Endpoint.UDPAddr(true); err != nil { // but it may not have a DNS name.
if m.nodes[k].Endpoint.DNS == "" {
continue
}
if err := resolveEndpoint(m.nodes[k].Endpoint); err != nil {
return err return err
} }
} }
@ -639,16 +636,33 @@ func (m *Mesh) resolveEndpoints() error {
continue continue
} }
// Peers may have nil endpoints. // Peers may have nil endpoints.
if !m.peers[k].Endpoint.Ready() { if m.peers[k].Endpoint == nil || m.peers[k].Endpoint.DNS == "" {
continue continue
} }
if _, err := m.peers[k].Endpoint.UDPAddr(true); err != nil { if err := resolveEndpoint(m.peers[k].Endpoint); err != nil {
return err return err
} }
} }
return nil return nil
} }
func resolveEndpoint(endpoint *wireguard.Endpoint) error {
ips, err := net.LookupIP(endpoint.DNS)
if err != nil {
return fmt.Errorf("failed to look up DNS name %q: %v", endpoint.DNS, err)
}
nets := make([]*net.IPNet, len(ips), len(ips))
for i := range ips {
nets[i] = oneAddressCIDR(ips[i])
}
sortIPs(nets)
if len(nets) == 0 {
return fmt.Errorf("did not find any addresses for DNS name %q", endpoint.DNS)
}
endpoint.IP = nets[0].IP
return nil
}
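A simplified sketch of this resolution step: look up all addresses for the DNS name and pick one deterministically, so repeated reconciliations (and different nodes) converge on the same endpoint IP. The ordering here is by string form, whereas the code above uses its own sortIPs helper over single-address networks.

package example

import (
	"fmt"
	"net"
	"sort"
)

// resolveDNS returns one address for name, chosen deterministically.
func resolveDNS(name string) (net.IP, error) {
	ips, err := net.LookupIP(name)
	if err != nil {
		return nil, fmt.Errorf("failed to look up DNS name %q: %v", name, err)
	}
	if len(ips) == 0 {
		return nil, fmt.Errorf("did not find any addresses for DNS name %q", name)
	}
	sort.Slice(ips, func(i, j int) bool { return ips[i].String() < ips[j].String() })
	return ips[0], nil
}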
func isSelf(hostname string, node *Node) bool { func isSelf(hostname string, node *Node) bool {
return node != nil && node.Name == hostname return node != nil && node.Name == hostname
} }
@ -668,18 +682,7 @@ func nodesAreEqual(a, b *Node) bool {
// Ignore LastSeen when comparing equality; we want to check if the nodes are // Ignore LastSeen when comparing equality; we want to check if the nodes are
// equivalent. However, we do want to check if LastSeen has transitioned // equivalent. However, we do want to check if LastSeen has transitioned
// between valid and invalid. // between valid and invalid.
return a.Key.String() == b.Key.String() && return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive && discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) && ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) && a.Granularity == b.Granularity
ipNetsEqual(a.WireGuardIP, b.WireGuardIP) &&
ipNetsEqual(a.InternalIP, b.InternalIP) &&
a.Leader == b.Leader &&
a.Location == b.Location &&
a.Name == b.Name &&
subnetsEqual(a.Subnet, b.Subnet) &&
a.Ready() == b.Ready() &&
a.PersistentKeepalive == b.PersistentKeepalive &&
discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) &&
ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) &&
a.Granularity == b.Granularity
} }
func peersAreEqual(a, b *Peer) bool { func peersAreEqual(a, b *Peer) bool {
@ -698,15 +701,11 @@ func peersAreEqual(a, b *Peer) bool {
return false return false
} }
for i := range a.AllowedIPs { for i := range a.AllowedIPs {
if !ipNetsEqual(&a.AllowedIPs[i], &b.AllowedIPs[i]) { if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
return false return false
} }
} }
return a.PublicKey.String() == b.PublicKey.String() && return string(a.PublicKey) == string(b.PublicKey) && string(a.PresharedKey) == string(b.PresharedKey) && a.PersistentKeepalive == b.PersistentKeepalive
(a.PresharedKey == nil) == (b.PresharedKey == nil) &&
(a.PresharedKey == nil || a.PresharedKey.String() == b.PresharedKey.String()) &&
(a.PersistentKeepaliveInterval == nil) == (b.PersistentKeepaliveInterval == nil) &&
(a.PersistentKeepaliveInterval == nil || *a.PersistentKeepaliveInterval == *b.PersistentKeepaliveInterval)
} }
func ipNetsEqual(a, b *net.IPNet) bool { func ipNetsEqual(a, b *net.IPNet) bool {
@ -722,12 +721,12 @@ func ipNetsEqual(a, b *net.IPNet) bool {
return a.IP.Equal(b.IP) return a.IP.Equal(b.IP)
} }
func ipNetSlicesEqual(a, b []net.IPNet) bool { func ipNetSlicesEqual(a, b []*net.IPNet) bool {
if len(a) != len(b) { if len(a) != len(b) {
return false return false
} }
for i := range a { for i := range a {
if !ipNetsEqual(&a[i], &b[i]) { if !ipNetsEqual(a[i], b[i]) {
return false return false
} }
} }
@ -753,31 +752,18 @@ func subnetsEqual(a, b *net.IPNet) bool {
return true return true
} }
func udpAddrsEqual(a, b *net.UDPAddr) bool { func discoveredEndpointsAreEqual(a, b map[string]*wireguard.Endpoint) bool {
if a == nil && b == nil { if a == nil && b == nil {
return true return true
} }
if (a != nil) != (b != nil) { if (a != nil) != (b != nil) {
return false return false
} }
if a.Zone != b.Zone {
return false
}
if a.Port != b.Port {
return false
}
return a.IP.Equal(b.IP)
}
func discoveredEndpointsAreEqual(a, b map[string]*net.UDPAddr) bool {
if a == nil && b == nil {
return true
}
if len(a) != len(b) { if len(a) != len(b) {
return false return false
} }
for k := range a { for k := range a {
if !udpAddrsEqual(a[k], b[k]) { if !a[k].Equal(b[k], false) {
return false return false
} }
} }
@ -793,26 +779,24 @@ func linkByIndex(index int) (netlink.Link, error) {
} }
// discoverNATEndpoints uses the node's WireGuard configuration to return a list of the most recently discovered endpoints for all nodes and peers behind NAT so that they can roam. // discoverNATEndpoints uses the node's WireGuard configuration to return a list of the most recently discovered endpoints for all nodes and peers behind NAT so that they can roam.
// Discovered endpoints will never be DNS names, because WireGuard will always resolve them to net.UDPAddr. func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wireguard.Conf, logger log.Logger) map[string]*wireguard.Endpoint {
func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wgtypes.Device, logger log.Logger) map[string]*net.UDPAddr { natEndpoints := make(map[string]*wireguard.Endpoint)
natEndpoints := make(map[string]*net.UDPAddr) keys := make(map[string]*wireguard.Peer)
keys := make(map[string]wgtypes.Peer)
for i := range conf.Peers { for i := range conf.Peers {
keys[conf.Peers[i].PublicKey.String()] = conf.Peers[i] keys[string(conf.Peers[i].PublicKey)] = conf.Peers[i]
} }
for _, n := range nodes { for _, n := range nodes {
if peer, ok := keys[n.Key.String()]; ok && n.PersistentKeepalive != time.Duration(0) { if peer, ok := keys[string(n.Key)]; ok && n.PersistentKeepalive > 0 {
level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime) level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", n.Endpoint.Equal(peer.Endpoint, false), "latest-handshake", peer.LatestHandshake)
// Don't update the endpoint if there was never any handshake. if (peer.LatestHandshake != time.Time{}) {
if !peer.LastHandshakeTime.Equal(time.Time{}) { natEndpoints[string(n.Key)] = peer.Endpoint
natEndpoints[n.Key.String()] = peer.Endpoint
} }
} }
} }
for _, p := range peers { for _, p := range peers {
if peer, ok := keys[p.PublicKey.String()]; ok && p.PersistentKeepaliveInterval != nil { if peer, ok := keys[string(p.PublicKey)]; ok && p.PersistentKeepalive > 0 {
if !peer.LastHandshakeTime.Equal(time.Time{}) { if (peer.LatestHandshake != time.Time{}) {
natEndpoints[p.PublicKey.String()] = peer.Endpoint natEndpoints[string(p.PublicKey)] = peer.Endpoint
} }
} }
} }
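A sketch of the discovery idea, assuming the wgtypes-based variant: only the endpoints of peers that have completed at least one handshake are trusted. The real function above additionally requires that persistent keepalive is configured for the node or peer.

package example

import (
	"net"
	"time"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// natEndpoints collects, keyed by public key, the endpoint WireGuard last saw
// for every peer that has completed a handshake, so peers roaming behind NAT
// stay reachable.
func natEndpoints(device *wgtypes.Device) map[string]*net.UDPAddr {
	discovered := make(map[string]*net.UDPAddr)
	for _, peer := range device.Peers {
		// A zero handshake time means this peer has never connected, so the
		// kernel-reported endpoint cannot be trusted yet.
		if peer.LastHandshakeTime.Equal(time.Time{}) {
			continue
		}
		discovered[peer.PublicKey.String()] = peer.Endpoint
	}
	return discovered
}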

View File

@ -19,21 +19,9 @@ import (
"testing" "testing"
"time" "time"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/kilo-io/kilo/pkg/wireguard"
"github.com/squat/kilo/pkg/wireguard"
) )
func mustKey() wgtypes.Key {
if k, err := wgtypes.GeneratePrivateKey(); err != nil {
panic(err.Error())
} else {
return k
}
}
var key = mustKey()
func TestReady(t *testing.T) { func TestReady(t *testing.T) {
internalIP := oneAddressCIDR(net.ParseIP("1.1.1.1")) internalIP := oneAddressCIDR(net.ParseIP("1.1.1.1"))
externalIP := oneAddressCIDR(net.ParseIP("2.2.2.2")) externalIP := oneAddressCIDR(net.ParseIP("2.2.2.2"))
@ -56,7 +44,7 @@ func TestReady(t *testing.T) {
name: "empty endpoint", name: "empty endpoint",
node: &Node{ node: &Node{
InternalIP: internalIP, InternalIP: internalIP,
Key: key, Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },
ready: false, ready: false,
@ -64,9 +52,9 @@ func TestReady(t *testing.T) {
{ {
name: "empty endpoint IP", name: "empty endpoint IP",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(nil, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{}, Port: DefaultKiloPort},
InternalIP: internalIP, InternalIP: internalIP,
Key: wgtypes.Key{}, Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },
ready: false, ready: false,
@ -74,9 +62,9 @@ func TestReady(t *testing.T) {
{ {
name: "empty endpoint port", name: "empty endpoint port",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(externalIP.IP, 0), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}},
InternalIP: internalIP, InternalIP: internalIP,
Key: wgtypes.Key{}, Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },
ready: false, ready: false,
@ -84,8 +72,8 @@ func TestReady(t *testing.T) {
{ {
name: "empty internal IP", name: "empty internal IP",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
Key: wgtypes.Key{}, Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },
ready: false, ready: false,
@ -93,7 +81,7 @@ func TestReady(t *testing.T) {
{ {
name: "empty key", name: "empty key",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
InternalIP: internalIP, InternalIP: internalIP,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },
@ -102,18 +90,18 @@ func TestReady(t *testing.T) {
{ {
name: "empty subnet", name: "empty subnet",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
InternalIP: internalIP, InternalIP: internalIP,
Key: wgtypes.Key{}, Key: []byte{},
}, },
ready: false, ready: false,
}, },
{ {
name: "valid", name: "valid",
node: &Node{ node: &Node{
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
InternalIP: internalIP, InternalIP: internalIP,
Key: key, Key: []byte{},
LastSeen: time.Now().Unix(), LastSeen: time.Now().Unix(),
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
}, },

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux
// +build linux // +build linux
package mesh package mesh
@ -23,8 +22,8 @@ import (
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"github.com/squat/kilo/pkg/encapsulation" "github.com/kilo-io/kilo/pkg/encapsulation"
"github.com/squat/kilo/pkg/iptables" "github.com/kilo-io/kilo/pkg/iptables"
) )
const kiloTableIndex = 1107 const kiloTableIndex = 1107
@ -40,7 +39,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
var gw net.IP var gw net.IP
for _, segment := range t.segments { for _, segment := range t.segments {
if segment.location == t.location { if segment.location == t.location {
gw = enc.Gw(t.updateEndpoint(segment.endpoint, segment.key, &segment.persistentKeepalive).IP(), segment.privateIPs[segment.leader], segment.cidrs[segment.leader]) gw = enc.Gw(segment.endpoint.IP, segment.privateIPs[segment.leader], segment.cidrs[segment.leader])
break break
} }
} }
@ -113,7 +112,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// we need to set routes for allowed location IPs over the leader in the current location. // we need to set routes for allowed location IPs over the leader in the current location.
for i := range segment.allowedLocationIPs { for i := range segment.allowedLocationIPs {
routes = append(routes, encapsulateRoute(&netlink.Route{ routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: &segment.allowedLocationIPs[i], Dst: segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: gw, Gw: gw,
LinkIndex: privIface, LinkIndex: privIface,
@ -125,7 +124,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
for _, peer := range t.peers { for _, peer := range t.peers {
for i := range peer.AllowedIPs { for i := range peer.AllowedIPs {
routes = append(routes, encapsulateRoute(&netlink.Route{ routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: &peer.AllowedIPs[i], Dst: peer.AllowedIPs[i],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: gw, Gw: gw,
LinkIndex: privIface, LinkIndex: privIface,
@ -196,7 +195,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// equals the external IP. This means that the node // equals the external IP. This means that the node
// is only accessible through an external IP and we // is only accessible through an external IP and we
// cannot encapsulate traffic to an IP through the IP. // cannot encapsulate traffic to an IP through the IP.
if segment.privateIPs == nil || segment.privateIPs[i].Equal(t.updateEndpoint(segment.endpoint, segment.key, &segment.persistentKeepalive).IP()) { if segment.privateIPs == nil || segment.privateIPs[i].Equal(segment.endpoint.IP) {
continue continue
} }
// Add routes to the private IPs of nodes in other segments. // Add routes to the private IPs of nodes in other segments.
@ -214,7 +213,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// we need to set routes for allowed location IPs over the wg interface. // we need to set routes for allowed location IPs over the wg interface.
for i := range segment.allowedLocationIPs { for i := range segment.allowedLocationIPs {
routes = append(routes, &netlink.Route{ routes = append(routes, &netlink.Route{
Dst: &segment.allowedLocationIPs[i], Dst: segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP, Gw: segment.wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -226,7 +225,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
for _, peer := range t.peers { for _, peer := range t.peers {
for i := range peer.AllowedIPs { for i := range peer.AllowedIPs {
routes = append(routes, &netlink.Route{ routes = append(routes, &netlink.Route{
Dst: &peer.AllowedIPs[i], Dst: peer.AllowedIPs[i],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}) })
@ -235,74 +234,6 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
return routes, rules return routes, rules
} }
// PeerRoutes generates a slice of routes and rules for a given peer in the Topology.
func (t *Topology) PeerRoutes(name string, kiloIface int, additionalAllowedIPs []net.IPNet) ([]*netlink.Route, []*netlink.Rule) {
var routes []*netlink.Route
var rules []*netlink.Rule
for _, segment := range t.segments {
for i := range segment.cidrs {
// Add routes to the Pod CIDRs of nodes in other segments.
routes = append(routes, &netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
for i := range segment.privateIPs {
// Add routes to the private IPs of nodes in other segments.
routes = append(routes, &netlink.Route{
Dst: oneAddressCIDR(segment.privateIPs[i]),
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
// Add routes for the allowed location IPs of all segments.
for i := range segment.allowedLocationIPs {
routes = append(routes, &netlink.Route{
Dst: &segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
routes = append(routes, &netlink.Route{
Dst: oneAddressCIDR(segment.wireGuardIP),
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
// Add routes for the allowed IPs of peers.
for _, peer := range t.peers {
// Don't add routes to ourselves.
if peer.Name == name {
continue
}
for i := range peer.AllowedIPs {
routes = append(routes, &netlink.Route{
Dst: &peer.AllowedIPs[i],
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
}
for i := range additionalAllowedIPs {
routes = append(routes, &netlink.Route{
Dst: &additionalAllowedIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: t.segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
return routes, rules
}
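Routes produced by helpers like PeerRoutes might be installed roughly as follows (a sketch; Kilo's route.Table layers bookkeeping and clean-up on top of this basic loop).

package example

import "github.com/vishvananda/netlink"

// applyRoutes installs a set of generated routes. RouteReplace is idempotent,
// so re-applying the same routes on every reconciliation is safe.
func applyRoutes(routes []*netlink.Route) error {
	for _, r := range routes {
		if err := netlink.RouteReplace(r); err != nil {
			return err
		}
	}
	return nil
}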
func encapsulateRoute(route *netlink.Route, encapsulate encapsulation.Strategy, subnet *net.IPNet, tunlIface int) *netlink.Route { func encapsulateRoute(route *netlink.Route, encapsulate encapsulation.Strategy, subnet *net.IPNet, tunlIface int) *netlink.Route {
if encapsulate == encapsulation.Always || (encapsulate == encapsulation.CrossSubnet && !subnet.Contains(route.Gw)) { if encapsulate == encapsulation.Always || (encapsulate == encapsulation.CrossSubnet && !subnet.Contains(route.Gw)) {
route.LinkIndex = tunlIface route.LinkIndex = tunlIface
@ -311,45 +242,17 @@ func encapsulateRoute(route *netlink.Route, encapsulate encapsulation.Strategy,
} }
// Rules returns the iptables rules required by the local node. // Rules returns the iptables rules required by the local node.
func (t *Topology) Rules(cni, iptablesForwardRule bool) []iptables.Rule { func (t *Topology) Rules(cni bool) []iptables.Rule {
var rules []iptables.Rule var rules []iptables.Rule
rules = append(rules, iptables.NewIPv4Chain("nat", "KILO-NAT")) rules = append(rules, iptables.NewIPv4Chain("nat", "KILO-NAT"))
rules = append(rules, iptables.NewIPv6Chain("nat", "KILO-NAT")) rules = append(rules, iptables.NewIPv6Chain("nat", "KILO-NAT"))
if cni { if cni {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "nat", "POSTROUTING", "-s", t.subnet.String(), "-m", "comment", "--comment", "Kilo: jump to KILO-NAT chain", "-j", "KILO-NAT")) rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(t.subnet.IP)), "nat", "POSTROUTING", "-s", t.subnet.String(), "-m", "comment", "--comment", "Kilo: jump to KILO-NAT chain", "-j", "KILO-NAT"))
// Some Linux distros or Docker will set the FORWARD policy to DROP in the filter table.
// To preserve pod-to-pod communication, we need to ALLOW packets from and to pod CIDRs within a location.
// Leader nodes will forward packets from all nodes within a location because they act as a gateway for them.
// Non-leader nodes only need to allow packets from and to their own pod CIDR.
if iptablesForwardRule && t.leader {
for _, s := range t.segments {
if s.location == t.location {
// Make sure packets to and from pod cidrs are not dropped in the forward chain.
for _, c := range s.cidrs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from the pod subnet", "-s", c.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to the pod subnet", "-d", c.String(), "-j", "ACCEPT"))
}
// Make sure packets to and from allowed location IPs are not dropped in the forward chain.
for _, c := range s.allowedLocationIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from allowed location IPs", "-s", c.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to allowed location IPs", "-d", c.String(), "-j", "ACCEPT"))
}
// Make sure packets to and from private IPs are not dropped in the forward chain.
for _, c := range s.privateIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from private IPs", "-s", oneAddressCIDR(c).String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to private IPs", "-d", oneAddressCIDR(c).String(), "-j", "ACCEPT"))
}
}
}
} else if iptablesForwardRule {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from the node's pod subnet", "-s", t.subnet.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to the node's pod subnet", "-d", t.subnet.String(), "-j", "ACCEPT"))
}
} }
for _, s := range t.segments { for _, s := range t.segments {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(s.wireGuardIP), "nat", "KILO-NAT", "-d", oneAddressCIDR(s.wireGuardIP).String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for WireGuard IPs", "-j", "RETURN")) rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(s.wireGuardIP)), "nat", "KILO-NAT", "-d", oneAddressCIDR(s.wireGuardIP).String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for WireGuard IPs", "-j", "RETURN"))
for _, aip := range s.allowedIPs { for _, aip := range s.allowedIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for known IPs", "-j", "RETURN")) rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for known IPs", "-j", "RETURN"))
} }
// Make sure packets to allowed location IPs go through the KILO-NAT chain, so they can be MASQUERADEd. // Make sure packets to allowed location IPs go through the KILO-NAT chain, so they can be MASQUERADEd.
// Otherwise packets to these destinations will reach the destination, but never find their way back. // Otherwise packets to these destinations will reach the destination, but never find their way back.
@ -357,7 +260,7 @@ func (t *Topology) Rules(cni, iptablesForwardRule bool) []iptables.Rule {
if t.location == s.location { if t.location == s.location {
for _, alip := range s.allowedLocationIPs { for _, alip := range s.allowedLocationIPs {
rules = append(rules, rules = append(rules,
iptables.NewRule(iptables.GetProtocol(alip.IP), "nat", "POSTROUTING", "-d", alip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"), iptables.NewRule(iptables.GetProtocol(len(alip.IP)), "nat", "POSTROUTING", "-d", alip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
) )
} }
} }
@ -365,8 +268,8 @@ func (t *Topology) Rules(cni, iptablesForwardRule bool) []iptables.Rule {
for _, p := range t.peers { for _, p := range t.peers {
for _, aip := range p.AllowedIPs { for _, aip := range p.AllowedIPs {
rules = append(rules, rules = append(rules,
iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "POSTROUTING", "-s", aip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"), iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "POSTROUTING", "-s", aip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for peers", "-j", "RETURN"), iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for peers", "-j", "RETURN"),
) )
} }
} }
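For a single pod subnet, the NAT wiring above boils down to something like the following sketch. It follows the left-hand GetProtocol signature (taking a net.IP), while the right-hand side passes a length; the import path likewise differs between squat/kilo and kilo-io/kilo, and oneAddress is a local stand-in for the oneAddressCIDR helper used above.

package example

import (
	"net"

	"github.com/squat/kilo/pkg/iptables"
)

// podSubnetRules builds the minimal NAT rules for one pod subnet: traffic
// leaving the subnet jumps to the KILO-NAT chain, while traffic destined for
// the node's WireGuard IP is returned un-NATed.
func podSubnetRules(subnet *net.IPNet, wireGuardIP net.IP) []iptables.Rule {
	return []iptables.Rule{
		iptables.NewIPv4Chain("nat", "KILO-NAT"),
		iptables.NewRule(iptables.GetProtocol(subnet.IP), "nat", "POSTROUTING", "-s", subnet.String(), "-m", "comment", "--comment", "Kilo: jump to KILO-NAT chain", "-j", "KILO-NAT"),
		iptables.NewRule(iptables.GetProtocol(wireGuardIP), "nat", "KILO-NAT", "-d", oneAddress(wireGuardIP).String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for WireGuard IPs", "-j", "RETURN"),
	}
}

// oneAddress returns the single-address network (/32 or /128) containing ip.
func oneAddress(ip net.IP) *net.IPNet {
	return &net.IPNet{IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8)}
}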

View File

@ -21,7 +21,7 @@ import (
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
"github.com/squat/kilo/pkg/encapsulation" "github.com/kilo-io/kilo/pkg/encapsulation"
) )
func TestRoutes(t *testing.T) { func TestRoutes(t *testing.T) {
@ -75,7 +75,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -89,17 +89,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -132,17 +132,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -196,21 +196,21 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
@ -266,24 +266,24 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["d"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["d"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -309,7 +309,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -337,17 +337,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -394,17 +394,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -444,7 +444,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -458,17 +458,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -509,7 +509,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -523,17 +523,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -574,7 +574,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -588,17 +588,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -639,17 +639,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -698,17 +698,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -782,21 +782,21 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface, LinkIndex: privIface,
@ -868,21 +868,21 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: tunlIface, LinkIndex: tunlIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: tunlIface, LinkIndex: tunlIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP, Gw: nodes["b"].InternalIP.IP,
LinkIndex: tunlIface, LinkIndex: tunlIface,
@ -918,7 +918,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -946,17 +946,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -1004,17 +1004,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
@ -1055,7 +1055,7 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &nodes["b"].AllowedLocationIPs[0], Dst: nodes["b"].AllowedLocationIPs[0],
Flags: int(netlink.FLAG_ONLINK), Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP, Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP,
LinkIndex: kiloIface, LinkIndex: kiloIface,
@ -1069,17 +1069,17 @@ func TestRoutes(t *testing.T) {
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[0], Dst: peers["a"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["a"].AllowedIPs[1], Dst: peers["a"].AllowedIPs[1],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
{ {
Dst: &peers["b"].AllowedIPs[0], Dst: peers["b"].AllowedIPs[0],
LinkIndex: kiloIface, LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC, Protocol: unix.RTPROT_STATIC,
}, },
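For readers less familiar with the netlink package these expected routes are built for, here is a minimal, hypothetical sketch of how one such route could be programmed. The interface name, gateway, and destination are placeholder values in the style of the fixtures above, the Protocol field type depends on the vendored netlink version, and the program must run as root on Linux.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	// Placeholder interface; the tests above address interfaces by index (privIface, tunlIface, kiloIface).
	link, err := netlink.LinkByName("kilo0")
	if err != nil {
		log.Fatal(err)
	}
	// Destination in the same shape as the fixtures: a peer's allowed IP.
	dst := &net.IPNet{IP: net.ParseIP("10.5.0.3"), Mask: net.CIDRMask(24, 32)}
	route := &netlink.Route{
		Dst:       dst,
		Flags:     int(netlink.FLAG_ONLINK),         // next hop is reachable on-link, as in the expected routes
		Gw:        net.ParseIP("192.168.0.1").To4(), // hypothetical gateway (a node's internal IP in the tests)
		LinkIndex: link.Attrs().Index,
		Protocol:  unix.RTPROT_STATIC, // mark the route as statically configured
	}
	// RouteReplace adds the route or atomically replaces an existing one.
	if err := netlink.RouteReplace(route); err != nil {
		log.Fatal(err)
	}
}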

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,13 +18,11 @@ import (
"errors" "errors"
"net" "net"
"sort" "sort"
"time"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level" "github.com/go-kit/kit/log/level"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
const ( const (
@ -35,8 +33,8 @@ const (
// Topology represents the logical structure of the overlay network. // Topology represents the logical structure of the overlay network.
type Topology struct { type Topology struct {
// key is the private key of the node creating the topology. // key is the private key of the node creating the topology.
key wgtypes.Key key []byte
port int port uint32
// Location is the logical location of the local host. // Location is the logical location of the local host.
location string location string
segments []*segment segments []*segment
@ -49,7 +47,7 @@ type Topology struct {
leader bool leader bool
// persistentKeepalive is the interval in seconds of the emission // persistentKeepalive is the interval in seconds of the emission
// of keepalive packets by the local node to its peers. // of keepalive packets by the local node to its peers.
persistentKeepalive time.Duration persistentKeepalive int
// privateIP is the private IP address of the local node. // privateIP is the private IP address of the local node.
privateIP *net.IPNet privateIP *net.IPNet
// subnet is the Pod subnet of the local node. // subnet is the Pod subnet of the local node.
@ -61,16 +59,15 @@ type Topology struct {
// is equal to the Kilo subnet. // is equal to the Kilo subnet.
wireGuardCIDR *net.IPNet wireGuardCIDR *net.IPNet
// discoveredEndpoints is the updated map of valid discovered Endpoints // discoveredEndpoints is the updated map of valid discovered Endpoints
discoveredEndpoints map[string]*net.UDPAddr discoveredEndpoints map[string]*wireguard.Endpoint
logger log.Logger logger log.Logger
} }
// segment represents one logical unit in the topology that is united by one common WireGuard IP.
type segment struct { type segment struct {
allowedIPs []net.IPNet allowedIPs []*net.IPNet
endpoint *wireguard.Endpoint endpoint *wireguard.Endpoint
key wgtypes.Key key []byte
persistentKeepalive time.Duration persistentKeepalive int
// Location is the logical location of this segment. // Location is the logical location of this segment.
location string location string
@ -88,11 +85,11 @@ type segment struct {
// allowedLocationIPs are not part of the cluster and are not peers. // allowedLocationIPs are not part of the cluster and are not peers.
// They are directly routable from nodes within the segment. // They are directly routable from nodes within the segment.
// A classic example is a printer that ought to be routable from other locations. // A classic example is a printer that ought to be routable from other locations.
allowedLocationIPs []net.IPNet allowedLocationIPs []*net.IPNet
} }
// NewTopology creates a new Topology struct from a given set of nodes and peers. // NewTopology creates a new Topology struct from a given set of nodes and peers.
func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port int, key wgtypes.Key, subnet *net.IPNet, persistentKeepalive time.Duration, logger log.Logger) (*Topology, error) { func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int, logger log.Logger) (*Topology, error) {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
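The signature change above swaps the hand-rolled types for wgctrl's: the port becomes an int, the key a wgtypes.Key, and the keepalive a time.Duration. Below is a hedged sketch of a caller under the new signature, assuming it lives in the same mesh package and that DefaultKiloPort and LogicalGranularity are the constants used in the tests further down; it is not code from this repository.

package mesh

import (
	"net"
	"time"

	"github.com/go-kit/kit/log"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// buildTopology is a hypothetical helper; real callers would pass the node's
// persisted private key rather than generating a fresh one.
func buildTopology(nodes map[string]*Node, peers map[string]*Peer, hostname string, subnet *net.IPNet) (*Topology, error) {
	key, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		return nil, err
	}
	// The keepalive is now a time.Duration instead of an integer number of seconds.
	keepalive := 25 * time.Second
	return NewTopology(nodes, peers, LogicalGranularity, hostname, DefaultKiloPort, key, subnet, keepalive, log.NewNopLogger())
}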
@ -123,18 +120,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
localLocation = nodeLocationPrefix + hostname localLocation = nodeLocationPrefix + hostname
} }
t := Topology{ t := Topology{key: key, port: port, hostname: hostname, location: localLocation, persistentKeepalive: persistentKeepalive, privateIP: nodes[hostname].InternalIP, subnet: nodes[hostname].Subnet, wireGuardCIDR: subnet, discoveredEndpoints: make(map[string]*wireguard.Endpoint), logger: logger}
key: key,
port: port,
hostname: hostname,
location: localLocation,
persistentKeepalive: persistentKeepalive,
privateIP: nodes[hostname].InternalIP,
subnet: nodes[hostname].Subnet,
wireGuardCIDR: subnet,
discoveredEndpoints: make(map[string]*net.UDPAddr),
logger: logger,
}
for location := range topoMap { for location := range topoMap {
// Sort the location so the result is stable. // Sort the location so the result is stable.
sort.Slice(topoMap[location], func(i, j int) bool { sort.Slice(topoMap[location], func(i, j int) bool {
@ -144,9 +130,9 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
if location == localLocation && topoMap[location][leader].Name == hostname { if location == localLocation && topoMap[location][leader].Name == hostname {
t.leader = true t.leader = true
} }
var allowedIPs []net.IPNet var allowedIPs []*net.IPNet
allowedLocationIPsMap := make(map[string]struct{}) allowedLocationIPsMap := make(map[string]struct{})
var allowedLocationIPs []net.IPNet var allowedLocationIPs []*net.IPNet
var cidrs []*net.IPNet var cidrs []*net.IPNet
var hostnames []string var hostnames []string
var privateIPs []net.IP var privateIPs []net.IP
@ -156,9 +142,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
// - the node's WireGuard IP // - the node's WireGuard IP
// - the node's internal IP // - the node's internal IP
// - IPs that were specified by the allowed-location-ips annotation // - IPs that were specified by the allowed-location-ips annotation
if node.Subnet != nil { allowedIPs = append(allowedIPs, node.Subnet)
allowedIPs = append(allowedIPs, *node.Subnet)
}
for _, ip := range node.AllowedLocationIPs { for _, ip := range node.AllowedLocationIPs {
if _, ok := allowedLocationIPsMap[ip.String()]; !ok { if _, ok := allowedLocationIPsMap[ip.String()]; !ok {
allowedLocationIPs = append(allowedLocationIPs, ip) allowedLocationIPs = append(allowedLocationIPs, ip)
@ -166,7 +150,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
} }
} }
if node.InternalIP != nil { if node.InternalIP != nil {
allowedIPs = append(allowedIPs, *oneAddressCIDR(node.InternalIP.IP)) allowedIPs = append(allowedIPs, oneAddressCIDR(node.InternalIP.IP))
privateIPs = append(privateIPs, node.InternalIP.IP) privateIPs = append(privateIPs, node.InternalIP.IP)
} }
cidrs = append(cidrs, node.Subnet) cidrs = append(cidrs, node.Subnet)
@ -188,8 +172,6 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
privateIPs: privateIPs, privateIPs: privateIPs,
allowedLocationIPs: allowedLocationIPs, allowedLocationIPs: allowedLocationIPs,
}) })
level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
} }
// Sort the Topology segments so the result is stable. // Sort the Topology segments so the result is stable.
sort.Slice(t.segments, func(i, j int) bool { sort.Slice(t.segments, func(i, j int) bool {
@ -218,7 +200,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
return nil, errors.New("failed to allocate an IP address; ran out of IP addresses") return nil, errors.New("failed to allocate an IP address; ran out of IP addresses")
} }
segment.wireGuardIP = ipNet.IP segment.wireGuardIP = ipNet.IP
segment.allowedIPs = append(segment.allowedIPs, *oneAddressCIDR(ipNet.IP)) segment.allowedIPs = append(segment.allowedIPs, oneAddressCIDR(ipNet.IP))
if t.leader && segment.location == t.location { if t.leader && segment.location == t.location {
t.wireGuardCIDR = &net.IPNet{IP: ipNet.IP, Mask: subnet.Mask} t.wireGuardCIDR = &net.IPNet{IP: ipNet.IP, Mask: subnet.Mask}
} }
@ -236,15 +218,14 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
segment.allowedLocationIPs = t.filterAllowedLocationIPs(segment.allowedLocationIPs, segment.location) segment.allowedLocationIPs = t.filterAllowedLocationIPs(segment.allowedLocationIPs, segment.location)
} }
level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
return &t, nil return &t, nil
} }
func intersect(n1, n2 net.IPNet) bool { func intersect(n1, n2 *net.IPNet) bool {
return n1.Contains(n2.IP) || n2.Contains(n1.IP) return n1.Contains(n2.IP) || n2.Contains(n1.IP)
} }
func (t *Topology) filterAllowedLocationIPs(ips []net.IPNet, location string) (ret []net.IPNet) { func (t *Topology) filterAllowedLocationIPs(ips []*net.IPNet, location string) (ret []*net.IPNet) {
CheckIPs: CheckIPs:
for _, ip := range ips { for _, ip := range ips {
for _, s := range t.segments { for _, s := range t.segments {
@ -286,14 +267,14 @@ CheckIPs:
return return
} }
func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key wgtypes.Key, persistentKeepalive *time.Duration) *wireguard.Endpoint { func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key []byte, persistentKeepalive int) *wireguard.Endpoint {
// Do not update non-nat peers // Do not update non-nat peers
if persistentKeepalive == nil || *persistentKeepalive == time.Duration(0) { if persistentKeepalive == 0 {
return endpoint return endpoint
} }
e, ok := t.discoveredEndpoints[key.String()] e, ok := t.discoveredEndpoints[string(key)]
if ok { if ok {
return wireguard.NewEndpointFromUDPAddr(e) return e
} }
return endpoint return endpoint
} }
@ -301,37 +282,30 @@ func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key wgtypes.Key,
// Conf generates a WireGuard configuration file for a given Topology. // Conf generates a WireGuard configuration file for a given Topology.
func (t *Topology) Conf() *wireguard.Conf { func (t *Topology) Conf() *wireguard.Conf {
c := &wireguard.Conf{ c := &wireguard.Conf{
Config: wgtypes.Config{ Interface: &wireguard.Interface{
PrivateKey: &t.key, PrivateKey: t.key,
ListenPort: &t.port, ListenPort: t.port,
ReplacePeers: true,
}, },
} }
for _, s := range t.segments { for _, s := range t.segments {
if s.location == t.location { if s.location == t.location {
continue continue
} }
peer := wireguard.Peer{ peer := &wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...),
AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...), Endpoint: t.updateEndpoint(s.endpoint, s.key, s.persistentKeepalive),
PersistentKeepaliveInterval: &t.persistentKeepalive, PersistentKeepalive: t.persistentKeepalive,
PublicKey: s.key, PublicKey: s.key,
ReplaceAllowedIPs: true,
},
Endpoint: t.updateEndpoint(s.endpoint, s.key, &s.persistentKeepalive),
} }
c.Peers = append(c.Peers, peer) c.Peers = append(c.Peers, peer)
} }
for _, p := range t.peers { for _, p := range t.peers {
peer := wireguard.Peer{ peer := &wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: p.AllowedIPs,
AllowedIPs: p.AllowedIPs, Endpoint: t.updateEndpoint(p.Endpoint, p.PublicKey, p.PersistentKeepalive),
PersistentKeepaliveInterval: &t.persistentKeepalive, PersistentKeepalive: t.persistentKeepalive,
PresharedKey: p.PresharedKey, PresharedKey: p.PresharedKey,
PublicKey: p.PublicKey, PublicKey: p.PublicKey,
ReplaceAllowedIPs: true,
},
Endpoint: t.updateEndpoint(p.Endpoint, p.PublicKey, p.PersistentKeepaliveInterval),
} }
c.Peers = append(c.Peers, peer) c.Peers = append(c.Peers, peer)
} }
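Conf now wraps a wgtypes.Config while keeping Kilo's own Endpoint type for DNS endpoints. As a usage sketch (not code from this repository), the generated configuration can still be rendered to the INI-style format used by wg(8) via the Bytes method shown further down in this diff; the file path, package name, and interface name are placeholders.

package example

import (
	"os"

	"github.com/squat/kilo/pkg/mesh" // github.com/kilo-io/kilo/pkg/mesh on the other side of this diff
)

// writeConf renders the Topology's WireGuard configuration to disk.
func writeConf(t *mesh.Topology) error {
	buf, err := t.Conf().Bytes()
	if err != nil {
		return err
	}
	// 0600 because the [Interface] section contains the private key.
	return os.WriteFile("/etc/wireguard/kilo0.conf", buf, 0o600)
}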
@ -345,39 +319,34 @@ func (t *Topology) AsPeer() *wireguard.Peer {
if s.location != t.location { if s.location != t.location {
continue continue
} }
p := &wireguard.Peer{ return &wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: s.allowedIPs,
AllowedIPs: s.allowedIPs, Endpoint: s.endpoint,
PublicKey: s.key, PublicKey: s.key,
},
Endpoint: s.endpoint,
} }
return p
} }
return nil return nil
} }
// PeerConf generates a WireGuard configuration file for a given peer in a Topology. // PeerConf generates a WireGuard configuration file for a given peer in a Topology.
func (t *Topology) PeerConf(name string) *wireguard.Conf { func (t *Topology) PeerConf(name string) *wireguard.Conf {
var pka *time.Duration var pka int
var psk *wgtypes.Key var psk []byte
for i := range t.peers { for i := range t.peers {
if t.peers[i].Name == name { if t.peers[i].Name == name {
pka = t.peers[i].PersistentKeepaliveInterval pka = t.peers[i].PersistentKeepalive
psk = t.peers[i].PresharedKey psk = t.peers[i].PresharedKey
break break
} }
} }
c := &wireguard.Conf{} c := &wireguard.Conf{}
for _, s := range t.segments { for _, s := range t.segments {
peer := wireguard.Peer{ peer := &wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: s.allowedIPs,
AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...), Endpoint: s.endpoint,
PersistentKeepaliveInterval: pka, PersistentKeepalive: pka,
PresharedKey: psk, PresharedKey: psk,
PublicKey: s.key, PublicKey: s.key,
},
Endpoint: t.updateEndpoint(s.endpoint, s.key, &s.persistentKeepalive),
} }
c.Peers = append(c.Peers, peer) c.Peers = append(c.Peers, peer)
} }
@ -385,13 +354,11 @@ func (t *Topology) PeerConf(name string) *wireguard.Conf {
if t.peers[i].Name == name { if t.peers[i].Name == name {
continue continue
} }
peer := wireguard.Peer{ peer := &wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: t.peers[i].AllowedIPs,
AllowedIPs: t.peers[i].AllowedIPs, PersistentKeepalive: pka,
PersistentKeepaliveInterval: pka, PublicKey: t.peers[i].PublicKey,
PublicKey: t.peers[i].PublicKey, Endpoint: t.peers[i].Endpoint,
},
Endpoint: t.updateEndpoint(t.peers[i].Endpoint, t.peers[i].PublicKey, t.peers[i].PersistentKeepaliveInterval),
} }
c.Peers = append(c.Peers, peer) c.Peers = append(c.Peers, peer)
} }
@ -412,13 +379,13 @@ func findLeader(nodes []*Node) int {
var leaders, public []int var leaders, public []int
for i := range nodes { for i := range nodes {
if nodes[i].Leader { if nodes[i].Leader {
if isPublic(nodes[i].Endpoint.IP()) { if isPublic(nodes[i].Endpoint.IP) {
return i return i
} }
leaders = append(leaders, i) leaders = append(leaders, i)
} }
if nodes[i].Endpoint.IP() != nil && isPublic(nodes[i].Endpoint.IP()) { if isPublic(nodes[i].Endpoint.IP) {
public = append(public, i) public = append(public, i)
} }
} }
@ -438,12 +405,10 @@ func deduplicatePeerIPs(peers []*Peer) []*Peer {
p := Peer{ p := Peer{
Name: peer.Name, Name: peer.Name,
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ Endpoint: peer.Endpoint,
PersistentKeepaliveInterval: peer.PersistentKeepaliveInterval, PersistentKeepalive: peer.PersistentKeepalive,
PresharedKey: peer.PresharedKey, PresharedKey: peer.PresharedKey,
PublicKey: peer.PublicKey, PublicKey: peer.PublicKey,
},
Endpoint: peer.Endpoint,
}, },
} }
for _, ip := range peer.AllowedIPs { for _, ip := range peer.AllowedIPs {

View File

@ -16,35 +16,30 @@ package mesh
import ( import (
"net" "net"
"strings"
"testing" "testing"
"time"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/kylelemons/godebug/pretty" "github.com/kylelemons/godebug/pretty"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"github.com/squat/kilo/pkg/wireguard" "github.com/kilo-io/kilo/pkg/wireguard"
) )
func mustParseCIDR(s string) (r net.IPNet) { func allowedIPs(ips ...string) string {
return strings.Join(ips, ", ")
}
func mustParseCIDR(s string) (r *net.IPNet) {
if _, ip, err := net.ParseCIDR(s); err != nil { if _, ip, err := net.ParseCIDR(s); err != nil {
panic("failed to parse CIDR") panic("failed to parse CIDR")
} else { } else {
r = *ip r = ip
} }
return return
} }
var ( func setup(t *testing.T) (map[string]*Node, map[string]*Peer, []byte, uint32) {
key1 = wgtypes.Key{'k', 'e', 'y', '1'} key := []byte("private")
key2 = wgtypes.Key{'k', 'e', 'y', '2'}
key3 = wgtypes.Key{'k', 'e', 'y', '3'}
key4 = wgtypes.Key{'k', 'e', 'y', '4'}
key5 = wgtypes.Key{'k', 'e', 'y', '5'}
)
func setup(t *testing.T) (map[string]*Node, map[string]*Peer, wgtypes.Key, int) {
key := wgtypes.Key{'p', 'r', 'i', 'v'}
e1 := &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(16, 32)} e1 := &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(16, 32)}
e2 := &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(16, 32)} e2 := &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(16, 32)}
e3 := &net.IPNet{IP: net.ParseIP("10.1.0.3").To4(), Mask: net.CIDRMask(16, 32)} e3 := &net.IPNet{IP: net.ParseIP("10.1.0.3").To4(), Mask: net.CIDRMask(16, 32)}
@ -55,63 +50,62 @@ func setup(t *testing.T) (map[string]*Node, map[string]*Peer, wgtypes.Key, int)
nodes := map[string]*Node{ nodes := map[string]*Node{
"a": { "a": {
Name: "a", Name: "a",
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
InternalIP: i1, InternalIP: i1,
Location: "1", Location: "1",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
Key: key1, Key: []byte("key1"),
PersistentKeepalive: 25, PersistentKeepalive: 25,
}, },
"b": { "b": {
Name: "b", Name: "b",
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
InternalIP: i1, InternalIP: i1,
Location: "2", Location: "2",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.2.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.2.0"), Mask: net.CIDRMask(24, 32)},
Key: key2, Key: []byte("key2"),
AllowedLocationIPs: []net.IPNet{*i3}, AllowedLocationIPs: []*net.IPNet{i3},
}, },
"c": { "c": {
Name: "c", Name: "c",
Endpoint: wireguard.NewEndpoint(e3.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e3.IP}, Port: DefaultKiloPort},
InternalIP: i2, InternalIP: i2,
// Same location as node b. // Same location as node b.
Location: "2", Location: "2",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.3.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.3.0"), Mask: net.CIDRMask(24, 32)},
Key: key3, Key: []byte("key3"),
}, },
"d": { "d": {
Name: "d", Name: "d",
Endpoint: wireguard.NewEndpoint(e4.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e4.IP}, Port: DefaultKiloPort},
// Same location as node a, but without private IP // Same location as node a, but without private IP
Location: "1", Location: "1",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.4.0"), Mask: net.CIDRMask(24, 32)}, Subnet: &net.IPNet{IP: net.ParseIP("10.2.4.0"), Mask: net.CIDRMask(24, 32)},
Key: key4, Key: []byte("key4"),
}, },
} }
peers := map[string]*Peer{ peers := map[string]*Peer{
"a": { "a": {
Name: "a", Name: "a",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.5.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.5.0.1"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.5.0.2"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.5.0.2"), Mask: net.CIDRMask(24, 32)},
},
PublicKey: key4,
}, },
PublicKey: []byte("key4"),
}, },
}, },
"b": { "b": {
Name: "b", Name: "b",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.5.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.5.0.3"), Mask: net.CIDRMask(24, 32)},
},
PublicKey: key5,
}, },
Endpoint: wireguard.NewEndpoint(net.ParseIP("192.168.0.1"), DefaultKiloPort), Endpoint: &wireguard.Endpoint{
DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("192.168.0.1")},
Port: DefaultKiloPort,
},
PublicKey: []byte("key5"),
}, },
}, },
} }
@ -144,7 +138,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -155,7 +149,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -167,7 +161,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -195,7 +189,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -206,7 +200,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -218,7 +212,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -246,7 +240,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: DefaultKiloSubnet, wireGuardCIDR: DefaultKiloSubnet,
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -257,7 +251,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -269,7 +263,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -297,7 +291,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -308,7 +302,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -320,7 +314,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["c"].Endpoint, endpoint: nodes["c"].Endpoint,
key: nodes["c"].Key, key: nodes["c"].Key,
persistentKeepalive: nodes["c"].PersistentKeepalive, persistentKeepalive: nodes["c"].PersistentKeepalive,
@ -331,7 +325,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w3, wireGuardIP: w3,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -359,7 +353,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -370,7 +364,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -382,7 +376,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["c"].Endpoint, endpoint: nodes["c"].Endpoint,
key: nodes["c"].Key, key: nodes["c"].Key,
persistentKeepalive: nodes["c"].PersistentKeepalive, persistentKeepalive: nodes["c"].PersistentKeepalive,
@ -393,7 +387,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w3, wireGuardIP: w3,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -421,7 +415,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -432,7 +426,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -444,7 +438,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["c"].Endpoint, endpoint: nodes["c"].Endpoint,
key: nodes["c"].Key, key: nodes["c"].Key,
persistentKeepalive: nodes["c"].PersistentKeepalive, persistentKeepalive: nodes["c"].PersistentKeepalive,
@ -455,7 +449,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w3, wireGuardIP: w3,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -483,7 +477,7 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w4, Mask: net.CIDRMask(16, 32)}, wireGuardCIDR: &net.IPNet{IP: w4, Mask: net.CIDRMask(16, 32)},
segments: []*segment{ segments: []*segment{
{ {
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint, endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key, key: nodes["a"].Key,
persistentKeepalive: nodes["a"].PersistentKeepalive, persistentKeepalive: nodes["a"].PersistentKeepalive,
@ -494,7 +488,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w1, wireGuardIP: w1,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["b"].Endpoint, endpoint: nodes["b"].Endpoint,
key: nodes["b"].Key, key: nodes["b"].Key,
persistentKeepalive: nodes["b"].PersistentKeepalive, persistentKeepalive: nodes["b"].PersistentKeepalive,
@ -506,7 +500,7 @@ func TestNewTopology(t *testing.T) {
allowedLocationIPs: nodes["b"].AllowedLocationIPs, allowedLocationIPs: nodes["b"].AllowedLocationIPs,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["c"].Endpoint, endpoint: nodes["c"].Endpoint,
key: nodes["c"].Key, key: nodes["c"].Key,
persistentKeepalive: nodes["c"].PersistentKeepalive, persistentKeepalive: nodes["c"].PersistentKeepalive,
@ -517,7 +511,7 @@ func TestNewTopology(t *testing.T) {
wireGuardIP: w3, wireGuardIP: w3,
}, },
{ {
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}}, allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["d"].Endpoint, endpoint: nodes["d"].Endpoint,
key: nodes["d"].Key, key: nodes["d"].Key,
persistentKeepalive: nodes["d"].PersistentKeepalive, persistentKeepalive: nodes["d"].PersistentKeepalive,
@ -545,7 +539,7 @@ func TestNewTopology(t *testing.T) {
} }
} }
func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port int, key wgtypes.Key, subnet *net.IPNet, persistentKeepalive time.Duration) *Topology { func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int) *Topology {
topo, err := NewTopology(nodes, peers, granularity, hostname, port, key, subnet, persistentKeepalive, nil) topo, err := NewTopology(nodes, peers, granularity, hostname, port, key, subnet, persistentKeepalive, nil)
if err != nil { if err != nil {
t.Errorf("failed to generate Topology: %v", err) t.Errorf("failed to generate Topology: %v", err)
@ -553,6 +547,211 @@ func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, gran
return topo return topo
} }
func TestConf(t *testing.T) {
nodes, peers, key, port := setup(t)
for _, tc := range []struct {
name string
topology *Topology
result string
}{
{
name: "logical from a",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32, 192.168.178.3/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
PersistentKeepalive = 25
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
PersistentKeepalive = 25
`,
},
{
name: "logical from b",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "logical from c",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "full from a",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32, 192.168.178.3/32
PersistentKeepalive = 25
[Peer]
PublicKey = key3
Endpoint = 10.1.0.3:51820
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
PersistentKeepalive = 25
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
PersistentKeepalive = 25
`,
},
{
name: "full from b",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key3
Endpoint = 10.1.0.3:51820
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "full from c",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32, 192.168.178.3/32
[Peer]
PublicKey = key4
Endpoint = 10.1.0.4:51820
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
[Peer]
PublicKey = key4
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
AllowedIPs = 10.5.0.3/24
`,
},
} {
conf := tc.topology.Conf()
if !conf.Equal(wireguard.Parse([]byte(tc.result))) {
buf, err := conf.Bytes()
if err != nil {
t.Errorf("test case %q: failed to render conf: %v", tc.name, err)
}
t.Errorf("test case %q: expected %s got %s", tc.name, tc.result, string(buf))
}
}
}
func TestFindLeader(t *testing.T) { func TestFindLeader(t *testing.T) {
ip, e1, err := net.ParseCIDR("10.0.0.1/32") ip, e1, err := net.ParseCIDR("10.0.0.1/32")
if err != nil { if err != nil {
@ -568,24 +767,24 @@ func TestFindLeader(t *testing.T) {
nodes := []*Node{ nodes := []*Node{
{ {
Name: "a", Name: "a",
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
}, },
{ {
Name: "b", Name: "b",
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
}, },
{ {
Name: "c", Name: "c",
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
}, },
{ {
Name: "d", Name: "d",
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
Leader: true, Leader: true,
}, },
{ {
Name: "2", Name: "2",
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort), Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
Leader: true, Leader: true,
}, },
} }
@ -641,38 +840,31 @@ func TestDeduplicatePeerIPs(t *testing.T) {
p1 := &Peer{ p1 := &Peer{
Name: "1", Name: "1",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key1"),
AllowedIPs: []*net.IPNet{
PublicKey: key1, {IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
} }
p2 := &Peer{ p2 := &Peer{
Name: "2", Name: "2",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key2"),
PublicKey: key2, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
} }
p3 := &Peer{ p3 := &Peer{
Name: "3", Name: "3",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key3"),
PublicKey: key3, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
} }
@ -680,12 +872,10 @@ func TestDeduplicatePeerIPs(t *testing.T) {
p4 := &Peer{ p4 := &Peer{
Name: "4", Name: "4",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key4"),
PublicKey: key4, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
} }
@ -708,11 +898,9 @@ func TestDeduplicatePeerIPs(t *testing.T) {
{ {
Name: "2", Name: "2",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key2"),
PublicKey: key2, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
}, },
@ -726,11 +914,9 @@ func TestDeduplicatePeerIPs(t *testing.T) {
{ {
Name: "1", Name: "1",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key1"),
PublicKey: key1, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
}, },
@ -744,25 +930,19 @@ func TestDeduplicatePeerIPs(t *testing.T) {
{ {
Name: "2", Name: "2",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key2"),
PublicKey: key2,
},
}, },
}, },
{ {
Name: "1", Name: "1",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key1"),
PublicKey: key1,
},
}, },
}, },
{ {
Name: "4", Name: "4",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key4"),
PublicKey: key4,
},
}, },
}, },
}, },
@ -774,23 +954,19 @@ func TestDeduplicatePeerIPs(t *testing.T) {
{ {
Name: "4", Name: "4",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key4"),
PublicKey: key4, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
}, },
{ {
Name: "1", Name: "1",
Peer: wireguard.Peer{ Peer: wireguard.Peer{
PeerConfig: wgtypes.PeerConfig{ PublicKey: []byte("key1"),
PublicKey: key1, AllowedIPs: []*net.IPNet{
AllowedIPs: []net.IPNet{ {IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)}, {IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
},
}, },
}, },
}, },
@ -809,12 +985,12 @@ func TestFilterAllowedIPs(t *testing.T) {
topo := mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive) topo := mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive)
for _, tc := range []struct { for _, tc := range []struct {
name string name string
allowedLocationIPs map[int][]net.IPNet allowedLocationIPs map[int][]*net.IPNet
result map[int][]net.IPNet result map[int][]*net.IPNet
}{ }{
{ {
name: "nothing to filter", name: "nothing to filter",
allowedLocationIPs: map[int][]net.IPNet{ allowedLocationIPs: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.4/32"), mustParseCIDR("192.168.178.4/32"),
}, },
@ -826,7 +1002,7 @@ func TestFilterAllowedIPs(t *testing.T) {
mustParseCIDR("192.168.178.7/32"), mustParseCIDR("192.168.178.7/32"),
}, },
}, },
result: map[int][]net.IPNet{ result: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.4/32"), mustParseCIDR("192.168.178.4/32"),
}, },
@ -841,7 +1017,7 @@ func TestFilterAllowedIPs(t *testing.T) {
}, },
{ {
name: "intersections between segments", name: "intersections between segments",
allowedLocationIPs: map[int][]net.IPNet{ allowedLocationIPs: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.4/32"), mustParseCIDR("192.168.178.4/32"),
mustParseCIDR("192.168.178.8/32"), mustParseCIDR("192.168.178.8/32"),
@ -855,7 +1031,7 @@ func TestFilterAllowedIPs(t *testing.T) {
mustParseCIDR("192.168.178.4/32"), mustParseCIDR("192.168.178.4/32"),
}, },
}, },
result: map[int][]net.IPNet{ result: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.8/32"), mustParseCIDR("192.168.178.8/32"),
}, },
@ -871,7 +1047,7 @@ func TestFilterAllowedIPs(t *testing.T) {
}, },
{ {
name: "intersections with wireGuardCIDR", name: "intersections with wireGuardCIDR",
allowedLocationIPs: map[int][]net.IPNet{ allowedLocationIPs: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("10.4.0.1/32"), mustParseCIDR("10.4.0.1/32"),
mustParseCIDR("192.168.178.8/32"), mustParseCIDR("192.168.178.8/32"),
@ -884,7 +1060,7 @@ func TestFilterAllowedIPs(t *testing.T) {
mustParseCIDR("192.168.178.7/32"), mustParseCIDR("192.168.178.7/32"),
}, },
}, },
result: map[int][]net.IPNet{ result: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.8/32"), mustParseCIDR("192.168.178.8/32"),
}, },
@ -899,7 +1075,7 @@ func TestFilterAllowedIPs(t *testing.T) {
}, },
{ {
name: "intersections with more than one allowedLocationIPs", name: "intersections with more than one allowedLocationIPs",
allowedLocationIPs: map[int][]net.IPNet{ allowedLocationIPs: map[int][]*net.IPNet{
0: { 0: {
mustParseCIDR("192.168.178.8/32"), mustParseCIDR("192.168.178.8/32"),
}, },
@ -910,7 +1086,7 @@ func TestFilterAllowedIPs(t *testing.T) {
mustParseCIDR("192.168.178.7/24"), mustParseCIDR("192.168.178.7/24"),
}, },
}, },
result: map[int][]net.IPNet{ result: map[int][]*net.IPNet{
0: {}, 0: {},
1: {}, 1: {},
2: { 2: {

View File

@ -15,15 +15,16 @@
package wireguard package wireguard
import ( import (
"bufio"
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"sort" "sort"
"strconv" "strconv"
"strings"
"time" "time"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation"
) )
@ -31,6 +32,10 @@ type section string
type key string type key string
const ( const (
separator = "="
dumpSeparator = "\t"
dumpNone = "(none)"
dumpOff = "off"
interfaceSection section = "Interface" interfaceSection section = "Interface"
peerSection section = "Peer" peerSection section = "Peer"
listenPortKey key = "ListenPort" listenPortKey key = "ListenPort"
@ -42,209 +47,56 @@ const (
publicKeyKey key = "PublicKey" publicKeyKey key = "PublicKey"
) )
type dumpInterfaceIndex int
const (
dumpInterfacePrivateKeyIndex = iota
dumpInterfacePublicKeyIndex
dumpInterfaceListenPortIndex
dumpInterfaceFWMarkIndex
dumpInterfaceLen
)
type dumpPeerIndex int
const (
dumpPeerPublicKeyIndex = iota
dumpPeerPresharedKeyIndex
dumpPeerEndpointIndex
dumpPeerAllowedIPsIndex
dumpPeerLatestHandshakeIndex
dumpPeerTransferRXIndex
dumpPeerTransferTXIndex
dumpPeerPersistentKeepaliveIndex
dumpPeerLen
)
// Conf represents a WireGuard configuration file. // Conf represents a WireGuard configuration file.
type Conf struct { type Conf struct {
wgtypes.Config Interface *Interface
// The Peers field is shadowed because every Peer needs the Endpoint field that contains a DNS endpoint. Peers []*Peer
Peers []Peer
} }
// WGConfig returns a wgtypes.Config from a Conf. // Interface represents the `interface` section of a WireGuard configuration.
func (c *Conf) WGConfig() wgtypes.Config { type Interface struct {
if c == nil { ListenPort uint32
// The empty Config will do nothing, when applied. PrivateKey []byte
return wgtypes.Config{}
}
r := c.Config
wgPs := make([]wgtypes.PeerConfig, len(c.Peers))
for i, p := range c.Peers {
wgPs[i] = p.PeerConfig
if p.Endpoint.Resolved() {
// We can ignore the error because we already checked if the Endpoint was resolved in the above line.
wgPs[i].Endpoint, _ = p.Endpoint.UDPAddr(false)
}
wgPs[i].ReplaceAllowedIPs = true
}
r.Peers = wgPs
r.ReplacePeers = true
return r
}
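WGConfig is the bridge from the file-oriented Conf to wgctrl. A minimal sketch of applying it, assuming a device named kilo0 already exists; this is illustrative only, not necessarily how Kilo itself applies the configuration.

package example

import (
	"golang.zx2c4.com/wireguard/wgctrl"

	"github.com/squat/kilo/pkg/wireguard" // module path as shown on this side of the diff
)

// apply pushes the converted configuration to the kernel (or userspace) device.
func apply(conf *wireguard.Conf) error {
	client, err := wgctrl.New()
	if err != nil {
		return err
	}
	defer client.Close()
	// Because WGConfig sets ReplacePeers and ReplaceAllowedIPs, this call fully
	// reconciles the device with the desired peer set.
	return client.ConfigureDevice("kilo0", conf.WGConfig())
}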
// Endpoint represents a WireGuard endpoint.
type Endpoint struct {
udpAddr *net.UDPAddr
addr string
}
// ParseEndpoint returns an Endpoint from a string.
// The input should look like "10.0.0.0:100", "[ff10::10]:100"
// or "example.com:100".
func ParseEndpoint(endpoint string) *Endpoint {
if len(endpoint) == 0 {
return nil
}
hostRaw, portRaw, err := net.SplitHostPort(endpoint)
if err != nil {
return nil
}
port, err := strconv.ParseUint(portRaw, 10, 32)
if err != nil {
return nil
}
if len(validation.IsValidPortNum(int(port))) != 0 {
return nil
}
ip := net.ParseIP(hostRaw)
if ip == nil {
if len(validation.IsDNS1123Subdomain(hostRaw)) == 0 {
return &Endpoint{
addr: endpoint,
}
}
return nil
}
// ResolveUDPAddr will not resolve the endpoint as long as a valid IP and port are given.
// This should be the case here.
u, err := net.ResolveUDPAddr("udp", endpoint)
if err != nil {
return nil
}
u.IP = cutIP(u.IP)
return &Endpoint{
udpAddr: u,
}
}
// NewEndpointFromUDPAddr returns an Endpoint from a net.UDPAddr.
func NewEndpointFromUDPAddr(u *net.UDPAddr) *Endpoint {
if u != nil {
u.IP = cutIP(u.IP)
}
return &Endpoint{
udpAddr: u,
}
}
// NewEndpoint returns an Endpoint from a net.IP and port.
func NewEndpoint(ip net.IP, port int) *Endpoint {
return &Endpoint{
udpAddr: &net.UDPAddr{
IP: cutIP(ip),
Port: port,
},
}
}
// Ready returns true if the Endpoint is ready.
// Ready means that an IP or DN and a port exist.
func (e *Endpoint) Ready() bool {
if e == nil {
return false
}
return (e.udpAddr != nil && e.udpAddr.IP != nil && e.udpAddr.Port > 0) || len(e.addr) > 0
}
// Port returns the port of the Endpoint.
func (e *Endpoint) Port() int {
if !e.Ready() {
return 0
}
if e.udpAddr != nil {
return e.udpAddr.Port
}
// We can ignore the errors here because the returned port will be "".
// This will result in port 0 after the conversion to an int.
_, p, _ := net.SplitHostPort(e.addr)
port, _ := strconv.ParseUint(p, 10, 32)
return int(port)
}
// HasDNS returns true if the endpoint has a DN.
func (e *Endpoint) HasDNS() bool {
return e != nil && e.addr != ""
}
// DNS returns the DN of the Endpoint.
func (e *Endpoint) DNS() string {
if e == nil {
return ""
}
_, s, _ := net.SplitHostPort(e.addr)
return s
}
// Resolved returns true if the DN of the Endpoint was resolved
// or if the Endpoint has a resolved endpoint.
func (e *Endpoint) Resolved() bool {
return e != nil && e.udpAddr != nil
}
// UDPAddr returns the UDPAddr of the Endpoint. If resolve is false,
// UDPAddr() will not try to resolve a DN name, if the Endpoint is not yet resolved.
func (e *Endpoint) UDPAddr(resolve bool) (*net.UDPAddr, error) {
if !e.Ready() {
return nil, errors.New("endpoint is not ready")
}
if e.udpAddr != nil {
// Make a copy of the UDPAddr to protect it from modification outside this package.
h := *e.udpAddr
return &h, nil
}
if !resolve {
return nil, errors.New("endpoint is not resolved")
}
var err error
if e.udpAddr, err = net.ResolveUDPAddr("udp", e.addr); err != nil {
return nil, err
}
// Make a copy of the UDPAddr to protect it from modification outside this package.
h := *e.udpAddr
return &h, nil
}
// IP returns the IP address of the Endpoint or nil.
func (e *Endpoint) IP() net.IP {
if !e.Resolved() {
return nil
}
return e.udpAddr.IP
}
// String will return the endpoint as a string.
// If a DN exists, it will take precedence over the resolved endpoint.
func (e *Endpoint) String() string {
return e.StringOpt(true)
}
// StringOpt will return the string of the Endpoint.
// If dnsFirst is false, the resolved Endpoint will
// take precedence over the DN.
func (e *Endpoint) StringOpt(dnsFirst bool) string {
if e == nil {
return ""
}
if e.udpAddr != nil && (!dnsFirst || e.addr == "") {
return e.udpAddr.String()
}
return e.addr
}
// Equal will return true if the Endpoints are equal.
// If dnsFirst is false, the DN will only be compared if
// the IPs are nil.
func (e *Endpoint) Equal(b *Endpoint, dnsFirst bool) bool {
return e.StringOpt(dnsFirst) == b.StringOpt(dnsFirst)
} }
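To make the semantics of the new Endpoint type concrete, a small hypothetical walkthrough follows; the addresses and host name are invented, and the package and function names are placeholders.

package example

import (
	"fmt"

	"github.com/squat/kilo/pkg/wireguard" // module path as shown on this side of the diff
)

// demoEndpoints illustrates the behaviour documented above.
func demoEndpoints() {
	byIP := wireguard.ParseEndpoint("10.0.0.1:51820")
	byDNS := wireguard.ParseEndpoint("vpn.example.com:51820")

	fmt.Println(byIP.Ready(), byIP.Resolved())   // true true: an IP endpoint is already resolved.
	fmt.Println(byDNS.Ready(), byDNS.Resolved()) // true false: the DN has not been looked up yet.

	// UDPAddr(true) resolves the DN on demand; UDPAddr(false) returns an error
	// instead of triggering a lookup.
	if addr, err := byDNS.UDPAddr(true); err == nil {
		fmt.Println(addr.Port) // 51820
	}
}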
// Peer represents a `peer` section of a WireGuard configuration. // Peer represents a `peer` section of a WireGuard configuration.
type Peer struct { type Peer struct {
wgtypes.PeerConfig AllowedIPs []*net.IPNet
Endpoint *Endpoint Endpoint *Endpoint
PersistentKeepalive int
PresharedKey []byte
PublicKey []byte
// The following fields are part of the runtime information, not the configuration.
LatestHandshake time.Time
} }
// DeduplicateIPs eliminates duplicate allowed IPs. // DeduplicateIPs eliminates duplicate allowed IPs.
func (p *Peer) DeduplicateIPs() { func (p *Peer) DeduplicateIPs() {
var ips []net.IPNet var ips []*net.IPNet
seen := make(map[string]struct{}) seen := make(map[string]struct{})
for _, ip := range p.AllowedIPs { for _, ip := range p.AllowedIPs {
if _, ok := seen[ip.String()]; ok { if _, ok := seen[ip.String()]; ok {
@ -256,27 +108,181 @@ func (p *Peer) DeduplicateIPs() {
p.AllowedIPs = ips p.AllowedIPs = ips
} }
// Endpoint represents an `endpoint` key of a `peer` section.
type Endpoint struct {
DNSOrIP
Port uint32
}
// String prints the string representation of the endpoint.
func (e *Endpoint) String() string {
if e == nil {
return ""
}
dnsOrIP := e.DNSOrIP.String()
if e.IP != nil && len(e.IP) == net.IPv6len {
dnsOrIP = "[" + dnsOrIP + "]"
}
return dnsOrIP + ":" + strconv.FormatUint(uint64(e.Port), 10)
}
// Equal compares two endpoints.
func (e *Endpoint) Equal(b *Endpoint, DNSFirst bool) bool {
if (e == nil) != (b == nil) {
return false
}
if e != nil {
if e.Port != b.Port {
return false
}
if DNSFirst {
// Check the DNS name first if it was resolved.
if e.DNS != b.DNS {
return false
}
if e.DNS == "" && !e.IP.Equal(b.IP) {
return false
}
} else {
// IPs take priority, so check them first.
if !e.IP.Equal(b.IP) {
return false
}
// Only check the DNS name if the IP is empty.
if e.IP == nil && e.DNS != b.DNS {
return false
}
}
}
return true
}
// DNSOrIP represents either a DNS name or an IP address.
// IPs, as they are more specific, are preferred.
type DNSOrIP struct {
DNS string
IP net.IP
}
// String prints the string representation of the struct.
func (d DNSOrIP) String() string {
if d.IP != nil {
return d.IP.String()
}
return d.DNS
}
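A small sketch of the DNSOrIP-based Endpoint shown above (note that the other side of this diff uses a different Endpoint type). The values are made up for illustration.

```go
// Illustrative only: the IP, when set, wins over the DNS name, and IPv6 addresses are bracketed.
func exampleDNSOrIPEndpoint() {
	byName := &Endpoint{DNSOrIP: DNSOrIP{DNS: "vpn.example.com"}, Port: 51820}
	_ = byName.String() // "vpn.example.com:51820"

	// To4 keeps the IP at 4 bytes so String does not bracket it as IPv6.
	byIP := &Endpoint{DNSOrIP: DNSOrIP{IP: net.ParseIP("10.0.0.1").To4(), DNS: "ignored.example.com"}, Port: 51820}
	_ = byIP.String() // "10.0.0.1:51820": the IP takes precedence over the DNS name.

	v6 := &Endpoint{DNSOrIP: DNSOrIP{IP: net.ParseIP("ff50::10")}, Port: 51820}
	_ = v6.String() // "[ff50::10]:51820"
}
```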
// Parse parses a given WireGuard configuration file and produces a Conf struct.
func Parse(buf []byte) *Conf {
var (
active section
kv []string
c Conf
err error
iface *Interface
i int
k key
line, v string
peer *Peer
port uint64
)
s := bufio.NewScanner(bytes.NewBuffer(buf))
for s.Scan() {
line = strings.TrimSpace(s.Text())
// Skip comments.
if strings.HasPrefix(line, "#") {
continue
}
// Line is a section title.
if strings.HasPrefix(line, "[") {
if peer != nil {
c.Peers = append(c.Peers, peer)
peer = nil
}
if iface != nil {
c.Interface = iface
iface = nil
}
active = section(strings.TrimSpace(strings.Trim(line, "[]")))
switch active {
case interfaceSection:
iface = new(Interface)
case peerSection:
peer = new(Peer)
}
continue
}
kv = strings.SplitN(line, separator, 2)
if len(kv) != 2 {
continue
}
k = key(strings.TrimSpace(kv[0]))
v = strings.TrimSpace(kv[1])
switch active {
case interfaceSection:
switch k {
case listenPortKey:
port, err = strconv.ParseUint(v, 10, 32)
if err != nil {
continue
}
iface.ListenPort = uint32(port)
case privateKeyKey:
iface.PrivateKey = []byte(v)
}
case peerSection:
switch k {
case allowedIPsKey:
err = peer.parseAllowedIPs(v)
if err != nil {
continue
}
case endpointKey:
err = peer.parseEndpoint(v)
if err != nil {
continue
}
case persistentKeepaliveKey:
i, err = strconv.Atoi(v)
if err != nil {
continue
}
peer.PersistentKeepalive = i
case presharedKeyKey:
peer.PresharedKey = []byte(v)
case publicKeyKey:
peer.PublicKey = []byte(v)
}
}
}
if peer != nil {
c.Peers = append(c.Peers, peer)
}
if iface != nil {
c.Interface = iface
}
return &c
}
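A hedged sketch of how Parse is typically driven. The configuration text is made up for illustration; the field names follow the Conf, Interface, and Peer types used in this file.

```go
// Illustrative only: parse a small configuration and read a few fields back.
func exampleParse() {
	conf := Parse([]byte(`[Interface]
PrivateKey = private
ListenPort = 51820

[Peer]
PublicKey = key
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32
PersistentKeepalive = 10
`))
	_ = conf.Interface.ListenPort       // 51820
	_ = conf.Peers[0].Endpoint.String() // "10.1.0.2:51820"
	_ = len(conf.Peers[0].AllowedIPs)   // 2

	// Bytes renders the configuration back into text suitable for `wg setconf`.
	if b, err := conf.Bytes(); err == nil {
		_ = b
	}
}
```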
// Bytes renders a WireGuard configuration to bytes. // Bytes renders a WireGuard configuration to bytes.
func (c *Conf) Bytes() ([]byte, error) { func (c *Conf) Bytes() ([]byte, error) {
if c == nil {
return nil, nil
}
var err error var err error
buf := bytes.NewBuffer(make([]byte, 0, 512)) buf := bytes.NewBuffer(make([]byte, 0, 512))
if c.PrivateKey != nil { if c.Interface != nil {
if err = writeSection(buf, interfaceSection); err != nil { if err = writeSection(buf, interfaceSection); err != nil {
return nil, fmt.Errorf("failed to write interface: %v", err) return nil, fmt.Errorf("failed to write interface: %v", err)
} }
if err = writePKey(buf, privateKeyKey, c.PrivateKey); err != nil { if err = writePKey(buf, privateKeyKey, c.Interface.PrivateKey); err != nil {
return nil, fmt.Errorf("failed to write private key: %v", err) return nil, fmt.Errorf("failed to write private key: %v", err)
} }
if err = writeValue(buf, listenPortKey, strconv.Itoa(*c.ListenPort)); err != nil { if err = writeValue(buf, listenPortKey, strconv.FormatUint(uint64(c.Interface.ListenPort), 10)); err != nil {
return nil, fmt.Errorf("failed to write listen port: %v", err) return nil, fmt.Errorf("failed to write listen port: %v", err)
} }
} }
for i, p := range c.Peers { for i, p := range c.Peers {
// Add newlines to make the formatting nicer. // Add newlines to make the formatting nicer.
if i == 0 && c.PrivateKey != nil || i != 0 { if i == 0 && c.Interface != nil || i != 0 {
if err = buf.WriteByte('\n'); err != nil { if err = buf.WriteByte('\n'); err != nil {
return nil, err return nil, err
} }
@ -291,97 +297,71 @@ func (c *Conf) Bytes() ([]byte, error) {
if err = writeEndpoint(buf, p.Endpoint); err != nil { if err = writeEndpoint(buf, p.Endpoint); err != nil {
return nil, fmt.Errorf("failed to write endpoint: %v", err) return nil, fmt.Errorf("failed to write endpoint: %v", err)
} }
if p.PersistentKeepaliveInterval == nil { if err = writeValue(buf, persistentKeepaliveKey, strconv.Itoa(p.PersistentKeepalive)); err != nil {
p.PersistentKeepaliveInterval = new(time.Duration)
}
if err = writeValue(buf, persistentKeepaliveKey, strconv.FormatUint(uint64(*p.PersistentKeepaliveInterval/time.Second), 10)); err != nil {
return nil, fmt.Errorf("failed to write persistent keepalive: %v", err) return nil, fmt.Errorf("failed to write persistent keepalive: %v", err)
} }
if err = writePKey(buf, presharedKeyKey, p.PresharedKey); err != nil { if err = writePKey(buf, presharedKeyKey, p.PresharedKey); err != nil {
return nil, fmt.Errorf("failed to write preshared key: %v", err) return nil, fmt.Errorf("failed to write preshared key: %v", err)
} }
if err = writePKey(buf, publicKeyKey, &p.PublicKey); err != nil { if err = writePKey(buf, publicKeyKey, p.PublicKey); err != nil {
return nil, fmt.Errorf("failed to write public key: %v", err) return nil, fmt.Errorf("failed to write public key: %v", err)
} }
} }
return buf.Bytes(), nil return buf.Bytes(), nil
} }
// Equal returns true if the Conf and wgtypes.Device are equal. // Equal checks if two WireGuard configurations are equivalent.
func (c *Conf) Equal(d *wgtypes.Device) (bool, string) { func (c *Conf) Equal(b *Conf) bool {
if c == nil || d == nil { if (c.Interface == nil) != (b.Interface == nil) {
return c == nil && d == nil, "nil values" return false
} }
if c.ListenPort == nil || *c.ListenPort != d.ListenPort { if c.Interface != nil {
return false, fmt.Sprintf("port: old=%q, new=\"%v\"", d.ListenPort, c.ListenPort) if c.Interface.ListenPort != b.Interface.ListenPort || !bytes.Equal(c.Interface.PrivateKey, b.Interface.PrivateKey) {
return false
}
} }
if c.PrivateKey == nil || *c.PrivateKey != d.PrivateKey { if len(c.Peers) != len(b.Peers) {
return false, fmt.Sprintf("private key: old=\"%s...\", new=\"%s\"", d.PrivateKey.String()[0:5], c.PrivateKey.String()[0:5]) return false
} }
if len(c.Peers) != len(d.Peers) {
return false, fmt.Sprintf("number of peers: old=%d, new=%d", len(d.Peers), len(c.Peers))
}
sortPeerConfigs(d.Peers)
sortPeers(c.Peers) sortPeers(c.Peers)
sortPeers(b.Peers)
for i := range c.Peers { for i := range c.Peers {
if len(c.Peers[i].AllowedIPs) != len(d.Peers[i].AllowedIPs) { if len(c.Peers[i].AllowedIPs) != len(b.Peers[i].AllowedIPs) {
return false, fmt.Sprintf("Peer %d allowed IP length: old=%d, new=%d", i, len(d.Peers[i].AllowedIPs), len(c.Peers[i].AllowedIPs)) return false
} }
sortCIDRs(c.Peers[i].AllowedIPs) sortCIDRs(c.Peers[i].AllowedIPs)
sortCIDRs(d.Peers[i].AllowedIPs) sortCIDRs(b.Peers[i].AllowedIPs)
for j := range c.Peers[i].AllowedIPs { for j := range c.Peers[i].AllowedIPs {
if c.Peers[i].AllowedIPs[j].String() != d.Peers[i].AllowedIPs[j].String() { if c.Peers[i].AllowedIPs[j].String() != b.Peers[i].AllowedIPs[j].String() {
return false, fmt.Sprintf("Peer %d allowed IP: old=%q, new=%q", i, d.Peers[i].AllowedIPs[j].String(), c.Peers[i].AllowedIPs[j].String()) return false
} }
} }
if c.Peers[i].Endpoint == nil || d.Peers[i].Endpoint == nil { if !c.Peers[i].Endpoint.Equal(b.Peers[i].Endpoint, false) {
return c.Peers[i].Endpoint == nil && d.Peers[i].Endpoint == nil, "peer endpoints: nil value" return false
} }
if c.Peers[i].Endpoint.StringOpt(false) != d.Peers[i].Endpoint.String() { if c.Peers[i].PersistentKeepalive != b.Peers[i].PersistentKeepalive || !bytes.Equal(c.Peers[i].PresharedKey, b.Peers[i].PresharedKey) || !bytes.Equal(c.Peers[i].PublicKey, b.Peers[i].PublicKey) {
return false, fmt.Sprintf("Peer %d endpoint: old=%q, new=%q", i, d.Peers[i].Endpoint.String(), c.Peers[i].Endpoint.StringOpt(false)) return false
}
pki := time.Duration(0)
if p := c.Peers[i].PersistentKeepaliveInterval; p != nil {
pki = *p
}
psk := wgtypes.Key{}
if p := c.Peers[i].PresharedKey; p != nil {
psk = *p
}
if pki != d.Peers[i].PersistentKeepaliveInterval || psk != d.Peers[i].PresharedKey || c.Peers[i].PublicKey != d.Peers[i].PublicKey {
return false, "persistent keepalive or pershared key"
} }
} }
return true, "" return true
} }
func sortPeerConfigs(peers []wgtypes.Peer) { func sortPeers(peers []*Peer) {
sort.Slice(peers, func(i, j int) bool { sort.Slice(peers, func(i, j int) bool {
return peers[i].PublicKey.String() < peers[j].PublicKey.String() if bytes.Compare(peers[i].PublicKey, peers[j].PublicKey) < 0 {
return true
}
return false
}) })
} }
func sortPeers(peers []Peer) { func sortCIDRs(cidrs []*net.IPNet) {
sort.Slice(peers, func(i, j int) bool {
return peers[i].PublicKey.String() < peers[j].PublicKey.String()
})
}
func sortCIDRs(cidrs []net.IPNet) {
sort.Slice(cidrs, func(i, j int) bool { sort.Slice(cidrs, func(i, j int) bool {
return cidrs[i].String() < cidrs[j].String() return cidrs[i].String() < cidrs[j].String()
}) })
} }
func cutIP(ip net.IP) net.IP { func writeAllowedIPs(buf *bytes.Buffer, ais []*net.IPNet) error {
if i4 := ip.To4(); i4 != nil {
return i4
}
return ip.To16()
}
func writeAllowedIPs(buf *bytes.Buffer, ais []net.IPNet) error {
if len(ais) == 0 { if len(ais) == 0 {
return nil return nil
} }
@ -402,16 +382,15 @@ func writeAllowedIPs(buf *bytes.Buffer, ais []net.IPNet) error {
return buf.WriteByte('\n') return buf.WriteByte('\n')
} }
func writePKey(buf *bytes.Buffer, k key, b *wgtypes.Key) error { func writePKey(buf *bytes.Buffer, k key, b []byte) error {
// Print nothing if the public key was never initialized. if len(b) == 0 {
if b == nil || (wgtypes.Key{}) == *b {
return nil return nil
} }
var err error var err error
if err = writeKey(buf, k); err != nil { if err = writeKey(buf, k); err != nil {
return err return err
} }
if _, err = buf.Write([]byte(b.String())); err != nil { if _, err = buf.Write(b); err != nil {
return err return err
} }
return buf.WriteByte('\n') return buf.WriteByte('\n')
@ -429,15 +408,14 @@ func writeValue(buf *bytes.Buffer, k key, v string) error {
} }
func writeEndpoint(buf *bytes.Buffer, e *Endpoint) error { func writeEndpoint(buf *bytes.Buffer, e *Endpoint) error {
str := e.String() if e == nil {
if str == "" {
return nil return nil
} }
var err error var err error
if err = writeKey(buf, endpointKey); err != nil { if err = writeKey(buf, endpointKey); err != nil {
return err return err
} }
if _, err = buf.WriteString(str); err != nil { if _, err = buf.WriteString(e.String()); err != nil {
return err return err
} }
return buf.WriteByte('\n') return buf.WriteByte('\n')
@ -465,3 +443,177 @@ func writeKey(buf *bytes.Buffer, k key) error {
_, err = buf.WriteString(" = ") _, err = buf.WriteString(" = ")
return err return err
} }
var (
errParseEndpoint = errors.New("could not parse Endpoint")
)
func (p *Peer) parseEndpoint(v string) error {
var (
kv []string
err error
ip, ip4 net.IP
port uint64
)
kv = strings.Split(v, ":")
if len(kv) < 2 {
return errParseEndpoint
}
port, err = strconv.ParseUint(kv[len(kv)-1], 10, 32)
if err != nil {
return err
}
d := DNSOrIP{}
ip = net.ParseIP(strings.Trim(strings.Join(kv[:len(kv)-1], ":"), "[]"))
if ip == nil {
if len(validation.IsDNS1123Subdomain(kv[0])) != 0 {
return errParseEndpoint
}
d.DNS = kv[0]
} else {
if ip4 = ip.To4(); ip4 != nil {
d.IP = ip4
} else {
d.IP = ip.To16()
}
}
p.Endpoint = &Endpoint{
DNSOrIP: d,
Port: uint32(port),
}
return nil
}
func (p *Peer) parseAllowedIPs(v string) error {
var (
ai *net.IPNet
kv []string
err error
i int
ip, ip4 net.IP
)
kv = strings.Split(v, ",")
for i = range kv {
ip, ai, err = net.ParseCIDR(strings.TrimSpace(kv[i]))
if err != nil {
return err
}
if ip4 = ip.To4(); ip4 != nil {
ip = ip4
} else {
ip = ip.To16()
}
ai.IP = ip
p.AllowedIPs = append(p.AllowedIPs, ai)
}
return nil
}
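An in-package sketch of the string formats these two unexported helpers accept; the values are illustrative.

```go
// Illustrative only: each parseEndpoint call replaces p.Endpoint; parseAllowedIPs appends.
func exampleParseHelpers() {
	p := new(Peer)
	_ = p.parseEndpoint("10.1.0.2:51820")                // IPv4 host and port.
	_ = p.parseEndpoint("[fc00:f853:ccd:e793::3]:51820") // bracketed IPv6 host and port.
	_ = p.parseEndpoint("vpn.example.com:51820")         // DNS name and port.
	_ = p.parseAllowedIPs("10.2.2.0/24, 192.168.0.1/32") // comma-separated CIDRs.
}
```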
// ParseDump parses a given WireGuard dump and produces a Conf struct.
func ParseDump(buf []byte) (*Conf, error) {
// from man wg, show section:
// If dump is specified, then several lines are printed;
// the first contains in order separated by tab: private-key, public-key, listen-port, fwmark.
// Subsequent lines are printed for each peer and contain in order separated by tab:
// public-key, preshared-key, endpoint, allowed-ips, latest-handshake, transfer-rx, transfer-tx, persistent-keepalive.
var (
active section
values []string
c Conf
err error
iface *Interface
peer *Peer
port uint64
sec int64
pka int
line int
)
// First line is Interface
active = interfaceSection
s := bufio.NewScanner(bytes.NewBuffer(buf))
for s.Scan() {
values = strings.Split(s.Text(), dumpSeparator)
switch active {
case interfaceSection:
if len(values) < dumpInterfaceLen {
return nil, fmt.Errorf("invalid interface line: missing fields (%d < %d)", len(values), dumpInterfaceLen)
}
iface = new(Interface)
for i := range values {
switch i {
case dumpInterfacePrivateKeyIndex:
iface.PrivateKey = []byte(values[i])
case dumpInterfaceListenPortIndex:
port, err = strconv.ParseUint(values[i], 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid interface line: error parsing listen-port: %w", err)
}
iface.ListenPort = uint32(port)
}
}
c.Interface = iface
// Next lines are Peers
active = peerSection
case peerSection:
if len(values) < dumpPeerLen {
return nil, fmt.Errorf("invalid peer line %d: missing fields (%d < %d)", line, len(values), dumpPeerLen)
}
peer = new(Peer)
for i := range values {
switch i {
case dumpPeerPublicKeyIndex:
peer.PublicKey = []byte(values[i])
case dumpPeerPresharedKeyIndex:
if values[i] == dumpNone {
continue
}
peer.PresharedKey = []byte(values[i])
case dumpPeerEndpointIndex:
if values[i] == dumpNone {
continue
}
err = peer.parseEndpoint(values[i])
if err != nil {
return nil, fmt.Errorf("invalid peer line %d: error parsing endpoint: %w", line, err)
}
case dumpPeerAllowedIPsIndex:
if values[i] == dumpNone {
continue
}
err = peer.parseAllowedIPs(values[i])
if err != nil {
return nil, fmt.Errorf("invalid peer line %d: error parsing allowed-ips: %w", line, err)
}
case dumpPeerLatestHandshakeIndex:
if values[i] == "0" {
// Use go zero value, not unix 0 timestamp.
peer.LatestHandshake = time.Time{}
continue
}
sec, err = strconv.ParseInt(values[i], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid peer line %d: error parsing latest-handshake: %w", line, err)
}
peer.LatestHandshake = time.Unix(sec, 0)
case dumpPeerPersistentKeepaliveIndex:
if values[i] == dumpOff {
continue
}
pka, err = strconv.Atoi(values[i])
if err != nil {
return nil, fmt.Errorf("invalid peer line %d: error parsing persistent-keepalive: %w", line, err)
}
peer.PersistentKeepalive = pka
}
}
c.Peers = append(c.Peers, peer)
peer = nil
}
line++
}
return &c, nil
}
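A sketch pairing ParseDump with the exec-based ShowDump helper from the Linux-specific file further down in this diff; the interface name "kilo0" is just an example.

```go
// Illustrative only: feed the output of `wg show <iface> dump` into ParseDump.
func exampleParseDump() error {
	dump, err := ShowDump("kilo0")
	if err != nil {
		return err
	}
	conf, err := ParseDump(dump)
	if err != nil {
		return err
	}
	for _, p := range conf.Peers {
		_ = p.LatestHandshake // runtime information carried alongside the configuration.
	}
	return nil
}
```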

View File

@ -1,4 +1,4 @@
// Copyright 2021 the Kilo authors // Copyright 2019 the Kilo authors
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -21,431 +21,336 @@ import (
"github.com/kylelemons/godebug/pretty" "github.com/kylelemons/godebug/pretty"
) )
func TestNewEndpoint(t *testing.T) { func TestCompareConf(t *testing.T) {
for i, tc := range []struct { for _, tc := range []struct {
name string name string
ip net.IP a []byte
port int b []byte
out *Endpoint out bool
}{ }{
{ {
name: "no ip, no port", name: "empty",
out: &Endpoint{ a: []byte{},
udpAddr: &net.UDPAddr{}, b: []byte{},
}, out: true,
}, },
{ {
name: "only port", name: "key and value order",
ip: nil, a: []byte(`[Interface]
port: 99, PrivateKey = private
out: &Endpoint{ ListenPort = 51820
udpAddr: &net.UDPAddr{
Port: 99, [Peer]
}, Endpoint = 10.1.0.2:51820
}, PresharedKey = psk
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Interface]
ListenPort = 51820
PrivateKey = private
[Peer]
PublicKey = key
AllowedIPs = 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32, 10.2.2.0/24
PresharedKey = psk
Endpoint = 10.1.0.2:51820
`),
out: true,
}, },
{ {
name: "only ipv4", name: "whitespace",
ip: net.ParseIP("10.0.0.0"), a: []byte(`[Interface]
out: &Endpoint{ PrivateKey = private
udpAddr: &net.UDPAddr{ ListenPort = 51820
IP: net.ParseIP("10.0.0.0").To4(),
}, [Peer]
}, Endpoint = 10.1.0.2:51820
PresharedKey = psk
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Interface]
PrivateKey=private
ListenPort=51820
[Peer]
Endpoint=10.1.0.2:51820
PresharedKey = psk
PublicKey=key
AllowedIPs=10.2.2.0/24,192.168.0.1/32,10.2.3.0/24,192.168.0.2/32,10.4.0.2/32
`),
out: true,
}, },
{ {
name: "only ipv6", name: "missing key",
ip: net.ParseIP("ff50::10"), a: []byte(`[Interface]
out: &Endpoint{ PrivateKey = private
udpAddr: &net.UDPAddr{ ListenPort = 51820
IP: net.ParseIP("ff50::10").To16(),
}, [Peer]
}, Endpoint = 10.1.0.2:51820
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
out: false,
}, },
{ {
name: "ipv4", name: "different value",
ip: net.ParseIP("10.0.0.0"), a: []byte(`[Interface]
port: 1000, PrivateKey = private
out: &Endpoint{ ListenPort = 51820
udpAddr: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0").To4(), [Peer]
Port: 1000, Endpoint = 10.1.0.2:51820
}, PublicKey = key
}, AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
Endpoint = 10.1.0.2:51820
PublicKey = key2
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
out: false,
}, },
{ {
name: "ipv6", name: "section order",
ip: net.ParseIP("ff50::10"), a: []byte(`[Interface]
port: 1000, PrivateKey = private
out: &Endpoint{ ListenPort = 51820
udpAddr: &net.UDPAddr{
IP: net.ParseIP("ff50::10").To16(), [Peer]
Port: 1000, Endpoint = 10.1.0.2:51820
}, PresharedKey = psk
}, PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Peer]
Endpoint = 10.1.0.2:51820
PresharedKey = psk
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
[Interface]
PrivateKey = private
ListenPort = 51820
`),
out: true,
}, },
{ {
name: "ipv6", name: "out of order peers",
ip: net.ParseIP("fc00:f853:ccd:e793::3"), a: []byte(`[Interface]
port: 51820, PrivateKey = private
out: &Endpoint{ ListenPort = 51820
udpAddr: &net.UDPAddr{
IP: net.ParseIP("fc00:f853:ccd:e793::3").To16(), [Peer]
Port: 51820, Endpoint = 10.1.0.2:51820
}, PresharedKey = psk2
}, PublicKey = key2
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
[Peer]
Endpoint = 10.1.0.2:51820
PresharedKey = psk1
PublicKey = key1
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(`[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
Endpoint = 10.1.0.2:51820
PresharedKey = psk1
PublicKey = key1
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
[Peer]
Endpoint = 10.1.0.2:51820
PresharedKey = psk2
PublicKey = key2
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
out: true,
},
{
name: "one empty",
a: []byte(`[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
Endpoint = 10.1.0.2:51820
PresharedKey = psk
PublicKey = key
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`),
b: []byte(``),
out: false,
}, },
} { } {
out := NewEndpoint(tc.ip, tc.port) equal := Parse(tc.a).Equal(Parse(tc.b))
if diff := pretty.Compare(out, tc.out); diff != "" { if equal != tc.out {
t.Errorf("%d %s: got diff:\n%s\n", i, tc.name, diff) t.Errorf("test case %q: expected %t, got %t", tc.name, tc.out, equal)
} }
} }
} }
func TestParseEndpoint(t *testing.T) { func TestCompareEndpoint(t *testing.T) {
for i, tc := range []struct { for _, tc := range []struct {
name string name string
str string a *Endpoint
out *Endpoint b *Endpoint
dnsFirst bool
out bool
}{ }{
{ {
name: "no ip, no port", name: "both nil",
a: nil,
b: nil,
out: true,
}, },
{ {
name: "only port", name: "a nil",
str: ":1000", a: nil,
b: &Endpoint{},
out: false,
}, },
{ {
name: "only ipv4", name: "b nil",
str: "10.0.0.0", a: &Endpoint{},
b: nil,
out: false,
}, },
{ {
name: "only ipv6", name: "zero",
str: "ff50::10", a: &Endpoint{},
b: &Endpoint{},
out: true,
}, },
{ {
name: "ipv4", name: "diff port",
str: "10.0.0.0:1000", a: &Endpoint{Port: 1234},
out: &Endpoint{ b: &Endpoint{Port: 5678},
udpAddr: &net.UDPAddr{ out: false,
IP: net.ParseIP("10.0.0.0").To4(),
Port: 1000,
},
},
}, },
{ {
name: "ipv6", name: "same IP",
str: "[ff50::10]:1000", a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
out: &Endpoint{ b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
udpAddr: &net.UDPAddr{ out: true,
IP: net.ParseIP("ff50::10").To16(), },
Port: 1000, {
}, name: "diff IP",
}, a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2")}},
out: false,
},
{
name: "same IP ignore DNS",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "a"}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "b"}},
out: true,
},
{
name: "no IP check DNS",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "b"}},
out: false,
},
{
name: "no IP check DNS (same)",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
out: true,
},
{
name: "DNS first, ignore IP",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "a"}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2"), DNS: "a"}},
dnsFirst: true,
out: true,
},
{
name: "DNS first",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "b"}},
dnsFirst: true,
out: false,
},
{
name: "DNS first, no DNS compare IP",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2"), DNS: ""}},
dnsFirst: true,
out: false,
},
{
name: "DNS first, no DNS compare IP (same)",
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
dnsFirst: true,
out: true,
}, },
} { } {
out := ParseEndpoint(tc.str) equal := tc.a.Equal(tc.b, tc.dnsFirst)
if diff := pretty.Compare(out, tc.out); diff != "" { if equal != tc.out {
t.Errorf("ParseEndpoint %s(%d): got diff:\n%s\n", tc.name, i, diff) t.Errorf("test case %q: expected %t, got %t", tc.name, tc.out, equal)
} }
} }
} }
func TestNewEndpointFromUDPAddr(t *testing.T) { func TestCompareDumpConf(t *testing.T) {
for i, tc := range []struct { for _, tc := range []struct {
name string name string
u *net.UDPAddr d []byte
out *Endpoint c []byte
}{ }{
{ {
name: "no ip, no port", name: "empty",
out: &Endpoint{ d: []byte{},
addr: "", c: []byte{},
},
}, },
{ {
name: "only port", name: "redacted copy from wg output",
u: &net.UDPAddr{ d: []byte(`private B7qk8EMlob0nfado0ABM6HulUV607r4yqtBKjhap7S4= 51820 off
Port: 1000, key1 (none) 10.254.1.1:51820 100.64.1.0/24,192.168.0.125/32,10.4.0.1/32 1619012801 67048 34952 10
}, key2 (none) 10.254.2.1:51820 100.64.4.0/24,10.69.76.55/32,100.64.3.0/24,10.66.25.131/32,10.4.0.2/32 1619013058 1134456 10077852 10`),
out: &Endpoint{ c: []byte(`[Interface]
udpAddr: &net.UDPAddr{ ListenPort = 51820
Port: 1000, PrivateKey = private
},
addr: "",
},
},
{
name: "only ipv4",
u: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0"),
},
out: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0").To4(),
},
addr: "",
},
},
{
name: "only ipv6",
u: &net.UDPAddr{
IP: net.ParseIP("ff60::10"),
},
out: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("ff60::10").To16(),
},
},
},
{
name: "ipv4",
u: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0"),
Port: 1000,
},
out: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0").To4(),
Port: 1000,
},
},
},
{
name: "ipv6",
u: &net.UDPAddr{
IP: net.ParseIP("ff50::10"),
Port: 1000,
},
out: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("ff50::10").To16(),
Port: 1000,
},
},
},
} {
out := NewEndpointFromUDPAddr(tc.u)
if diff := pretty.Compare(out, tc.out); diff != "" {
t.Errorf("ParseEndpoint %s(%d): got diff:\n%s\n", tc.name, i, diff)
}
}
}
func TestReady(t *testing.T) { [Peer]
for i, tc := range []struct { PublicKey = key1
name string AllowedIPs = 100.64.1.0/24, 192.168.0.125/32, 10.4.0.1/32
in *Endpoint Endpoint = 10.254.1.1:51820
r bool PersistentKeepalive = 10
}{
{
name: "nil",
r: false,
},
{
name: "no ip, no port",
in: &Endpoint{
addr: "",
udpAddr: &net.UDPAddr{},
},
r: false,
},
{
name: "only port",
in: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
},
},
r: false,
},
{
name: "only ipv4",
in: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0"),
},
},
r: false,
},
{
name: "only ipv6",
in: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("ff60::10"),
},
},
r: false,
},
{
name: "ipv4",
in: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("10.0.0.0"),
Port: 1000,
},
},
r: true,
},
{
name: "ipv6",
in: &Endpoint{
udpAddr: &net.UDPAddr{
IP: net.ParseIP("ff50::10"),
Port: 1000,
},
},
r: true,
},
} {
if tc.r != tc.in.Ready() {
t.Errorf("Endpoint.Ready() %s(%d): expected=%v\tgot=%v\n", tc.name, i, tc.r, tc.in.Ready())
}
}
}
func TestEqual(t *testing.T) { [Peer]
for i, tc := range []struct { PublicKey = key2
name string AllowedIPs = 100.64.4.0/24, 10.69.76.55/32, 100.64.3.0/24, 10.66.25.131/32, 10.4.0.2/32
a *Endpoint Endpoint = 10.254.2.1:51820
b *Endpoint PersistentKeepalive = 10`),
df bool
r bool
}{
{
name: "nil dns last",
r: true,
},
{
name: "nil dns first",
df: true,
r: true,
},
{
name: "equal: only port",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
},
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
},
},
r: true,
},
{
name: "not equal: only port",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
},
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1001,
},
},
r: false,
},
{
name: "equal dns first",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "example.com:1000",
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "example.com:1000",
},
r: true,
},
{
name: "equal dns last",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "example.com:1000",
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "foo",
},
r: true,
},
{
name: "unequal dns first",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "example.com:1000",
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "foo",
},
df: true,
r: false,
},
{
name: "unequal dns last",
a: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("10.0.0.0"),
},
addr: "foo",
},
b: &Endpoint{
udpAddr: &net.UDPAddr{
Port: 1000,
IP: net.ParseIP("11.0.0.0"),
},
addr: "foo",
},
r: false,
},
{
name: "unequal dns last empty IP",
a: &Endpoint{
addr: "foo",
},
b: &Endpoint{
addr: "bar",
},
r: false,
},
{
name: "equal dns last empty IP",
a: &Endpoint{
addr: "foo",
},
b: &Endpoint{
addr: "foo",
},
r: true,
}, },
} { } {
if out := tc.a.Equal(tc.b, tc.df); out != tc.r {
t.Errorf("ParseEndpoint %s(%d): expected: %v\tgot: %v\n", tc.name, i, tc.r, out) dumpConf, _ := ParseDump(tc.d)
conf := Parse(tc.c)
// Equal will ignore runtime fields and only compare configuration fields.
if !dumpConf.Equal(conf) {
diff := pretty.Compare(dumpConf, conf)
t.Errorf("test case %q: got diff: %v", tc.name, diff)
} }
} }
} }

View File

@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build linux
// +build linux // +build linux
package wireguard package wireguard
import ( import (
"bytes"
"fmt" "fmt"
"os/exec"
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
) )
@ -63,3 +64,74 @@ func New(name string, mtu uint) (int, bool, error) {
} }
return link.Attrs().Index, true, nil return link.Attrs().Index, true, nil
} }
// Keys generates a WireGuard private and public key-pair.
func Keys() ([]byte, []byte, error) {
private, err := GenKey()
if err != nil {
return nil, nil, fmt.Errorf("failed to generate private key: %v", err)
}
public, err := PubKey(private)
return private, public, err
}
// GenKey generates a WireGuard private key.
func GenKey() ([]byte, error) {
key, err := exec.Command("wg", "genkey").Output()
return bytes.Trim(key, "\n"), err
}
// PubKey generates a WireGuard public key for a given private key.
func PubKey(key []byte) ([]byte, error) {
cmd := exec.Command("wg", "pubkey")
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, fmt.Errorf("failed to open pipe to stdin: %v", err)
}
go func() {
defer stdin.Close()
stdin.Write(key)
}()
public, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("failed to generate public key: %v", err)
}
return bytes.Trim(public, "\n"), nil
}
// SetConf applies a WireGuard configuration file to the given interface.
func SetConf(iface string, path string) error {
cmd := exec.Command("wg", "setconf", iface, path)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to apply the WireGuard configuration: %s", stderr.String())
}
return nil
}
// ShowConf gets the WireGuard configuration for the given interface.
func ShowConf(iface string) ([]byte, error) {
cmd := exec.Command("wg", "showconf", iface)
var stderr, stdout bytes.Buffer
cmd.Stderr = &stderr
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("failed to read the WireGuard configuration: %s", stderr.String())
}
return stdout.Bytes(), nil
}
// ShowDump gets the WireGuard configuration and runtime information for the given interface.
func ShowDump(iface string) ([]byte, error) {
cmd := exec.Command("wg", "show", iface, "dump")
var stderr, stdout bytes.Buffer
cmd.Stderr = &stderr
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("failed to read the WireGuard dump output: %s", stderr.String())
}
return stdout.Bytes(), nil
}
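A sketch of how these exec-based helpers fit together; the interface name and configuration path are hypothetical.

```go
// Illustrative only: generate a key-pair with the wg binary and apply a configuration file.
func exampleKeysAndSetConf() error {
	private, public, err := Keys()
	if err != nil {
		return err
	}
	_ = private // belongs in the [Interface] section of the local configuration.
	_ = public  // shared with peers.

	// The file at this (hypothetical) path would contain output from Conf.Bytes.
	return SetConf("kilo0", "/tmp/kilo0.conf")
}
```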

View File

@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build tools
// +build tools // +build tools
package main package main
import ( import (
_ "github.com/campoy/embedmd" _ "github.com/campoy/embedmd"
_ "honnef.co/go/tools/cmd/staticcheck" _ "golang.org/x/lint/golint"
_ "k8s.io/code-generator/cmd/client-gen" _ "k8s.io/code-generator/cmd/client-gen"
_ "k8s.io/code-generator/cmd/deepcopy-gen" _ "k8s.io/code-generator/cmd/deepcopy-gen"
_ "k8s.io/code-generator/cmd/informer-gen" _ "k8s.io/code-generator/cmd/informer-gen"

View File

@ -1,2 +0,0 @@
toml.test
/toml-test

View File

@ -1 +0,0 @@
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,220 +0,0 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://godocs.io/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
This library requires Go 1.13 or newer; install it with:
$ go get github.com/BurntSushi/toml
It also comes with a TOML validator CLI tool:
$ go get github.com/BurntSushi/toml/cmd/tomlv
$ tomlv some-toml-file.toml
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles XML and
JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
Beware that, like most other decoders, **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.

Some files were not shown because too many files have changed in this diff.