Compare commits: kiloio-git...main

114 Commits (SHA1):

b187f32407 d29f930203 a9d5883a3a 1921c6a212 cb238c85a1 2b1b55610d
cd4a1ee4e0 0f0b0bda13 37b3cf1fc8 e328646617 6ebc914354 4be792ea54
50fbc2eec2 93f46e03ea 59ed36e81f 0820a9d32f 7aeaa855e7 01bf238799
37a5aef6ea 5424c5eb55 213688fd7d 3eaacc01ae e20d13ace0 0ddeea3d78
bbc4fe30a6 7291a3bd71 826593d6ba 6491d7b87f d04da92a23 fc741bf444
8afe1bea53 112772d02d a385f1ac82 1f19133ea8 7985ed5091 19c13b7401
3e6818d0b3 8cadff2b79 6862274e8e a02542b529 7dbbf52e1c 9a9131d965
a6d50a8046 d47bb4f587 206b078c5f 7c5f9ecc40 69fb81bcd3 c00cf69b55
0dfb744630 d95e590f5c d3710399f8 0eb9df178a e782d1be98 fb03520fb5
ed1e9ea400 df8d2cb68f 38a5dd22e9 e598102f04 5de689ea1f 887ea026bb
75fb31a947 a1af9790ea 96029a584f 3bf7eacc7e 6d6c62ae49 02d49ded39
3e7fe47131 038a6d7450 c4e3108549 6a696e03e7 797133f272 84da98c2b1
76047fe0af ee650342d5 1f8c736ba4 57a89b49ff 6a5643287e e1a6ee9e2c
ee480dece4 05e8ded744 ac65330c71 8a2c82267c fb70091169 f03a0bb247
bb3554a3c6 edb8f63848 bcb722b0b9 70b7eb52fa c59ac10e15 584a8bf13d
b88ca7f8cd 8f7894e598 3de4bf527b f90288133d 70d2751030 9b14c227a9
e2745b453f a6eef5a8cf 3174467751 df8d1aba5c c099a70c20 79e96bbe37
b9823943e3 c8ed21cac4 6b93cc2ad9 086b2e1ddd 2b4487ba9a cad15d9961
9ec155b843 e886f5d24e acc3696057 288bb824aa 6fe0beabcd 0fbd33788e
.dockerignore (new file, 3 lines)

@@ -0,0 +1,3 @@
+**
+!/bin/linux
.github/workflows/ci.yml (vendored, 47 changes)

@@ -6,13 +6,25 @@ on:
     tags:
     - "*"
   pull_request:
     branches: [ main ]
   schedule:
   - cron: '0 0 * * *'
+  workflow_dispatch:

 jobs:

+  vendor:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.18
+    - name: Vendor
+      run: |
+        make vendor
+        git diff --exit-code

   build:
     runs-on: ubuntu-latest
     steps:
@@ -20,10 +32,23 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Build
       run: make

+  docs:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.18
+    - name: Build docs
+      run: |
+        make gen-docs
+        git diff --exit-code

   linux:
     runs-on: ubuntu-latest
     steps:
@@ -31,7 +56,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Build kg and kgctl for all Linux Architectures
       run: make all-build

@@ -42,7 +67,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
    - name: Build kgctl for Darwin amd64
       run: make OS=darwin ARCH=amd64
     - name: Build kgctl for Darwin arm64
@@ -55,7 +80,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Build kgctl for Windows
       run: make OS=windows

@@ -66,19 +91,18 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Run Unit Tests
       run: make unit

   e2e:
     if: github.event_name == 'pull_request'
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Run e2e Tests
       run: make e2e

@@ -89,7 +113,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Lint Code
       run: make lint

@@ -100,7 +124,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Enable Experimental Docker CLI
       run: |
         echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json
@@ -116,6 +140,7 @@ jobs:
   push:
     if: github.event_name != 'pull_request'
     needs:
+    - vendor
     - build
     - linux
     - darwin
@@ -129,7 +154,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
+        go-version: 1.18
     - name: Enable Experimental Docker CLI
       run: |
         echo $'{\n "experimental": true\n}' | sudo tee /etc/docker/daemon.json
.github/workflows/release.yaml (vendored, 6 changes)

@@ -3,15 +3,15 @@ on:
   types: [created]
 name: Handle Release
 jobs:
-  linux:
+  kgctl:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.16.5
-    - name: Make Directory with kgctl Binaries to Be Released
+        go-version: 1.18
+    - name: Build kgctl Binaries to Be Released
       run: make release
     - name: Publish Release
       uses: skx/github-action-publish-binaries@master
@@ -1,7 +1,7 @@
 ARG FROM=alpine
 FROM $FROM AS cni
 ARG GOARCH=amd64
-ARG CNI_PLUGINS_VERSION=v0.9.1
+ARG CNI_PLUGINS_VERSION=v1.1.1
 RUN apk add --no-cache curl && \
     curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/$CNI_PLUGINS_VERSION/cni-plugins-linux-$GOARCH-$CNI_PLUGINS_VERSION.tgz && \
     tar -xf cni.tar.gz
@@ -11,7 +11,9 @@ ARG GOARCH
 ARG ALPINE_VERSION=v3.12
 LABEL maintainer="squat <lserven@gmail.com>"
 RUN echo -e "https://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/main\nhttps://alpine.global.ssl.fastly.net/alpine/$ALPINE_VERSION/community" > /etc/apk/repositories && \
-    apk add --no-cache ipset iptables ip6tables wireguard-tools graphviz font-noto
+    apk add --no-cache ipset iptables ip6tables graphviz font-noto
 COPY --from=cni bridge host-local loopback portmap /opt/cni/bin/
+ADD https://raw.githubusercontent.com/kubernetes-sigs/iptables-wrappers/e139a115350974aac8a82ec4b815d2845f86997e/iptables-wrapper-installer.sh /
+RUN chmod 700 /iptables-wrapper-installer.sh && /iptables-wrapper-installer.sh --no-sanity-check
 COPY bin/linux/$GOARCH/kg /opt/bin/
 ENTRYPOINT ["/opt/bin/kg"]
Makefile (26 changes)

@@ -38,15 +38,15 @@ DOCS_GEN_BINARY := bin/docs-gen
 DEEPCOPY_GEN_BINARY := bin/deepcopy-gen
 INFORMER_GEN_BINARY := bin/informer-gen
 LISTER_GEN_BINARY := bin/lister-gen
-GOLINT_BINARY := bin/golint
+STATICCHECK_BINARY := bin/staticcheck
 EMBEDMD_BINARY := bin/embedmd
 KIND_BINARY := $(shell pwd)/bin/kind
 KUBECTL_BINARY := $(shell pwd)/bin/kubectl
 BASH_UNIT := $(shell pwd)/bin/bash_unit
 BASH_UNIT_FLAGS :=

-BUILD_IMAGE ?= golang:1.16.5-alpine
-BASE_IMAGE ?= alpine:3.13
+BUILD_IMAGE ?= golang:1.18.0
+BASE_IMAGE ?= alpine:3.15

 build: $(BINS)

@@ -81,7 +81,7 @@ crd: manifests/crds.yaml
 manifests/crds.yaml: pkg/k8s/apis/kilo/v1alpha1/types.go $(CONTROLLER_GEN_BINARY)
     $(CONTROLLER_GEN_BINARY) crd \
     paths=./pkg/k8s/apis/kilo/... \
-    output:crd:stdout | tail -n +3 > $@
+    output:crd:stdout > $@

 client: pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go
 pkg/k8s/clientset/versioned/typed/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.go $(CLIENT_GEN_BINARY)
@@ -139,7 +139,7 @@ pkg/k8s/listers/kilo/v1alpha1/peer.go: .header pkg/k8s/apis/kilo/v1alpha1/types.
     rm -r github.com || true
     go fmt ./pkg/k8s/listers/...

-gen-docs: generate docs/api.md
+gen-docs: generate docs/api.md docs/kg.md
 docs/api.md: pkg/k8s/apis/kilo/v1alpha1/types.go $(DOCS_GEN_BINARY)
     $(DOCS_GEN_BINARY) $< > $@

@@ -165,7 +165,7 @@ fmt:
     @echo $(GO_PKGS)
     gofmt -w -s $(GO_FILES)

-lint: header $(GOLINT_BINARY)
+lint: header $(STATICCHECK_BINARY)
     @echo 'go vet $(GO_PKGS)'
     @vet_res=$$(GO111MODULE=on go vet -mod=vendor $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \
         echo ""; \
@@ -174,10 +174,10 @@ lint: header $(STATICCHECK_BINARY)
         echo "$$vet_res"; \
         exit 1; \
     fi
-    @echo '$(GOLINT_BINARY) $(GO_PKGS)'
-    @lint_res=$$($(GOLINT_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
+    @echo '$(STATICCHECK_BINARY) $(GO_PKGS)'
+    @lint_res=$$($(STATICCHECK_BINARY) $(GO_PKGS)); if [ -n "$$lint_res" ]; then \
         echo ""; \
-        echo "Golint found style issues. Please check the reported issues"; \
+        echo "Staticcheck found style issues. Please check the reported issues"; \
         echo "and fix them if necessary before submitting the code for review:"; \
         echo "$$lint_res"; \
         exit 1; \
@@ -209,7 +209,7 @@ $(BASH_UNIT):
     chmod +x $@

 e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl
-    KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/teardown.sh
+    KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) $(BASH_UNIT_FLAGS) ./e2e/setup.sh ./e2e/full-mesh.sh ./e2e/location-mesh.sh ./e2e/multi-cluster.sh ./e2e/handlers.sh ./e2e/kgctl.sh ./e2e/teardown.sh

 header: .header
     @HEADER=$$(cat .header); \
@@ -242,7 +242,7 @@ website/docs/README.md: README.md
     cat README.md >> $@
     cp -r docs/graphs website/static/img/
     sed -i 's/\.\/docs\///g' $@
-    find $(@D) -type f -name '*.md' | xargs -I{} sed -i 's/\.\/\(.\+\.svg\)/\/img\/\1/g' {}
+    find $(@D) -type f -name '*.md' | xargs -I{} sed -i 's/\.\/\(.\+\.\(svg\|png\)\)/\/img\/\1/g' {}
     sed -i 's/graphs\//\/img\/graphs\//g' $@
     # The next line is a workaround until mdx, docusaurus' markdown parser, can parse links with preceding brackets.
     sed -i 's/\[\]\(\[.*\](.*)\)/\[\]\1/g' website/docs/api.md
@@ -358,8 +358,8 @@ $(LISTER_GEN_BINARY):
 $(DOCS_GEN_BINARY): cmd/docs-gen/main.go
     go build -mod=vendor -o $@ ./cmd/docs-gen

-$(GOLINT_BINARY):
-    go build -mod=vendor -o $@ golang.org/x/lint/golint
+$(STATICCHECK_BINARY):
+    go build -mod=vendor -o $@ honnef.co/go/tools/cmd/staticcheck

 $(EMBEDMD_BINARY):
     go build -mod=vendor -o $@ github.com/campoy/embedmd
@@ -1,4 +1,4 @@
-// Copyright 2019 the Kilo authors
+// Copyright 2021 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -24,6 +24,8 @@ import (
     "os"
     "os/exec"

+    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+
     "github.com/squat/kilo/pkg/mesh"
 )
@@ -62,7 +64,7 @@ func (h *graphHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
             peers[p.Name] = p
         }
     }
-    topo, err := mesh.NewTopology(nodes, peers, h.granularity, *h.hostname, 0, []byte{}, h.subnet, nodes[*h.hostname].PersistentKeepalive, nil)
+    topo, err := mesh.NewTopology(nodes, peers, h.granularity, *h.hostname, 0, wgtypes.Key{}, h.subnet, nodes[*h.hostname].PersistentKeepalive, nil)
     if err != nil {
         http.Error(w, fmt.Sprintf("failed to create topology: %v", err), http.StatusInternalServerError)
         return
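The hunk above swaps the empty []byte{} argument for wgtypes.Key{} in the call to mesh.NewTopology. A minimal sketch, not from this repository, of how the zero value of wgtypes.Key behaves and why it can stand in for "no key":

package main

import (
    "fmt"

    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
    // wgtypes.Key is a fixed-size [32]byte value, so its zero value is comparable
    // and can signal "no key" where the old code passed []byte{}.
    var none wgtypes.Key
    fmt.Println(none == wgtypes.Key{}) // true

    priv, err := wgtypes.GeneratePrivateKey()
    if err != nil {
        panic(err)
    }
    // A derived public key is never the zero value.
    fmt.Println(priv.PublicKey() == wgtypes.Key{}) // false
}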
cmd/kg/main.go (213 changes)

@@ -1,4 +1,4 @@
-// Copyright 2019 the Kilo authors
+// Copyright 2021 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
 package main

 import (
+    "context"
     "errors"
     "fmt"
     "net"
@@ -27,10 +28,11 @@ import (

     "github.com/go-kit/kit/log"
     "github.com/go-kit/kit/log/level"
+    "github.com/metalmatze/signal/internalserver"
     "github.com/oklog/run"
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/client_golang/prometheus/promhttp"
-    flag "github.com/spf13/pflag"
+    "github.com/prometheus/client_golang/prometheus/collectors"
+    "github.com/spf13/cobra"
     apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/clientcmd"
@@ -78,52 +80,79 @@ var (
     }, ", ")
 )

-// Main is the principal function for the binary, wrapped only by `main` for convenience.
-func Main() error {
-    backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
-    cleanUpIface := flag.Bool("clean-up-interface", false, "Should Kilo delete its interface when it shuts down?")
-    createIface := flag.Bool("create-interface", true, "Should kilo create an interface on startup?")
-    cni := flag.Bool("cni", true, "Should Kilo manage the node's CNI configuration?")
-    cniPath := flag.String("cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
-    compatibility := flag.String("compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
-    encapsulate := flag.String("encapsulate", string(encapsulation.Always), fmt.Sprintf("When should Kilo encapsulate packets within a location? Possible values: %s", availableEncapsulations))
-    granularity := flag.String("mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
-    kubeconfig := flag.String("kubeconfig", "", "Path to kubeconfig.")
-    hostname := flag.String("hostname", "", "Hostname of the node on which this process is running.")
-    iface := flag.String("interface", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
-    listen := flag.String("listen", ":1107", "The address at which to listen for health and metrics.")
-    local := flag.Bool("local", true, "Should Kilo manage routes within a location?")
-    logLevel := flag.String("log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
-    master := flag.String("master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig).")
-    mtu := flag.Uint("mtu", wireguard.DefaultMTU, "The MTU of the WireGuard interface created by Kilo.")
-    topologyLabel := flag.String("topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
-    var port uint
-    flag.UintVar(&port, "port", mesh.DefaultKiloPort, "The port over which WireGuard peers should communicate.")
-    subnet := flag.String("subnet", mesh.DefaultKiloSubnet.String(), "CIDR from which to allocate addresses for WireGuard interfaces.")
-    resyncPeriod := flag.Duration("resync-period", 30*time.Second, "How often should the Kilo controllers reconcile?")
-    printVersion := flag.Bool("version", false, "Print version and exit")
-    flag.Parse()
+var cmd = &cobra.Command{
+    Use:   "kg",
+    Short: "kg is the Kilo agent",
+    Long: `kg is the Kilo agent.
+It runs on every node of a cluster,
+setting up the public and private keys for the VPN
+as well as the necessary rules to route packets between locations.`,
+    PreRunE:       preRun,
+    RunE:          runRoot,
+    SilenceUsage:  true,
+    SilenceErrors: true,
+}

-    if *printVersion {
-        fmt.Println(version.Version)
-        return nil
-    }
+var (
+    backend               string
+    cleanUpIface          bool
+    createIface           bool
+    cni                   bool
+    cniPath               string
+    compatibility         string
+    encapsulate           string
+    granularity           string
+    hostname              string
+    kubeconfig            string
+    iface                 string
+    listen                string
+    local                 bool
+    master                string
+    mtu                   uint
+    topologyLabel         string
+    port                  int
+    subnet                string
+    resyncPeriod          time.Duration
+    iptablesForwardRule   bool
+    prioritisePrivateAddr bool

-    _, s, err := net.ParseCIDR(*subnet)
-    if err != nil {
-        return fmt.Errorf("failed to parse %q as CIDR: %v", *subnet, err)
-    }
+    printVersion bool
+    logLevel     string

-    if *hostname == "" {
-        var err error
-        *hostname, err = os.Hostname()
-        if *hostname == "" || err != nil {
-            return errors.New("failed to determine hostname")
-        }
-    }
+    logger   log.Logger
+    registry *prometheus.Registry
+)

-    logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
-    switch *logLevel {
+func init() {
+    cmd.Flags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
+    cmd.Flags().BoolVar(&cleanUpIface, "clean-up-interface", false, "Should Kilo delete its interface when it shuts down?")
+    cmd.Flags().BoolVar(&createIface, "create-interface", true, "Should kilo create an interface on startup?")
+    cmd.Flags().BoolVar(&cni, "cni", true, "Should Kilo manage the node's CNI configuration?")
+    cmd.Flags().StringVar(&cniPath, "cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
+    cmd.Flags().StringVar(&compatibility, "compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
+    cmd.Flags().StringVar(&encapsulate, "encapsulate", string(encapsulation.Always), fmt.Sprintf("When should Kilo encapsulate packets within a location? Possible values: %s", availableEncapsulations))
+    cmd.Flags().StringVar(&granularity, "mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
+    cmd.Flags().StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig.")
+    cmd.Flags().StringVar(&hostname, "hostname", "", "Hostname of the node on which this process is running.")
+    cmd.Flags().StringVar(&iface, "interface", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
+    cmd.Flags().StringVar(&listen, "listen", ":1107", "The address at which to listen for health and metrics.")
+    cmd.Flags().BoolVar(&local, "local", true, "Should Kilo manage routes within a location?")
+    cmd.Flags().StringVar(&master, "master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig).")
+    cmd.Flags().UintVar(&mtu, "mtu", wireguard.DefaultMTU, "The MTU of the WireGuard interface created by Kilo.")
+    cmd.Flags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")
+    cmd.Flags().IntVar(&port, "port", mesh.DefaultKiloPort, "The port over which WireGuard peers should communicate.")
+    cmd.Flags().StringVar(&subnet, "subnet", mesh.DefaultKiloSubnet.String(), "CIDR from which to allocate addresses for WireGuard interfaces.")
+    cmd.Flags().DurationVar(&resyncPeriod, "resync-period", 30*time.Second, "How often should the Kilo controllers reconcile?")
+    cmd.Flags().BoolVar(&iptablesForwardRule, "iptables-forward-rules", false, "Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!")
+    cmd.Flags().BoolVar(&prioritisePrivateAddr, "prioritise-private-addresses", false, "Prefer to assign a private IP address to the node's endpoint.")

+    cmd.PersistentFlags().BoolVar(&printVersion, "version", false, "Print version and exit")
+    cmd.PersistentFlags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
+}
+
+func preRun(_ *cobra.Command, _ []string) error {
+    logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
+    switch logLevel {
     case logLevelAll:
         logger = level.NewFilter(logger, level.AllowAll())
     case logLevelDebug:
@@ -137,77 +166,107 @@ func Main() error {
     case logLevelNone:
         logger = level.NewFilter(logger, level.AllowNone())
     default:
-        return fmt.Errorf("log level %v unknown; possible values are: %s", *logLevel, availableLogLevels)
+        return fmt.Errorf("log level %v unknown; possible values are: %s", logLevel, availableLogLevels)
     }
     logger = log.With(logger, "ts", log.DefaultTimestampUTC)
     logger = log.With(logger, "caller", log.DefaultCaller)

-    e := encapsulation.Strategy(*encapsulate)
+    registry = prometheus.NewRegistry()
+    registry.MustRegister(
+        collectors.NewGoCollector(),
+        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+    )
+
+    return nil
+}
+
+// runRoot is the principal function for the binary.
+func runRoot(_ *cobra.Command, _ []string) error {
+    if printVersion {
+        fmt.Println(version.Version)
+        return nil
+    }
+
+    _, s, err := net.ParseCIDR(subnet)
+    if err != nil {
+        return fmt.Errorf("failed to parse %q as CIDR: %v", subnet, err)
+    }
+
+    if hostname == "" {
+        var err error
+        hostname, err = os.Hostname()
+        if hostname == "" || err != nil {
+            return errors.New("failed to determine hostname")
+        }
+    }
+
+    e := encapsulation.Strategy(encapsulate)
     switch e {
     case encapsulation.Never:
     case encapsulation.CrossSubnet:
     case encapsulation.Always:
     default:
-        return fmt.Errorf("encapsulation %v unknown; possible values are: %s", *encapsulate, availableEncapsulations)
+        return fmt.Errorf("encapsulation %v unknown; possible values are: %s", encapsulate, availableEncapsulations)
     }

     var enc encapsulation.Encapsulator
-    switch *compatibility {
+    switch compatibility {
     case "flannel":
         enc = encapsulation.NewFlannel(e)
     case "cilium":
         enc = encapsulation.NewCilium(e)
     default:
         enc = encapsulation.NewIPIP(e)
     }

-    gr := mesh.Granularity(*granularity)
+    gr := mesh.Granularity(granularity)
     switch gr {
     case mesh.LogicalGranularity:
     case mesh.FullGranularity:
     default:
-        return fmt.Errorf("mesh granularity %v unknown; possible values are: %s", *granularity, availableGranularities)
+        return fmt.Errorf("mesh granularity %v unknown; possible values are: %s", granularity, availableGranularities)
     }

     var b mesh.Backend
-    switch *backend {
+    switch backend {
     case k8s.Backend:
-        config, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig)
+        config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
         if err != nil {
             return fmt.Errorf("failed to create Kubernetes config: %v", err)
         }
         c := kubernetes.NewForConfigOrDie(config)
         kc := kiloclient.NewForConfigOrDie(config)
         ec := apiextensions.NewForConfigOrDie(config)
-        b = k8s.New(c, kc, ec, *topologyLabel)
+        b = k8s.New(c, kc, ec, topologyLabel, log.With(logger, "component", "k8s backend"))
     default:
-        return fmt.Errorf("backend %v unknown; possible values are: %s", *backend, availableBackends)
+        return fmt.Errorf("backend %v unknown; possible values are: %s", backend, availableBackends)
     }

-    m, err := mesh.New(b, enc, gr, *hostname, uint32(port), s, *local, *cni, *cniPath, *iface, *cleanUpIface, *createIface, *mtu, *resyncPeriod, log.With(logger, "component", "kilo"))
+    if port < 1 || port > 1<<16-1 {
+        return fmt.Errorf("invalid port: port mus be in range [%d:%d], but got %d", 1, 1<<16-1, port)
+    }
+    m, err := mesh.New(b, enc, gr, hostname, port, s, local, cni, cniPath, iface, cleanUpIface, createIface, mtu, resyncPeriod, prioritisePrivateAddr, iptablesForwardRule, log.With(logger, "component", "kilo"), registry)
     if err != nil {
         return fmt.Errorf("failed to create Kilo mesh: %v", err)
     }

-    r := prometheus.NewRegistry()
-    r.MustRegister(
-        prometheus.NewGoCollector(),
-        prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
-    )
-    m.RegisterMetrics(r)
-
     var g run.Group
     {
+        h := internalserver.NewHandler(
+            internalserver.WithName("Internal Kilo API"),
+            internalserver.WithPrometheusRegistry(registry),
+            internalserver.WithPProf(),
+        )
+        h.AddEndpoint("/health", "Exposes health checks", healthHandler)
+        h.AddEndpoint("/graph", "Exposes Kilo mesh topology graph", (&graphHandler{m, gr, &hostname, s}).ServeHTTP)
         // Run the HTTP server.
-        mux := http.NewServeMux()
-        mux.HandleFunc("/health", healthHandler)
-        mux.Handle("/graph", &graphHandler{m, gr, hostname, s})
-        mux.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
-        l, err := net.Listen("tcp", *listen)
+        l, err := net.Listen("tcp", listen)
         if err != nil {
-            return fmt.Errorf("failed to listen on %s: %v", *listen, err)
+            return fmt.Errorf("failed to listen on %s: %v", listen, err)
         }

         g.Add(func() error {
-            if err := http.Serve(l, mux); err != nil && err != http.ErrServerClosed {
+            if err := http.Serve(l, h); err != nil && err != http.ErrServerClosed {
                 return fmt.Errorf("error: server exited unexpectedly: %v", err)
             }
             return nil
@@ -217,15 +276,16 @@ func Main() error {
     }

     {
+        ctx, cancel := context.WithCancel(context.Background())
         // Start the mesh.
         g.Add(func() error {
             logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version))
-            if err := m.Run(); err != nil {
+            if err := m.Run(ctx); err != nil {
                 return fmt.Errorf("error: Kilo exited unexpectedly: %v", err)
             }
             return nil
         }, func(error) {
-            m.Stop()
+            cancel()
         })
     }

@@ -252,8 +312,15 @@ func Main() error {
     return g.Run()
 }

+var versionCmd = &cobra.Command{
+    Use:   "version",
+    Short: "Print the version and exit.",
+    Run:   func(_ *cobra.Command, _ []string) { fmt.Println(version.Version) },
+}
+
 func main() {
-    if err := Main(); err != nil {
+    cmd.AddCommand(webhookCmd, versionCmd)
+    if err := cmd.Execute(); err != nil {
         fmt.Fprintf(os.Stderr, "%v\n", err)
         os.Exit(1)
     }
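The main.go changes above replace the flat, pflag-based Main function with a cobra root command whose flags are bound to package-level variables in init and validated in PreRunE. A minimal, self-contained sketch of that pattern, assuming nothing beyond the cobra library itself (the command name and flag here are hypothetical, not part of Kilo):

package main

import (
    "fmt"
    "os"

    "github.com/spf13/cobra"
)

// logLevel is bound to a persistent flag in init, mirroring how kg binds its flags.
var logLevel string

var rootCmd = &cobra.Command{
    Use:   "demo",
    Short: "demo shows the command structure kg switches to",
    // PreRunE is where kg now validates flags and builds the logger and registry.
    PreRunE: func(_ *cobra.Command, _ []string) error {
        if logLevel == "" {
            return fmt.Errorf("log level must not be empty")
        }
        return nil
    },
    RunE: func(_ *cobra.Command, _ []string) error {
        fmt.Println("running with log level:", logLevel)
        return nil
    },
    SilenceUsage:  true,
    SilenceErrors: true,
}

func init() {
    rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "Log level to use.")
}

func main() {
    // Errors are printed once here because SilenceErrors suppresses cobra's own printing.
    if err := rootCmd.Execute(); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}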
cmd/kg/webhook.go (new file, 273 lines)

@@ -0,0 +1,273 @@
// Copyright 2021 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "syscall"
    "time"

    "github.com/go-kit/kit/log/level"
    "github.com/oklog/run"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/spf13/cobra"
    v1 "k8s.io/api/admission/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"

    kilo "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
    "github.com/squat/kilo/pkg/version"
)

var webhookCmd = &cobra.Command{
    Use: "webhook",
    PreRunE: func(c *cobra.Command, a []string) error {
        if c.HasParent() {
            return c.Parent().PreRunE(c, a)
        }
        return nil
    },
    Short: "webhook starts a HTTPS server to validate updates and creations of Kilo peers.",
    RunE:  webhook,
}

var (
    certPath    string
    keyPath     string
    metricsAddr string
    listenAddr  string
)

func init() {
    webhookCmd.Flags().StringVar(&certPath, "cert-file", "", "The path to a certificate file")
    webhookCmd.Flags().StringVar(&keyPath, "key-file", "", "The path to a key file")
    webhookCmd.Flags().StringVar(&metricsAddr, "listen-metrics", ":1107", "The metrics server will be listening to that address")
    webhookCmd.Flags().StringVar(&listenAddr, "listen", ":8443", "The webhook server will be listening to that address")
}

var deserializer = serializer.NewCodecFactory(runtime.NewScheme()).UniversalDeserializer()

var (
    validationCounter = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "admission_requests_total",
            Help: "The number of received admission reviews requests",
        },
        []string{"operation", "response"},
    )
    requestCounter = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "The number of received http requests",
        },
        []string{"handler", "method"},
    )
    errorCounter = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "errors_total",
            Help: "The total number of errors",
        },
    )
)

func validationHandler(w http.ResponseWriter, r *http.Request) {
    level.Debug(logger).Log("msg", "handling request", "source", r.RemoteAddr)
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        errorCounter.Inc()
        level.Error(logger).Log("err", "failed to parse body from incoming request", "source", r.RemoteAddr)
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    var admissionReview v1.AdmissionReview

    contentType := r.Header.Get("Content-Type")
    if contentType != "application/json" {
        errorCounter.Inc()
        msg := fmt.Sprintf("received Content-Type=%s, expected application/json", contentType)
        level.Error(logger).Log("err", msg)
        http.Error(w, msg, http.StatusBadRequest)
        return
    }

    response := v1.AdmissionReview{}

    _, gvk, err := deserializer.Decode(body, nil, &admissionReview)
    if err != nil {
        errorCounter.Inc()
        msg := fmt.Sprintf("Request could not be decoded: %v", err)
        level.Error(logger).Log("err", msg)
        http.Error(w, msg, http.StatusBadRequest)
        return
    }
    if *gvk != v1.SchemeGroupVersion.WithKind("AdmissionReview") {
        errorCounter.Inc()
        msg := "only API v1 is supported"
        level.Error(logger).Log("err", msg)
        http.Error(w, msg, http.StatusBadRequest)
        return
    }
    response.SetGroupVersionKind(*gvk)
    response.Response = &v1.AdmissionResponse{
        UID: admissionReview.Request.UID,
    }

    rawExtension := admissionReview.Request.Object
    var peer kilo.Peer

    if err := json.Unmarshal(rawExtension.Raw, &peer); err != nil {
        errorCounter.Inc()
        msg := fmt.Sprintf("could not unmarshal extension to peer spec: %v:", err)
        level.Error(logger).Log("err", msg)
        http.Error(w, msg, http.StatusBadRequest)
        return
    }

    if err := peer.Validate(); err == nil {
        level.Debug(logger).Log("msg", "got valid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
        validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "allowed"}).Inc()
        response.Response.Allowed = true
    } else {
        level.Debug(logger).Log("msg", "got invalid peer spec", "spec", peer.Spec, "name", peer.ObjectMeta.Name)
        validationCounter.With(prometheus.Labels{"operation": string(admissionReview.Request.Operation), "response": "denied"}).Inc()
        response.Response.Result = &metav1.Status{
            Message: err.Error(),
        }
    }

    res, err := json.Marshal(response)
    if err != nil {
        errorCounter.Inc()
        msg := fmt.Sprintf("failed to marshal response: %v", err)
        level.Error(logger).Log("err", msg)
        http.Error(w, msg, http.StatusInternalServerError)
        return
    }

    w.Header().Set("Content-Type", "application/json")
    if _, err := w.Write(res); err != nil {
        level.Error(logger).Log("err", err, "msg", "failed to write response")
    }
}

func metricsMiddleWare(path string, next func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
    return func(w http.ResponseWriter, r *http.Request) {
        requestCounter.With(prometheus.Labels{"method": r.Method, "handler": path}).Inc()
        next(w, r)
    }
}

func webhook(_ *cobra.Command, _ []string) error {
    if printVersion {
        fmt.Println(version.Version)
        os.Exit(0)
    }
    registry.MustRegister(
        errorCounter,
        validationCounter,
        requestCounter,
    )
    ctx, cancel := context.WithCancel(context.Background())
    defer func() {
        cancel()
    }()
    var g run.Group
    g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))
    {
        mm := http.NewServeMux()
        mm.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
        msrv := &http.Server{
            Addr:    metricsAddr,
            Handler: mm,
        }

        g.Add(
            func() error {
                level.Info(logger).Log("msg", "starting metrics server", "address", msrv.Addr)
                err := msrv.ListenAndServe()
                level.Info(logger).Log("msg", "metrics server exited", "err", err)
                return err
            },
            func(err error) {
                var serr run.SignalError
                if ok := errors.As(err, &serr); ok {
                    level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
                } else {
                    level.Error(logger).Log("msg", "received error", "err", err.Error())
                }
                level.Info(logger).Log("msg", "shutting down metrics server gracefully")
                ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
                defer func() {
                    cancel()
                }()
                if err := msrv.Shutdown(ctx); err != nil {
                    level.Error(logger).Log("msg", "failed to shut down metrics server gracefully", "err", err.Error())
                    msrv.Close()
                }
            },
        )
    }

    {
        mux := http.NewServeMux()
        mux.HandleFunc("/validate", metricsMiddleWare("/validate", validationHandler))
        srv := &http.Server{
            Addr:    listenAddr,
            Handler: mux,
        }
        g.Add(
            func() error {
                level.Info(logger).Log("msg", "starting webhook server", "address", srv.Addr)
                err := srv.ListenAndServeTLS(certPath, keyPath)
                level.Info(logger).Log("msg", "webhook server exited", "err", err)
                return err
            },
            func(err error) {
                var serr run.SignalError
                if ok := errors.As(err, &serr); ok {
                    level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
                } else {
                    level.Error(logger).Log("msg", "received error", "err", err.Error())
                }
                level.Info(logger).Log("msg", "shutting down webhook server gracefully")
                ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
                defer func() {
                    cancel()
                }()
                if err := srv.Shutdown(ctx); err != nil {
                    level.Error(logger).Log("msg", "failed to shut down webhook server gracefully", "err", err.Error())
                    srv.Close()
                }
            },
        )
    }

    err := g.Run()
    var serr run.SignalError
    if ok := errors.As(err, &serr); ok {
        return nil
    }
    return err
}
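The new webhook subcommand above validates Kilo Peer objects posted as v1 AdmissionReview documents to /validate and rejects anything that is not Content-Type: application/json. A hedged sketch of what a client request might look like, assuming the server from the diff is reachable at localhost:8443 with a trusted certificate; the peer spec, address, and UID here are hypothetical and only illustrate the request shape:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"

    admissionv1 "k8s.io/api/admission/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
)

func main() {
    // A hypothetical Peer manifest; the webhook unmarshals Request.Object.Raw into a
    // Kilo Peer and calls its Validate method.
    raw := []byte(`{"apiVersion":"kilo.squat.ai/v1alpha1","kind":"Peer","metadata":{"name":"example"},"spec":{"allowedIPs":["10.5.0.1/32"],"persistentKeepalive":10}}`)
    review := admissionv1.AdmissionReview{
        TypeMeta: metav1.TypeMeta{APIVersion: "admission.k8s.io/v1", Kind: "AdmissionReview"},
        Request: &admissionv1.AdmissionRequest{
            UID:       types.UID("example-request"),
            Operation: admissionv1.Create,
            Object:    runtime.RawExtension{Raw: raw},
        },
    }
    body, err := json.Marshal(review)
    if err != nil {
        panic(err)
    }
    // The server in the diff serves TLS on --listen (default :8443); a real client
    // needs a certificate it trusts, which is omitted in this sketch.
    resp, err := http.Post("https://localhost:8443/validate", "application/json", bytes.NewReader(body))
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    var out admissionv1.AdmissionReview
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        fmt.Println("decode failed:", err)
        return
    }
    fmt.Println("allowed:", out.Response != nil && out.Response.Allowed)
}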
cmd/kgctl/connect_linux.go (new file, 372 lines)

@@ -0,0 +1,372 @@
// Copyright 2022 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package main

import (
    "context"
    "errors"
    "fmt"
    "net"
    "os"
    "sort"
    "strings"
    "syscall"
    "time"

    "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/log/level"
    "github.com/oklog/run"
    "github.com/spf13/cobra"
    "golang.zx2c4.com/wireguard/wgctrl"
    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/squat/kilo/pkg/iproute"
    "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
    "github.com/squat/kilo/pkg/mesh"
    "github.com/squat/kilo/pkg/route"
    "github.com/squat/kilo/pkg/wireguard"
)

var (
    logLevel    string
    connectOpts struct {
        allowedIP           net.IPNet
        allowedIPs          []net.IPNet
        privateKey          string
        cleanUp             bool
        mtu                 uint
        resyncPeriod        time.Duration
        interfaceName       string
        persistentKeepalive int
    }
)

func takeIPNet(_ net.IP, i *net.IPNet, err error) *net.IPNet {
    if err != nil {
        panic(err)
    }
    return i
}

func connect() *cobra.Command {
    cmd := &cobra.Command{
        Use:          "connect",
        Args:         cobra.ExactArgs(1),
        RunE:         runConnect,
        Short:        "connect to a Kilo cluster as a peer over WireGuard",
        SilenceUsage: true,
    }
    cmd.Flags().IPNetVarP(&connectOpts.allowedIP, "allowed-ip", "a", *takeIPNet(net.ParseCIDR("10.10.10.10/32")), "Allowed IP of the peer.")
    cmd.Flags().StringSliceVar(&allowedIPs, "allowed-ips", []string{}, "Additional allowed IPs of the cluster, e.g. the service CIDR.")
    cmd.Flags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
    cmd.Flags().StringVar(&connectOpts.privateKey, "private-key", "", "Path to an existing WireGuard private key file.")
    cmd.Flags().BoolVar(&connectOpts.cleanUp, "clean-up", true, "Should Kilo clean up the routes and interface when it shuts down?")
    cmd.Flags().UintVar(&connectOpts.mtu, "mtu", uint(1420), "The MTU for the WireGuard interface.")
    cmd.Flags().DurationVar(&connectOpts.resyncPeriod, "resync-period", 30*time.Second, "How often should Kilo reconcile?")
    cmd.Flags().StringVarP(&connectOpts.interfaceName, "interface", "i", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
    cmd.Flags().IntVar(&connectOpts.persistentKeepalive, "persistent-keepalive", 10, "How often should WireGuard send keepalives? Setting to 0 will disable sending keepalives.")

    availableLogLevels = strings.Join([]string{
        logLevelAll,
        logLevelDebug,
        logLevelInfo,
        logLevelWarn,
        logLevelError,
        logLevelNone,
    }, ", ")

    return cmd
}

func runConnect(cmd *cobra.Command, args []string) error {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
    switch logLevel {
    case logLevelAll:
        logger = level.NewFilter(logger, level.AllowAll())
    case logLevelDebug:
        logger = level.NewFilter(logger, level.AllowDebug())
    case logLevelInfo:
        logger = level.NewFilter(logger, level.AllowInfo())
    case logLevelWarn:
        logger = level.NewFilter(logger, level.AllowWarn())
    case logLevelError:
        logger = level.NewFilter(logger, level.AllowError())
    case logLevelNone:
        logger = level.NewFilter(logger, level.AllowNone())
    default:
        return fmt.Errorf("log level %s unknown; possible values are: %s", logLevel, availableLogLevels)
    }
    logger = log.With(logger, "ts", log.DefaultTimestampUTC)
    logger = log.With(logger, "caller", log.DefaultCaller)
    peerName := args[0]

    for i := range allowedIPs {
        _, aip, err := net.ParseCIDR(allowedIPs[i])
        if err != nil {
            return err
        }
        connectOpts.allowedIPs = append(connectOpts.allowedIPs, *aip)
    }

    var privateKey wgtypes.Key
    var err error
    if connectOpts.privateKey == "" {
        privateKey, err = wgtypes.GeneratePrivateKey()
        if err != nil {
            return fmt.Errorf("failed to generate private key: %w", err)
        }
    } else {
        raw, err := os.ReadFile(connectOpts.privateKey)
        if err != nil {
            return fmt.Errorf("failed to read private key: %w", err)
        }
        privateKey, err = wgtypes.ParseKey(string(raw))
        if err != nil {
            return fmt.Errorf("failed to parse private key: %w", err)
        }
    }
    publicKey := privateKey.PublicKey()
    level.Info(logger).Log("msg", "generated public key", "key", publicKey)

    if _, err := opts.kc.KiloV1alpha1().Peers().Get(ctx, peerName, metav1.GetOptions{}); apierrors.IsNotFound(err) {
        peer := &v1alpha1.Peer{
            ObjectMeta: metav1.ObjectMeta{
                Name: peerName,
            },
            Spec: v1alpha1.PeerSpec{
                AllowedIPs:          []string{connectOpts.allowedIP.String()},
                PersistentKeepalive: connectOpts.persistentKeepalive,
                PublicKey:           publicKey.String(),
            },
        }
        if _, err := opts.kc.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{}); err != nil {
            return fmt.Errorf("failed to create peer: %w", err)
        }
        level.Info(logger).Log("msg", "created peer", "peer", peerName)
        if connectOpts.cleanUp {
            defer func() {
                ctxWithTimeout, cancelWithTimeout := context.WithTimeout(context.Background(), 10*time.Second)
                defer cancelWithTimeout()
                if err := opts.kc.KiloV1alpha1().Peers().Delete(ctxWithTimeout, peerName, metav1.DeleteOptions{}); err != nil {
                    level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
                } else {
                    level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
                }
            }()
        }

    } else if err != nil {
        return fmt.Errorf("failed to get peer: %w", err)
    }

    iface, _, err := wireguard.New(connectOpts.interfaceName, connectOpts.mtu)
    if err != nil {
        return fmt.Errorf("failed to create wg interface: %w", err)
    }
    level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)

    table := route.NewTable()
    if connectOpts.cleanUp {
        defer cleanUp(iface, table, logger)
    }

    if err := iproute.SetAddress(iface, &connectOpts.allowedIP); err != nil {
        return err
    }
    level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())

    if err := iproute.Set(iface, true); err != nil {
        return err
    }

    var g run.Group
    g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))

    {
        g.Add(
            func() error {
                errCh, err := table.Run(ctx.Done())
                if err != nil {
                    return fmt.Errorf("failed to watch for route table updates: %w", err)
                }
                for {
                    select {
                    case err, ok := <-errCh:
                        if ok {
                            level.Error(logger).Log("err", err.Error())
                        } else {
                            return nil
                        }
                    case <-ctx.Done():
                        return nil
                    }
                }
            },
            func(err error) {
                cancel()
                var serr run.SignalError
                if ok := errors.As(err, &serr); ok {
                    level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
                } else {
                    level.Error(logger).Log("msg", "received error", "err", err.Error())
                }
            },
        )
    }
    {
        g.Add(
            func() error {
                level.Info(logger).Log("msg", "starting syncer")
                for {
                    if err := sync(table, peerName, privateKey, iface, logger); err != nil {
                        level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
                    }
                    select {
                    case <-time.After(connectOpts.resyncPeriod):
                    case <-ctx.Done():
                        return nil
                    }
                }
            }, func(err error) {
                cancel()
                var serr run.SignalError
                if ok := errors.As(err, &serr); ok {
                    level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
                } else {
                    level.Error(logger).Log("msg", "received error", "err", err.Error())
                }
            })
    }

    err = g.Run()
    var serr run.SignalError
    if ok := errors.As(err, &serr); ok {
        return nil
    }
    return err
}

func cleanUp(iface int, t *route.Table, logger log.Logger) {
    if err := iproute.Set(iface, false); err != nil {
        level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
    }
    if err := iproute.RemoveInterface(iface); err != nil {
        level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
    }
    if err := t.CleanUp(); err != nil {
        level.Error(logger).Log("failed to clean up routes: %v", err)
    }
}

func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int, logger log.Logger) error {
    ns, err := opts.backend.Nodes().List()
    if err != nil {
        return fmt.Errorf("failed to list nodes: %w", err)
    }
    for _, n := range ns {
        _, err := n.Endpoint.UDPAddr(true)
        if err != nil {
            return err
        }
    }
    ps, err := opts.backend.Peers().List()
    if err != nil {
        return fmt.Errorf("failed to list peers: %w", err)
    }
    // Obtain the Granularity by looking at the annotation of the first node.
    if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
        return fmt.Errorf("failed to determine granularity: %w", err)
    }
    var hostname string
    var subnet *net.IPNet
    nodes := make(map[string]*mesh.Node)
    var nodeNames []string
    for _, n := range ns {
        if n.Ready() {
            nodes[n.Name] = n
            hostname = n.Name
            nodeNames = append(nodeNames, n.Name)
        }
        if n.WireGuardIP != nil && subnet == nil {
            subnet = n.WireGuardIP
        }
    }
    if len(nodes) == 0 {
        return errors.New("did not find any valid Kilo nodes in the cluster")
    }
    if subnet == nil {
        return errors.New("did not find a valid Kilo subnet on any node")
    }
    subnet.IP = subnet.IP.Mask(subnet.Mask)
    sort.Strings(nodeNames)
    nodes[nodeNames[0]].AllowedLocationIPs = append(nodes[nodeNames[0]].AllowedLocationIPs, connectOpts.allowedIPs...)
    peers := make(map[string]*mesh.Peer)
    for _, p := range ps {
        if p.Ready() {
            peers[p.Name] = p
        }
    }
    if _, ok := peers[peerName]; !ok {
        return fmt.Errorf("did not find any peer named %q in the cluster", peerName)
    }

    t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, wgtypes.Key{}, subnet, *peers[peerName].PersistentKeepaliveInterval, logger)
    if err != nil {
        return fmt.Errorf("failed to create topology: %w", err)
    }
    conf := t.PeerConf(peerName)
    conf.PrivateKey = &privateKey
    conf.ListenPort = &opts.port

    wgClient, err := wgctrl.New()
    if err != nil {
        return err
    }
    defer wgClient.Close()

    current, err := wgClient.Device(connectOpts.interfaceName)
    if err != nil {
        return err
    }

    var equal bool
    var diff string
    equal, diff = conf.Equal(current)
    if !equal {
        // If the key is empty, then it's the first time we are running
        // so don't bother printing a diff.
        if current.PrivateKey != [wgtypes.KeyLen]byte{} {
            level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
        }
        level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
        if err := wgClient.ConfigureDevice(connectOpts.interfaceName, conf.WGConfig()); err != nil {
            return err
        }
    }

    if err := table.Set(t.PeerRoutes(peerName, iface, connectOpts.allowedIPs)); err != nil {
        return fmt.Errorf("failed to update route table: %w", err)
    }

    return nil
}
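connect above composes its long-running pieces with oklog/run: a signal-handler actor plus a periodic syncer that reconciles every --resync-period. A minimal sketch of that actor pattern on its own, assuming only the oklog/run package; the ten-second interval and the printed "sync" line are placeholders, not Kilo code:

package main

import (
    "context"
    "errors"
    "fmt"
    "syscall"
    "time"

    "github.com/oklog/run"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    var g run.Group
    // Actor 1: terminate the group on SIGINT or SIGTERM, as runConnect does.
    g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))
    // Actor 2: a periodic reconciliation loop in the style of the syncer.
    g.Add(
        func() error {
            for {
                fmt.Println("sync") // stand-in for sync(table, peerName, ...)
                select {
                case <-time.After(10 * time.Second):
                case <-ctx.Done():
                    return nil
                }
            }
        },
        func(error) {
            // The interrupt function cancels the context so the execute function returns.
            cancel()
        },
    )

    // Run blocks until one actor returns; a delivered signal surfaces as run.SignalError.
    if err := g.Run(); err != nil {
        var serr run.SignalError
        if !errors.As(err, &serr) {
            fmt.Println("exited with error:", err)
        }
    }
}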
@@ -1,4 +1,4 @@
-// Copyright 2015 CNI authors
+// Copyright 2022 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,16 +12,24 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package ip
+//go:build !linux
+// +build !linux
+
+package main

 import (
-    "net"
+    "errors"

-    "github.com/vishvananda/netlink"
+    "github.com/spf13/cobra"
 )

-// AddDefaultRoute sets the default route on the given gateway.
-func AddDefaultRoute(gw net.IP, dev netlink.Link) error {
-    _, defNet, _ := net.ParseCIDR("0.0.0.0/0")
-    return AddRoute(defNet, gw, dev)
+func connect() *cobra.Command {
+    cmd := &cobra.Command{
+        Use:   "connect",
+        Short: "not supporred on this OS",
+        RunE: func(_ *cobra.Command, _ []string) error {
+            return errors.New("this command is not supported on this OS")
+        },
+    }
+    return cmd
 }
@@ -18,6 +18,8 @@ import (
     "fmt"

     "github.com/spf13/cobra"
+    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"

     "github.com/squat/kilo/pkg/mesh"
 )
@@ -32,15 +34,15 @@ func graph() *cobra.Command {
 func runGraph(_ *cobra.Command, _ []string) error {
     ns, err := opts.backend.Nodes().List()
     if err != nil {
-        return fmt.Errorf("failed to list nodes: %v", err)
+        return fmt.Errorf("failed to list nodes: %w", err)
     }
     ps, err := opts.backend.Peers().List()
     if err != nil {
-        return fmt.Errorf("failed to list peers: %v", err)
+        return fmt.Errorf("failed to list peers: %w", err)
     }
     // Obtain the Granularity by looking at the annotation of the first node.
-    if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
-        return fmt.Errorf("failed to obtain granularity: %w", err)
+    if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
+        return fmt.Errorf("failed to determine granularity: %w", err)
     }

     var hostname string
@@ -65,13 +67,13 @@ func runGraph(_ *cobra.Command, _ []string) error {
             peers[p.Name] = p
         }
     }
-    t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, []byte{}, subnet, nodes[hostname].PersistentKeepalive, nil)
+    t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil)
     if err != nil {
-        return fmt.Errorf("failed to create topology: %v", err)
+        return fmt.Errorf("failed to create topology: %w", err)
     }
     g, err := t.Dot()
     if err != nil {
-        return fmt.Errorf("failed to generate graph: %v", err)
+        return fmt.Errorf("failed to generate graph: %w", err)
     }
     fmt.Println(g)
     return nil
@@ -21,6 +21,7 @@ import (
     "path/filepath"
     "strings"

+    "github.com/go-kit/kit/log"
     "github.com/spf13/cobra"
     apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
     "k8s.io/client-go/kubernetes"
@@ -61,7 +62,8 @@ var (
     opts struct {
         backend     mesh.Backend
         granularity mesh.Granularity
-        port        uint32
+        kc          kiloclient.Interface
+        port        int
     }
     backend     string
     granularity string
@@ -69,36 +71,40 @@ var (
     topologyLabel string
 )

-func runRoot(_ *cobra.Command, _ []string) error {
+func runRoot(c *cobra.Command, _ []string) error {
+    if opts.port < 1 || opts.port > 1<<16-1 {
+        return fmt.Errorf("invalid port: port mus be in range [%d:%d], but got %d", 1, 1<<16-1, opts.port)
+    }
+
     opts.granularity = mesh.Granularity(granularity)
     switch opts.granularity {
     case mesh.LogicalGranularity:
     case mesh.FullGranularity:
     case mesh.AutoGranularity:
     default:
-        return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", granularity, availableGranularities)
+        return fmt.Errorf("mesh granularity %s unknown; posible values are: %s", granularity, availableGranularities)
     }

     switch backend {
     case k8s.Backend:
         config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
         if err != nil {
-            return fmt.Errorf("failed to create Kubernetes config: %v", err)
+            return fmt.Errorf("failed to create Kubernetes config: %w", err)
         }
         c := kubernetes.NewForConfigOrDie(config)
-        kc := kiloclient.NewForConfigOrDie(config)
+        opts.kc = kiloclient.NewForConfigOrDie(config)
         ec := apiextensions.NewForConfigOrDie(config)
-        opts.backend = k8s.New(c, kc, ec, topologyLabel)
+        opts.backend = k8s.New(c, opts.kc, ec, topologyLabel, log.NewNopLogger())
     default:
-        return fmt.Errorf("backend %v unknown; posible values are: %s", backend, availableBackends)
+        return fmt.Errorf("backend %s unknown; posible values are: %s", backend, availableBackends)
     }

-    if err := opts.backend.Nodes().Init(make(chan struct{})); err != nil {
-        return fmt.Errorf("failed to initialize node backend: %v", err)
+    if err := opts.backend.Nodes().Init(c.Context()); err != nil {
+        return fmt.Errorf("failed to initialize node backend: %w", err)
     }

-    if err := opts.backend.Peers().Init(make(chan struct{})); err != nil {
-        return fmt.Errorf("failed to initialize peer backend: %v", err)
+    if err := opts.backend.Peers().Init(c.Context()); err != nil {
+        return fmt.Errorf("failed to initialize peer backend: %w", err)
     }
     return nil
 }
@@ -110,6 +116,7 @@ func main() {
         Long:              "",
         PersistentPreRunE: runRoot,
         Version:           version.Version,
+        SilenceErrors:     true,
     }
     cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
     cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
@@ -118,12 +125,13 @@ func main() {
         defaultKubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config")
     }
     cmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", defaultKubeconfig, "Path to kubeconfig.")
-    cmd.PersistentFlags().Uint32Var(&opts.port, "port", mesh.DefaultKiloPort, "The WireGuard port over which the nodes communicate.")
+    cmd.PersistentFlags().IntVar(&opts.port, "port", mesh.DefaultKiloPort, "The WireGuard port over which the nodes communicate.")
     cmd.PersistentFlags().StringVar(&topologyLabel, "topology-label", k8s.RegionLabelKey, "Kubernetes node label used to group nodes into logical locations.")

     for _, subCmd := range []*cobra.Command{
         graph(),
         showConf(),
+        connect(),
     } {
         cmd.AddCommand(subCmd)
     }
@@ -134,7 +142,7 @@ func main() {
     }
 }

-func optainGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
+func determineGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
     if gr == mesh.AutoGranularity {
         if len(ns) == 0 {
             return gr, errors.New("could not get any nodes")
@@ -144,7 +152,7 @@ func determineGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity,
         case mesh.LogicalGranularity:
         case mesh.FullGranularity:
         default:
-            return ret, fmt.Errorf("mesh granularity %v is not supported", opts.granularity)
+            return ret, fmt.Errorf("mesh granularity %s is not supported", opts.granularity)
         }
         return ret, nil
     }
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -15,14 +15,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@ -47,7 +48,7 @@ var (
|
||||
}, ", ")
|
||||
allowedIPs []string
|
||||
showConfOpts struct {
|
||||
allowedIPs []*net.IPNet
|
||||
allowedIPs []net.IPNet
|
||||
serializer *json.Serializer
|
||||
output string
|
||||
asPeer bool
|
||||
@ -82,14 +83,14 @@ func runShowConf(c *cobra.Command, args []string) error {
|
||||
case outputFormatYAML:
|
||||
showConfOpts.serializer = json.NewYAMLSerializer(json.DefaultMetaFactory, peerCreatorTyper{}, peerCreatorTyper{})
|
||||
default:
|
||||
return fmt.Errorf("output format %v unknown; posible values are: %s", showConfOpts.output, availableOutputFormats)
|
||||
return fmt.Errorf("output format %s unknown; posible values are: %s", showConfOpts.output, availableOutputFormats)
|
||||
}
|
||||
for i := range allowedIPs {
|
||||
_, aip, err := net.ParseCIDR(allowedIPs[i])
|
||||
if err != nil {
|
||||
return fmt.Errorf("allowed-ips must contain only valid CIDRs; got %q", allowedIPs[i])
|
||||
}
|
||||
showConfOpts.allowedIPs = append(showConfOpts.allowedIPs, aip)
|
||||
showConfOpts.allowedIPs = append(showConfOpts.allowedIPs, *aip)
|
||||
}
|
||||
return runRoot(c, args)
|
||||
}
|
||||
@ -115,15 +116,15 @@ func showConfPeer() *cobra.Command {
|
||||
func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
ns, err := opts.backend.Nodes().List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list nodes: %v", err)
|
||||
return fmt.Errorf("failed to list nodes: %w", err)
|
||||
}
|
||||
ps, err := opts.backend.Peers().List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list peers: %v", err)
|
||||
return fmt.Errorf("failed to list peers: %w", err)
|
||||
}
|
||||
// Obtain the Granularity by looking at the annotation of the first node.
|
||||
if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
|
||||
return fmt.Errorf("failed to obtain granularity: %w", err)
|
||||
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
|
||||
return fmt.Errorf("failed to determine granularity: %w", err)
|
||||
}
|
||||
hostname := args[0]
|
||||
subnet := mesh.DefaultKiloSubnet
|
||||
@ -151,14 +152,14 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, []byte{}, subnet, nodes[hostname].PersistentKeepalive, nil)
|
||||
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, int(opts.port), wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create topology: %v", err)
|
||||
return fmt.Errorf("failed to create topology: %w", err)
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, p := range t.PeerConf("").Peers {
|
||||
if bytes.Equal(p.PublicKey, nodes[hostname].Key) {
|
||||
if p.PublicKey == nodes[hostname].Key {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
@ -171,7 +172,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
if !showConfOpts.asPeer {
|
||||
c, err := t.Conf().Bytes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate configuration: %v", err)
|
||||
return fmt.Errorf("failed to generate configuration: %w", err)
|
||||
}
|
||||
_, err = os.Stdout.Write(c)
|
||||
return err
|
||||
@ -182,6 +183,9 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
fallthrough
|
||||
case outputFormatYAML:
|
||||
p := t.AsPeer()
|
||||
if p == nil {
|
||||
return errors.New("cannot generate config from nil peer")
|
||||
}
|
||||
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
|
||||
p.DeduplicateIPs()
|
||||
k8sp := translatePeer(p)
|
||||
@ -189,13 +193,16 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
return showConfOpts.serializer.Encode(k8sp, os.Stdout)
|
||||
case outputFormatWireGuard:
|
||||
p := t.AsPeer()
|
||||
if p == nil {
|
||||
return errors.New("cannot generate config from nil peer")
|
||||
}
|
||||
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
|
||||
p.DeduplicateIPs()
|
||||
c, err := (&wireguard.Conf{
|
||||
Peers: []*wireguard.Peer{p},
|
||||
Peers: []wireguard.Peer{*p},
|
||||
}).Bytes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate configuration: %v", err)
|
||||
return fmt.Errorf("failed to generate configuration: %w", err)
|
||||
}
|
||||
_, err = os.Stdout.Write(c)
|
||||
return err
|
||||
@ -206,15 +213,15 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
|
||||
func runShowConfPeer(_ *cobra.Command, args []string) error {
|
||||
ns, err := opts.backend.Nodes().List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list nodes: %v", err)
|
||||
return fmt.Errorf("failed to list nodes: %w", err)
|
||||
}
|
||||
ps, err := opts.backend.Peers().List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list peers: %v", err)
|
||||
return fmt.Errorf("failed to list peers: %w", err)
|
||||
}
|
||||
// Obtain the Granularity by looking at the annotation of the first node.
|
||||
if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
|
||||
return fmt.Errorf("failed to obtain granularity: %w", err)
|
||||
if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
|
||||
return fmt.Errorf("failed to determine granularity: %w", err)
|
||||
}
|
||||
var hostname string
|
||||
subnet := mesh.DefaultKiloSubnet
|
||||
@ -244,14 +251,18 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("did not find any peer named %q in the cluster", peer)
|
||||
}
|
||||
|
||||
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, []byte{}, subnet, peers[peer].PersistentKeepalive, nil)
|
||||
pka := time.Duration(0)
|
||||
if p := peers[peer].PersistentKeepaliveInterval; p != nil {
|
||||
pka = *p
|
||||
}
|
||||
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, wgtypes.Key{}, subnet, pka, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create topology: %v", err)
|
||||
return fmt.Errorf("failed to create topology: %w", err)
|
||||
}
|
||||
if !showConfOpts.asPeer {
|
||||
c, err := t.PeerConf(peer).Bytes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate configuration: %v", err)
|
||||
return fmt.Errorf("failed to generate configuration: %w", err)
|
||||
}
|
||||
_, err = os.Stdout.Write(c)
|
||||
return err
|
||||
@ -272,10 +283,10 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
|
||||
p.AllowedIPs = append(p.AllowedIPs, showConfOpts.allowedIPs...)
|
||||
p.DeduplicateIPs()
|
||||
c, err := (&wireguard.Conf{
|
||||
Peers: []*wireguard.Peer{p},
|
||||
Peers: []wireguard.Peer{*p},
|
||||
}).Bytes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate configuration: %v", err)
|
||||
return fmt.Errorf("failed to generate configuration: %w", err)
|
||||
}
|
||||
_, err = os.Stdout.Write(c)
|
||||
return err
|
||||
@ -284,6 +295,7 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// translatePeer translates a wireguard.Peer to a Peer CRD.
|
||||
// TODO this function has many similarities to peerBackend.Set(name, peer)
|
||||
func translatePeer(peer *wireguard.Peer) *v1alpha1.Peer {
|
||||
if peer == nil {
|
||||
return &v1alpha1.Peer{}
|
||||
@ -291,36 +303,33 @@ func translatePeer(peer *wireguard.Peer) *v1alpha1.Peer {
|
||||
var aips []string
|
||||
for _, aip := range peer.AllowedIPs {
|
||||
// Skip any invalid IPs.
|
||||
if aip == nil {
|
||||
// TODO all IPs should be valid, so no need to skip here?
|
||||
if aip.String() == (&net.IPNet{}).String() {
|
||||
continue
|
||||
}
|
||||
aips = append(aips, aip.String())
|
||||
}
|
||||
var endpoint *v1alpha1.PeerEndpoint
|
||||
if peer.Endpoint != nil && peer.Endpoint.Port > 0 && (peer.Endpoint.IP != nil || peer.Endpoint.DNS != "") {
|
||||
var ip string
|
||||
if peer.Endpoint.IP != nil {
|
||||
ip = peer.Endpoint.IP.String()
|
||||
}
|
||||
if peer.Endpoint.Port() > 0 || !peer.Endpoint.HasDNS() {
|
||||
endpoint = &v1alpha1.PeerEndpoint{
|
||||
DNSOrIP: v1alpha1.DNSOrIP{
|
||||
DNS: peer.Endpoint.DNS,
|
||||
IP: ip,
|
||||
IP: peer.Endpoint.IP().String(),
|
||||
DNS: peer.Endpoint.DNS(),
|
||||
},
|
||||
Port: peer.Endpoint.Port,
|
||||
Port: uint32(peer.Endpoint.Port()),
|
||||
}
|
||||
}
|
||||
var key string
|
||||
if len(peer.PublicKey) > 0 {
|
||||
key = string(peer.PublicKey)
|
||||
if peer.PublicKey != (wgtypes.Key{}) {
|
||||
key = peer.PublicKey.String()
|
||||
}
|
||||
var psk string
|
||||
if len(peer.PresharedKey) > 0 {
|
||||
psk = string(peer.PresharedKey)
|
||||
if peer.PresharedKey != nil {
|
||||
psk = peer.PresharedKey.String()
|
||||
}
|
||||
var pka int
|
||||
if peer.PersistentKeepalive > 0 {
|
||||
pka = peer.PersistentKeepalive
|
||||
if peer.PersistentKeepaliveInterval != nil && *peer.PersistentKeepaliveInterval > time.Duration(0) {
|
||||
pka = int(*peer.PersistentKeepaliveInterval)
|
||||
}
|
||||
return &v1alpha1.Peer{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
|
962 docs/grafana/kilo.json Normal file
@ -0,0 +1,962 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_PROMETHEUS",
|
||||
"label": "prometheus",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "prometheus",
|
||||
"pluginName": "Prometheus"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "7.5.4"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "stat",
|
||||
"name": "Stat",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "Bps"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 12,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "sum by (pod) (rate(wireguard_received_bytes_total[1h])) + sum by (pod) (rate(wireguard_sent_bytes_total[1h]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [
|
||||
{
|
||||
"$$hashKey": "object:64",
|
||||
"colorMode": "background6",
|
||||
"fill": true,
|
||||
"fillColor": "rgba(234, 112, 112, 0.12)",
|
||||
"line": false,
|
||||
"lineColor": "rgba(237, 46, 24, 0.60)",
|
||||
"op": "time"
|
||||
}
|
||||
],
|
||||
"timeShift": null,
|
||||
"title": "Throughput",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:42",
|
||||
"format": "Bps",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:43",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 10,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "(sum(rate(wireguard_sent_bytes_total[5m])) - sum(rate(wireguard_received_bytes_total[5m])))/(sum(rate(wireguard_sent_bytes_total[5m])) + sum(rate(wireguard_received_bytes_total[5m])))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Slip (send - received)",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:502",
|
||||
"format": "percentunit",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:503",
|
||||
"format": "Bps",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 16,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "sum by (public_key) (time() - (wireguard_latest_handshake_seconds!=0))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "latest handshake",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:219",
|
||||
"format": "s",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": "1000",
|
||||
"min": "0",
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:220",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 18,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "sum by (instance) (rate(kilo_reconciles_total[30m]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "kilo reconciles",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:539",
|
||||
"decimals": null,
|
||||
"format": "hertz",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:540",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 4,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "7.5.4",
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "avg(kilo_peers)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Kilo Peers",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 4,
|
||||
"x": 4,
|
||||
"y": 16
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "7.5.4",
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "avg(kilo_nodes)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Kilo Nodes",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 4,
|
||||
"x": 8,
|
||||
"y": 16
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "7.5.4",
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "sum(kilo_leader)",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "segments",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 6,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "sum by (instance) (rate(kilo_errors_total[10m]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Kilo Errors",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:446",
|
||||
"format": "hertz",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:447",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 20,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": true,
|
||||
"expr": "sum by (instance) (rate(process_cpu_seconds_total{pod=~\"kilo-.*\"}[1m]))",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "CPU usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:162",
|
||||
"format": "percentunit",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:163",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 22,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.5.4",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"exemplar": false,
|
||||
"expr": "sum by (instance) (process_resident_memory_bytes{pod=~\"kilo-.*\"})",
|
||||
"interval": "",
|
||||
"legendFormat": "",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Memory Allocation",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:231",
|
||||
"format": "decbytes",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:232",
|
||||
"format": "decmbytes",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"datasource": null,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 32
|
||||
},
|
||||
"id": 14,
|
||||
"panels": [],
|
||||
"title": "Row title",
|
||||
"type": "row"
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
"schemaVersion": 27,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-24h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "",
|
||||
"title": "Kilo",
|
||||
"uid": "R8Lja3H7z",
|
||||
"version": 11
|
||||
}
|
BIN docs/graphs/kilo.png Normal file
Binary file not shown. (new image added; 543 KiB)
63 docs/kg.md
@ -16,26 +16,45 @@ The behavior of `kg` can be configured using the command line flags listed below

[embedmd]:# (../tmp/help.txt)
```txt
Usage of bin//linux/amd64/kg:
--backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes")
--clean-up-interface Should Kilo delete its interface when it shuts down?
--cni Should Kilo manage the node's CNI configuration? (default true)
--cni-path string Path to CNI config. (default "/etc/cni/net.d/10-kilo.conflist")
--compatibility string Should Kilo run in compatibility mode? Possible values: flannel
--create-interface Should kilo create an interface on startup? (default true)
--encapsulate string When should Kilo encapsulate packets within a location? Possible values: never, crosssubnet, always (default "always")
--hostname string Hostname of the node on which this process is running.
--interface string Name of the Kilo interface to use; if it does not exist, it will be created. (default "kilo0")
--kubeconfig string Path to kubeconfig.
--listen string The address at which to listen for health and metrics. (default ":1107")
--local Should Kilo manage routes within a location? (default true)
--log-level string Log level to use. Possible values: all, debug, info, warn, error, none (default "info")
--master string The address of the Kubernetes API server (overrides any value in kubeconfig).
--mesh-granularity string The granularity of the network mesh to create. Possible values: location, full (default "location")
--mtu uint The MTU of the WireGuard interface created by Kilo. (default 1420)
--port uint The port over which WireGuard peers should communicate. (default 51820)
--resync-period duration How often should the Kilo controllers reconcile? (default 30s)
--subnet string CIDR from which to allocate addresses for WireGuard interfaces. (default "10.4.0.0/16")
--topology-label string Kubernetes node label used to group nodes into logical locations. (default "topology.kubernetes.io/region")
--version Print version and exit
kg is the Kilo agent.
It runs on every node of a cluster,
setting up the public and private keys for the VPN
as well as the necessary rules to route packets between locations.

Usage:
kg [flags]
kg [command]

Available Commands:
completion generate the autocompletion script for the specified shell
help Help about any command
version Print the version and exit.
webhook webhook starts a HTTPS server to validate updates and creations of Kilo peers.

Flags:
--backend string The backend for the mesh. Possible values: kubernetes (default "kubernetes")
--clean-up-interface Should Kilo delete its interface when it shuts down?
--cni Should Kilo manage the node's CNI configuration? (default true)
--cni-path string Path to CNI config. (default "/etc/cni/net.d/10-kilo.conflist")
--compatibility string Should Kilo run in compatibility mode? Possible values: flannel
--create-interface Should kilo create an interface on startup? (default true)
--encapsulate string When should Kilo encapsulate packets within a location? Possible values: never, crosssubnet, always (default "always")
-h, --help help for kg
--hostname string Hostname of the node on which this process is running.
--interface string Name of the Kilo interface to use; if it does not exist, it will be created. (default "kilo0")
--iptables-forward-rules Add default accept rules to the FORWARD chain in iptables. Warning: this may break firewalls with a deny all policy and is potentially insecure!
--kubeconfig string Path to kubeconfig.
--listen string The address at which to listen for health and metrics. (default ":1107")
--local Should Kilo manage routes within a location? (default true)
--log-level string Log level to use. Possible values: all, debug, info, warn, error, none (default "info")
--master string The address of the Kubernetes API server (overrides any value in kubeconfig).
--mesh-granularity string The granularity of the network mesh to create. Possible values: location, full (default "location")
--mtu uint The MTU of the WireGuard interface created by Kilo. (default 1420)
--port int The port over which WireGuard peers should communicate. (default 51820)
--prioritise-private-addresses Prefer to assign a private IP address to the node's endpoint.
--resync-period duration How often should the Kilo controllers reconcile? (default 30s)
--subnet string CIDR from which to allocate addresses for WireGuard interfaces. (default "10.4.0.0/16")
--topology-label string Kubernetes node label used to group nodes into logical locations. (default "topology.kubernetes.io/region")
--version Print version and exit

```
@ -31,13 +31,70 @@ make

This will produce a `kgctl` binary at `./bin/<your-os>/<your-architecture>/kgctl`.


### Binary Packages

#### Arch Linux

Install `kgctl` from the Arch User Repository using an AUR helper like `paru` or `yay`:

```shell
paru -S kgctl-bin
```

#### Arkade

The [arkade](https://github.com/alexellis/arkade) CLI can be used to install `kgctl` on any OS and architecture:

```shell
arkade get kgctl
```

## Commands

|Command|Syntax|Description|
|----|----|-------|
|[connect](#connect)|`kgctl connect <peer-name> [flags]`|Connect the host to the cluster, setting up the required interfaces, routes, and keys.|
|[graph](#graph)|`kgctl graph [flags]`|Produce a graph in GraphViz format representing the topology of the cluster.|
|[showconf](#showconf)|`kgctl showconf ( node \| peer ) NAME [flags]`|Show the WireGuard configuration for a node or peer in the mesh.|
|[showconf](#showconf)|`kgctl showconf ( node \| peer ) <name> [flags]`|Show the WireGuard configuration for a node or peer in the mesh.|

### connect

The `connect` command configures the local host as a WireGuard Peer of the cluster and applies all of the necessary networking configuration to connect to the cluster.
As long as the process is running, it will watch the cluster for changes and automatically manage the configuration for new or updated Peers and Nodes.
If the given Peer name does not exist in the cluster, the command will register a new Peer and generate the necessary WireGuard keys.
When the command exits, all of the configuration, including newly registered Peers, is cleaned up.

Example:

```shell
PEER_NAME=laptop
SERVICECIDR=10.43.0.0/16
kgctl connect $PEER_NAME --allowed-ips $SERVICECIDR
```

The local host is now connected to the cluster and all IPs from the cluster and any registered Peers are fully routable.
When combined with the `--clean-up false` flag, the configuration produced by the command is persistent and will remain in effect even after the process is stopped.
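For instance, a peer registration that should outlive the `kgctl` process might look like the following sketch; it reuses the `PEER_NAME` and `SERVICECIDR` variables from the example above and assumes the `--clean-up` flag takes a boolean value, as the sentence above suggests.

```shell
# Keep the WireGuard interface, routes, and the registered Peer in place after kgctl exits.
# (Assumes --clean-up accepts a boolean, per the note above.)
kgctl connect $PEER_NAME --allowed-ips $SERVICECIDR --clean-up=false
```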

With the service CIDR of the cluster routable from the local host, Kubernetes DNS names can now be resolved by the cluster DNS provider.
For example, the following snippet could be used to resolve the clusterIP of the Kubernetes API:
```shell
dig @$(kubectl get service -n kube-system kube-dns -o=jsonpath='{.spec.clusterIP}') kubernetes.default.svc.cluster.local +short
# > 10.43.0.1
```

For convenience, the cluster DNS provider's IP address can be configured as the local host's DNS server, making Kubernetes DNS names easily resolvable.
For example, if using `systemd-resolved`, the following snippet could be used:
```shell
systemd-resolve --interface kilo0 --set-dns $(kubectl get service -n kube-system kube-dns -o=jsonpath='{.spec.clusterIP}') --set-domain cluster.local
# Now all lookups for DNS names ending in `.cluster.local` will be routed over the `kilo0` interface to the cluster DNS provider.
dig kubernetes.default.svc.cluster.local +short
# > 10.43.0.1
```

> **Note**: The `connect` command is currently only supported on Linux.
> **Note**: The `connect` command requires the `CAP_NET_ADMIN` capability in order to configure the host's networking stack; unprivileged users will need to use `sudo` or similar tools.
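As a sketch of what the note implies, either of the following approaches could be used; the `setcap` variant and the binary path are illustrative assumptions, not a documented workflow.

```shell
# Option 1: run the command with elevated privileges.
sudo kgctl connect $PEER_NAME --allowed-ips $SERVICECIDR

# Option 2 (illustrative): grant CAP_NET_ADMIN to the binary so an unprivileged
# user can run it; adjust the path to wherever kgctl was installed.
sudo setcap cap_net_admin+ep ./bin/linux/amd64/kgctl
```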

### graph
100 docs/monitoring.md Normal file
@ -0,0 +1,100 @@
# Monitoring

The following assumes that you have applied the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) monitoring stack onto your cluster.

## Kilo

Monitor the Kilo DaemonSet with:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/podmonitor.yaml
```

## WireGuard

Monitor the WireGuard interfaces with:
```shell
kubectl create ns kilo
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/wg-exporter.yaml
```

The manifest will deploy the [Prometheus WireGuard Exporter](https://github.com/MindFlavor/prometheus_wireguard_exporter) as a DaemonSet and a [PodMonitor](https://docs.openshift.com/container-platform/4.8/rest_api/monitoring_apis/podmonitor-monitoring-coreos-com-v1.html).

By default the kube-prometheus stack only monitors the `default`, `kube-system` and `monitoring` namespaces.
In order to allow Prometheus to monitor the `kilo` namespace, apply the Role and RoleBinding with:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/wg-exporter-role-kube-prometheus.yaml
```

## Metrics

### Kilo

Kilo exports some standard metrics with the Prometheus GoCollector and ProcessCollector.
It also exposes some Kilo-specific metrics.

```
# HELP kilo_errors_total Number of errors that occurred while administering the mesh.
# TYPE kilo_errors_total counter

# HELP kilo_leader Leadership status of the node.
# TYPE kilo_leader gauge

# HELP kilo_nodes Number of nodes in the mesh.
# TYPE kilo_nodes gauge

# HELP kilo_peers Number of peers in the mesh.
# TYPE kilo_peers gauge

# HELP kilo_reconciles_total Number of reconciliation attempts.
# TYPE kilo_reconciles_total counter
```
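As an illustration of how these counters are typically consumed, the query below mirrors the error-rate expression used by the example Grafana dashboard added in this repository; it assumes Prometheus is reachable on `localhost:9090`, for example via the `port-forward` described further down.

```shell
# Per-instance error rate over the last 10 minutes
# (same expression as the dashboard's "Kilo Errors" panel).
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=sum by (instance) (rate(kilo_errors_total[10m]))'
```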

### WireGuard

The [Prometheus WireGuard Exporter](https://github.com/MindFlavor/prometheus_wireguard_exporter) exports the following metrics:

```
# HELP wireguard_sent_bytes_total Bytes sent to the peer
# TYPE wireguard_sent_bytes_total counter

# HELP wireguard_received_bytes_total Bytes received from the peer
# TYPE wireguard_received_bytes_total counter

# HELP wireguard_latest_handshake_seconds Seconds from the last handshake
# TYPE wireguard_latest_handshake_seconds gauge
```

## Display some Metrics

If your laptop is a Kilo peer of the cluster you can access the Prometheus UI by navigating your browser directly to the cluster IP of the `prometheus-k8s` service.
Otherwise use `port-forward`:
```shell
kubectl -n monitoring port-forward svc/prometheus-k8s 9090
```
and navigate your browser to `localhost:9090`.
Check if you can see the PodMonitors for Kilo and the WireGuard Exporter under **Status** -> **Targets** in the Prometheus web UI.

If you don't see them, check the logs of the `prometheus-k8s` Pods; it may be that Prometheus doesn't have the permission to get Pods in the `kilo` namespace.
In this case, you need to apply the Role and RoleBinding from above.

Navigate to **Graph** and try to execute a simple query, e.g. type `kilo_nodes` and click on `execute`.
You should see some data.
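The same checks can also be scripted against the Prometheus HTTP API instead of the web UI; the snippet below is only a sketch and assumes the `port-forward` from above is still running (`jq` is optional and used purely for readability).

```shell
# List the scrape targets, then run the same sample query from the command line.
curl -s http://localhost:9090/api/v1/targets | jq '.data.activeTargets[].labels.job'
curl -s 'http://localhost:9090/api/v1/query?query=kilo_nodes' | jq '.data.result'
```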

## Using Grafana

Let's navigate to the Grafana dashboard.
Again, if your laptop is not a Kilo peer, use `port-forward`:
```shell
kubectl -n monitoring port-forward svc/grafana 3000
```

Now navigate your browser to `localhost:3000`.
The default user and password is `admin` `admin`.

An example configuration for a dashboard displaying Kilo metrics can be found [here](https://raw.githubusercontent.com/squat/kilo/main/docs/grafana/kilo.json).
|

The dashboard looks like this:

<img src="./graphs/kilo.png" />
@ -10,7 +10,7 @@ Support for [Kubernetes network policies](https://kubernetes.io/docs/concepts/se
The following command adds network policy support by deploying kube-router to work alongside Kilo:

```shell
kubectl apply -f kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kube-router.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/main/manifests/kube-router.yaml
```

## Examples
@ -9,29 +9,14 @@ Once such a configuration is applied, the Kubernetes API server will send an Adm
With regard to the [failure policy](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy), the API server will apply the requested changes to a resource if the request was answered with `"allowed": true`, or deny the changes if the answer was `"allowed": false`.

In case of Kilo Peer Validation, the specified operations are `UPDATE` and `CREATE`, the resources are `Peers`, and the default `failurePolicy` is set to `Fail`.
View the full ValidatingWebhookConfiguration [here](https://github.com/leonnicolas/kilo-peer-validation/blob/main/deployment-no-cabundle.yaml).
View the full ValidatingWebhookConfiguration [here](https://github.com/squat/kilo/blob/main/manifests/peer-validation.yaml).

## Getting Started

[Kilo-Peer-Validation](https://github.com/leonnicolas/kilo-peer-validation) is a webserver that rejects any AdmissionReviewRequest with a faulty Peer configuration.

Apply the Service, the Deployment of the actual webserver, and the ValidatingWebhookConfiguration with:
```shell
kubectl apply -f https://raw.githubusercontent.com/leonnicolas/kilo-peer-validation/main/deployment-no-cabundle.yaml
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/blob/main/manifests/peer-validation.yaml
```

The Kubernetes API server will only talk to webhook servers via TLS so the Kilo-Peer-Validation server must be given a valid TLS certificate and key, and the API server must be told what certificate authority (CA) to trust.
One way to do this is to use the [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen) project to create a Kubernetes Secret holding the TLS certificate and key for the webhook server and to make a certificate signing request to the Kubernetes API server.
The following snippet can be used to run kube-webhook-certgen in a Docker container to create a Secret and certificate signing request:
```shell
docker run -v /path/to/kubeconfig:/kubeconfig.yaml:ro jettech/kube-webhook-certgen:v1.5.2 --kubeconfig /kubeconfig.yaml create --namespace kilo --secret-name peer-validation-webhook-tls --host peer-validation,peer-validation.kilo.svc --key-name tls.key --cert-name tls.config
```

Now, the Kubernetes API server can be told what CA to trust by patching the ValidatingWebhookConfiguration with the newly created CA bundle:
```shell
docker run -v /path/to/kubeconfig:/kubeconfig.yaml:ro jettech/kube-webhook-certgen:v1.5.2 --kubeconfig /kubeconfig.yaml patch --webhook-name peer-validation.kilo.svc --secret-name peer-validation-webhook-tls --namespace kilo --patch-mutating=false
```

## Alternative Method

An alternative method to generate a ValidatingWebhookConfiguration manifest without using Kubernetes' Certificate Signing API is described in [Kilo-Peer-Validation](https://github.com/leonnicolas/kilo-peer-validation#use-the-set-up-script).
The above manifest will use [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen) to generate the requiered certificates and patch the [ValidatingWebhookConfiguration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#configure-admission-webhooks-on-the-fly).
@ -18,7 +18,7 @@ test_full_mesh_connectivity() {
}

test_full_mesh_peer() {
check_peer wg1 e2e 10.5.0.1/32 full
check_peer wg99 e2e 10.5.0.1/32 full
}

test_full_mesh_allowed_location_ips() {
17 e2e/kgctl.sh Normal file
@ -0,0 +1,17 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091
. lib.sh

setup_suite() {
# shellcheck disable=SC2016
block_until_ready_by_name kube-system kilo-userspace
_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
}

test_connect() {
local PEER=test
local ALLOWED_IP=10.5.0.1/32
docker run -d --name="$PEER" --rm --network=host --cap-add=NET_ADMIN -v "$KGCTL_BINARY":/kgctl -v "$PWD/$KUBECONFIG":/kubeconfig --entrypoint=/kgctl alpine --kubeconfig /kubeconfig connect "$PEER" --allowed-ip "$ALLOWED_IP"
assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
docker stop "$PEER"
}
@ -8,7 +8,7 @@ metadata:
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"cniVersion":"0.4.0",
"name":"kilo",
"plugins":[
{
@ -136,7 +136,7 @@ spec:
mountPath: /etc/kubernetes
readOnly: true
- name: boringtun
image: leonnicolas/boringtun:alpine
image: leonnicolas/boringtun:cc19859
args:
- --disable-drop-privileges=true
- --foreground
22
e2e/lib.sh
22
e2e/lib.sh
@ -54,7 +54,7 @@ build_kind_config() {
|
||||
export API_SERVER_PORT="${2:-6443}"
|
||||
export POD_SUBNET="${3:-10.42.0.0/16}"
|
||||
export SERVICE_SUBNET="${4:-10.43.0.0/16}"
|
||||
export WORKERS=""
|
||||
export WORKERS=""
|
||||
local i=0
|
||||
while [ "$i" -lt "$WORKER_COUNT" ]; do
|
||||
WORKERS="$(printf "%s\n- role: worker" "$WORKERS")"
|
||||
@ -65,7 +65,7 @@ build_kind_config() {
|
||||
}
|
||||
|
||||
create_interface() {
|
||||
docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
|
||||
docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun:cc19859 --foreground --disable-drop-privileges true "$1"
|
||||
}
|
||||
|
||||
delete_interface() {
|
||||
@ -126,7 +126,7 @@ create_cluster() {
|
||||
# Apply Kilo the the cluster.
|
||||
_kubectl apply -f ../manifests/crds.yaml
_kubectl apply -f kilo-kind-userspace.yaml
block_until_ready_by_name kube-system kilo-userspace
block_until_ready_by_name kube-system kilo-userspace
_kubectl wait nodes --all --for=condition=Ready
# Wait for CoreDNS.
block_until_ready kube_system k8s-app=kube-dns
@ -135,7 +135,7 @@ create_cluster() {
 block_until_ready_by_name default curl
 _kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
 _kubectl apply -f https://raw.githubusercontent.com/kilo-io/adjacency/main/example.yaml
-block_until_ready_by_name adjacency adjacency
+block_until_ready_by_name default adjacency
 }

 delete_cluster () {
@ -184,14 +184,14 @@ check_peer() {
 local ALLOWED_IP=$3
 local GRANULARITY=$4
 create_interface "$INTERFACE"
-docker run --rm --entrypoint=/usr/bin/wg "$KILO_IMAGE" genkey > "$INTERFACE"
-assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" -c 'cat /key | wg pubkey')" "should be able to create Peer"
+docker run --rm leonnicolas/wg-tools wg genkey > "$INTERFACE"
+assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key leonnicolas/wg-tools -c 'cat /key | wg pubkey')" "should be able to create Peer"
 assert "_kgctl showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
-assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini $KILO_IMAGE setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl"
-docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" set "$INTERFACE" private-key /key
-docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" address add "$ALLOWED_IP" dev "$INTERFACE"
-docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
-docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
+assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini leonnicolas/wg-tools setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl"
+docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key leonnicolas/wg-tools set "$INTERFACE" private-key /key
+docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools address add "$ALLOWED_IP" dev "$INTERFACE"
+docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools link set "$INTERFACE" up
+docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip leonnicolas/wg-tools route add 10.42/16 dev "$INTERFACE"
 assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
 assert_equals "$(_kgctl showconf peer "$PEER")" "$(_kgctl showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
 rm "$INTERFACE" "$PEER".ini

@ -18,7 +18,7 @@ test_location_mesh_connectivity() {
 }

 test_location_mesh_peer() {
-check_peer wg1 e2e 10.5.0.1/32 location
+check_peer wg99 e2e 10.5.0.1/32 location
 }

 test_mesh_granularity_auto_detect() {
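The check_peer helper above mints a throwaway WireGuard identity by driving wg genkey and wg pubkey through Docker. The same key handling is available programmatically through the wgctrl library that this change adds to go.mod; a minimal, illustrative Go sketch (not part of the diff):

package main

import (
    "fmt"

    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
    // Equivalent of `wg genkey`: generate a new Curve25519 private key.
    priv, err := wgtypes.GeneratePrivateKey()
    if err != nil {
        panic(err)
    }
    // Equivalent of `wg pubkey`: derive the public key that would be
    // registered in a Kilo Peer resource.
    fmt.Println("private:", priv.String())
    fmt.Println("public: ", priv.PublicKey().String())
}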
98 go.mod
@ -1,28 +1,88 @@
 module github.com/squat/kilo

-go 1.15
+go 1.18

 require (
 	github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310
 	github.com/campoy/embedmd v1.0.0
-	github.com/containernetworking/cni v0.6.0
-	github.com/containernetworking/plugins v0.6.0
-	github.com/coreos/go-iptables v0.4.0
+	github.com/containernetworking/cni v1.0.1
+	github.com/containernetworking/plugins v1.1.1
+	github.com/coreos/go-iptables v0.6.0
 	github.com/go-kit/kit v0.9.0
-	github.com/imdario/mergo v0.3.6 // indirect
 	github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348
-	github.com/oklog/run v1.0.0
-	github.com/prometheus/client_golang v1.7.1
-	github.com/spf13/cobra v1.1.3
-	github.com/spf13/pflag v1.0.5
-	github.com/vishvananda/netlink v1.0.0
-	github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
-	golang.org/x/lint v0.0.0-20200302205851-738671d3881b
-	golang.org/x/sys v0.0.0-20210510120138-977fb7262007
-	k8s.io/api v0.21.1
-	k8s.io/apiextensions-apiserver v0.21.1
-	k8s.io/apimachinery v0.21.1
-	k8s.io/client-go v0.21.1
-	k8s.io/code-generator v0.21.1
-	sigs.k8s.io/controller-tools v0.6.0
+	github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a
+	github.com/oklog/run v1.1.0
+	github.com/prometheus/client_golang v1.11.0
+	github.com/spf13/cobra v1.2.1
+	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
+	golang.org/x/sys v0.0.0-20211124211545-fe61309f8881
+	golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211124212657-dd7407c86d22
+	honnef.co/go/tools v0.3.1
+	k8s.io/api v0.23.6
+	k8s.io/apiextensions-apiserver v0.23.6
+	k8s.io/apimachinery v0.23.6
+	k8s.io/client-go v0.23.6
+	k8s.io/code-generator v0.23.6
+	sigs.k8s.io/controller-tools v0.8.0
 )
+
+require (
+	github.com/BurntSushi/toml v0.4.1 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/fatih/color v1.12.0 // indirect
+	github.com/go-logfmt/logfmt v0.5.0 // indirect
+	github.com/go-logr/logr v1.2.0 // indirect
+	github.com/gobuffalo/flect v0.2.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/uuid v1.2.0 // indirect
+	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/imdario/mergo v0.3.11 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mattn/go-colorable v0.1.8 // indirect
+	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mdlayher/genetlink v1.0.0 // indirect
+	github.com/mdlayher/netlink v1.4.1 // indirect
+	github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.28.0 // indirect
+	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
+	golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect
+	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
+	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
+	golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.zx2c4.com/wireguard v0.0.0-20211123210315-387f7c461a16 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
+	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
+)
@ -1,8 +1,9 @@
+---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.6.0
+    controller-gen.kubebuilder.io/version: v0.8.0
   creationTimestamp: null
   name: peers.kilo.squat.ai
 spec:
@ -12,7 +13,7 @@ spec:
     listKind: PeerList
     plural: peers
     singular: peer
-  scope: Namespaced
+  scope: Cluster
   versions:
   - name: v1alpha1
     schema:
@ -67,7 +67,7 @@ spec:
       hostNetwork: true
       containers:
       - name: kilo
-        image: squat/kilo
+        image: squat/kilo:0.5.0
         args:
         - --kubeconfig=/etc/kubernetes/kubeconfig
        - --hostname=$(NODE_NAME)
@ -8,7 +8,7 @@ metadata:
 data:
   cni-conf.json: |
     {
-      "cniVersion":"0.3.1",
+      "cniVersion":"0.4.0",
       "name":"kilo",
       "plugins":[
         {
@ -101,7 +101,7 @@ spec:
       hostNetwork: true
       containers:
       - name: kilo
-        image: squat/kilo
+        image: squat/kilo:0.5.0
         args:
         - --kubeconfig=/etc/kubernetes/kubeconfig
         - --hostname=$(NODE_NAME)
@ -131,7 +131,7 @@ spec:
           readOnly: false
       initContainers:
       - name: install-cni
-        image: squat/kilo
+        image: squat/kilo:0.5.0
         command:
         - /bin/sh
        - -c
176
manifests/kilo-k3s-cilium.yaml
Normal file
176
manifests/kilo-k3s-cilium.yaml
Normal file
@ -0,0 +1,176 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kilo.squat.ai
|
||||
resources:
|
||||
- peers
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kilo
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kilo-scripts
|
||||
namespace: kube-system
|
||||
data:
|
||||
init.sh: |
|
||||
#!/bin/sh
|
||||
cat > /etc/kubernetes/kubeconfig <<EOF
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
name: kilo
|
||||
clusters:
|
||||
- cluster:
|
||||
server: $(sed -n 's/.*server: \(.*\)/\1/p' /var/lib/rancher/k3s/agent/kubelet.kubeconfig)
|
||||
certificate-authority: /var/lib/rancher/k3s/agent/server-ca.crt
|
||||
users:
|
||||
- name: kilo
|
||||
user:
|
||||
token: $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
|
||||
contexts:
|
||||
- name: kilo
|
||||
context:
|
||||
cluster: kilo
|
||||
namespace: ${NAMESPACE}
|
||||
user: kilo
|
||||
current-context: kilo
|
||||
EOF
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
serviceAccountName: kilo
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
- --cni=false
|
||||
- --compatibility=cilium
|
||||
- --local=false
|
||||
- --encapsulate=crosssubnet
|
||||
- --clean-up-interface=true
|
||||
- --log-level=all
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
ports:
|
||||
- containerPort: 1107
|
||||
name: metrics
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: kilo-dir
|
||||
mountPath: /var/lib/kilo
|
||||
- name: kubeconfig
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
- /scripts/init.sh
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: kubeconfig
|
||||
mountPath: /etc/kubernetes
|
||||
- name: scripts
|
||||
mountPath: /scripts/
|
||||
readOnly: true
|
||||
- name: k3s-agent
|
||||
mountPath: /var/lib/rancher/k3s/agent/
|
||||
readOnly: true
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: kilo-dir
|
||||
hostPath:
|
||||
path: /var/lib/kilo
|
||||
- name: kubeconfig
|
||||
emptyDir: {}
|
||||
- name: scripts
|
||||
configMap:
|
||||
name: kilo-scripts
|
||||
- name: k3s-agent
|
||||
hostPath:
|
||||
path: /var/lib/rancher/k3s/agent
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
@ -96,7 +96,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -127,7 +127,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
|
@ -8,7 +8,7 @@ metadata:
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.3.1",
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
@ -133,7 +133,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -164,7 +164,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
@ -185,7 +185,7 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
@ -264,7 +264,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -298,7 +298,7 @@ spec:
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
- name: boringtun
|
||||
image: leonnicolas/boringtun
|
||||
image: leonnicolas/boringtun:cc19859
|
||||
args:
|
||||
- --disable-drop-privileges=true
|
||||
- --foreground
|
||||
@ -311,7 +311,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
@ -332,7 +332,7 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
@ -391,7 +391,7 @@ spec:
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
metadata:
|
||||
name: nkml
|
||||
namespace: kube-system
|
||||
labels:
|
||||
@ -410,7 +410,7 @@ spec:
|
||||
containers:
|
||||
- name: nkml
|
||||
image: leonnicolas/nkml
|
||||
args:
|
||||
args:
|
||||
- --hostname=$(NODE_NAME)
|
||||
- --label-mod=wireguard
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
@ -428,7 +428,7 @@ spec:
|
||||
readOnly: true
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
|
@ -8,7 +8,7 @@ metadata:
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.3.1",
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
@ -131,7 +131,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -165,7 +165,7 @@ spec:
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
- name: boringtun
|
||||
image: leonnicolas/boringtun
|
||||
image: leonnicolas/boringtun:cc19859
|
||||
args:
|
||||
- --disable-drop-privileges=true
|
||||
- --foreground
|
||||
@ -178,7 +178,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
@ -199,7 +199,7 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
@ -8,7 +8,7 @@ metadata:
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.3.1",
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
@ -130,7 +130,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -160,7 +160,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: generate-kubeconfig
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
args:
|
||||
@ -181,7 +181,7 @@ spec:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
142
manifests/kilo-kubeadm-cilium.yaml
Normal file
142
manifests/kilo-kubeadm-cilium.yaml
Normal file
@ -0,0 +1,142 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kilo.squat.ai
|
||||
resources:
|
||||
- peers
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kilo
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
serviceAccountName: kilo
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
- --cni=false
|
||||
- --compatibility=cilium
|
||||
- --local=false
|
||||
# additional and also optional flag
|
||||
- --encapsulate=crosssubnet
|
||||
- --clean-up-interface=true
|
||||
- --subnet=172.31.254.0/24
|
||||
- --log-level=all
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
ports:
|
||||
- containerPort: 1107
|
||||
name: metrics
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: kilo-dir
|
||||
mountPath: /var/lib/kilo
|
||||
|
||||
# with kube-proxy configmap
|
||||
# - name: kubeconfig
|
||||
# mountPath: /etc/kubernetes
|
||||
# readOnly: true
|
||||
|
||||
# without kube-proxy host kubeconfig binding
|
||||
- name: kubeconfig
|
||||
mount_path: /etc/kubernetes/kubeconfig
|
||||
sub_path: admin.conf
|
||||
read_only: true
|
||||
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
readOnly: false
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: kilo-dir
|
||||
hostPath:
|
||||
path: /var/lib/kilo
|
||||
|
||||
# with kube-proxy configmap
|
||||
# - name: kubeconfig
|
||||
# configMap:
|
||||
# name: kube-proxy
|
||||
# items:
|
||||
# - key: kubeconfig.conf
|
||||
# path: kubeconfig
|
||||
|
||||
# without kube-proxy host kubeconfig binding
|
||||
- name: kubeconfig
|
||||
host_path:
|
||||
path: /etc/kubernetes
|
||||
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
142
manifests/kilo-kubeadm-flannel-userspace.yaml
Normal file
142
manifests/kilo-kubeadm-flannel-userspace.yaml
Normal file
@ -0,0 +1,142 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kilo.squat.ai
|
||||
resources:
|
||||
- peers
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kilo
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
serviceAccountName: kilo
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: boringtun
|
||||
image: leonnicolas/boringtun:cc19859
|
||||
args:
|
||||
- --disable-drop-privileges=true
|
||||
- --foreground
|
||||
- kilo0
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: wireguard
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
- name: kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
- --create-interface=false
|
||||
- --interface=kilo0
|
||||
- --cni=false
|
||||
- --compatibility=flannel
|
||||
- --local=false
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
ports:
|
||||
- containerPort: 1107
|
||||
name: metrics
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: cni-conf-dir
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: kilo-dir
|
||||
mountPath: /var/lib/kilo
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
readOnly: false
|
||||
- name: wireguard
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
volumes:
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
- name: kilo-dir
|
||||
hostPath:
|
||||
path: /var/lib/kilo
|
||||
- name: kubeconfig
|
||||
configMap:
|
||||
name: kube-proxy
|
||||
items:
|
||||
- key: kubeconfig.conf
|
||||
path: kubeconfig
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
- name: wireguard
|
||||
hostPath:
|
||||
path: /var/run/wireguard
|
@ -67,7 +67,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
|
207
manifests/kilo-kubeadm-userspace.yaml
Normal file
207
manifests/kilo-kubeadm-userspace.yaml
Normal file
@ -0,0 +1,207 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
"name":"kubernetes",
|
||||
"type":"bridge",
|
||||
"bridge":"kube-bridge",
|
||||
"isDefaultGateway":true,
|
||||
"forceAddress":true,
|
||||
"mtu": 1420,
|
||||
"ipam":{
|
||||
"type":"host-local"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type":"portmap",
|
||||
"snat":true,
|
||||
"capabilities":{
|
||||
"portMappings":true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kilo.squat.ai
|
||||
resources:
|
||||
- peers
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kilo
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kilo
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kilo
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
serviceAccountName: kilo
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: boringtun
|
||||
image: leonnicolas/boringtun:cc19859
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --disable-drop-privileges=true
|
||||
- --foreground
|
||||
- kilo0
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: wireguard
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
- name: kilo
|
||||
image: squat/kilo:0.5.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
- --create-interface=false
|
||||
- --interface=kilo0
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
ports:
|
||||
- containerPort: 1107
|
||||
name: metrics
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: cni-conf-dir
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: kilo-dir
|
||||
mountPath: /var/lib/kilo
|
||||
- name: kubeconfig
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
readOnly: false
|
||||
- name: wireguard
|
||||
mountPath: /var/run/wireguard
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: install-cni
|
||||
image: squat/kilo:0.5.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- set -e -x;
|
||||
cp /opt/cni/bin/* /host/opt/cni/bin/;
|
||||
TMP_CONF="$CNI_CONF_NAME".tmp;
|
||||
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
|
||||
rm -f /host/etc/cni/net.d/*;
|
||||
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
|
||||
env:
|
||||
- name: CNI_CONF_NAME
|
||||
value: 10-kilo.conflist
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: kilo
|
||||
key: cni-conf.json
|
||||
volumeMounts:
|
||||
- name: cni-bin-dir
|
||||
mountPath: /host/opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
mountPath: /host/etc/cni/net.d
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
- name: kilo-dir
|
||||
hostPath:
|
||||
path: /var/lib/kilo
|
||||
- name: kubeconfig
|
||||
configMap:
|
||||
name: kube-proxy
|
||||
items:
|
||||
- key: kubeconfig.conf
|
||||
path: kubeconfig
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
- name: wireguard
|
||||
hostPath:
|
||||
path: /var/run/wireguard
|
@ -8,7 +8,7 @@ metadata:
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.3.1",
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
@ -101,7 +101,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -131,7 +131,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
@ -67,7 +67,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
|
@ -8,7 +8,7 @@ metadata:
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"cniVersion":"0.3.1",
|
||||
"cniVersion":"0.4.0",
|
||||
"name":"kilo",
|
||||
"plugins":[
|
||||
{
|
||||
@ -101,7 +101,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kilo
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --hostname=$(NODE_NAME)
|
||||
@ -131,7 +131,7 @@ spec:
|
||||
readOnly: false
|
||||
initContainers:
|
||||
- name: install-cni
|
||||
image: squat/kilo
|
||||
image: squat/kilo:0.5.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
173
manifests/peer-validation.yaml
Normal file
173
manifests/peer-validation.yaml
Normal file
@ -0,0 +1,173 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kilo
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: "peers.kilo.squat.ai"
|
||||
webhooks:
|
||||
- name: "peers.kilo.squat.ai"
|
||||
rules:
|
||||
- apiGroups: ["kilo.squat.ai"]
|
||||
apiVersions: ["v1alpha1"]
|
||||
operations: ["CREATE","UPDATE"]
|
||||
resources: ["peers"]
|
||||
scope: "Cluster"
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: "kilo"
|
||||
name: "peer-validation"
|
||||
path: "/validate"
|
||||
admissionReviewVersions: ["v1"]
|
||||
sideEffects: None
|
||||
timeoutSeconds: 5
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: peer-validation-server
|
||||
namespace: kilo
|
||||
labels:
|
||||
app.kubernetes.io/name: peer-validation-server
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: peer-validation-server
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: peer-validation-server
|
||||
spec:
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
containers:
|
||||
- name: server
|
||||
image: squat/kilo:0.5.0
|
||||
args:
|
||||
- webhook
|
||||
- --cert-file=/run/secrets/tls/tls.crt
|
||||
- --key-file=/run/secrets/tls/tls.key
|
||||
- --listen-metrics=:1107
|
||||
- --listen=:8443
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
name: webhook
|
||||
- containerPort: 1107
|
||||
name: metrics
|
||||
volumeMounts:
|
||||
- name: tls
|
||||
mountPath: /run/secrets/tls
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: tls
|
||||
secret:
|
||||
secretName: peer-validation-webhook-tls
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: peer-validation
|
||||
namespace: kilo
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: peer-validation-server
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: webhook
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kilo-peer-validation
|
||||
namespace: kilo
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kilo-peer-validation
|
||||
rules:
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- validatingwebhookconfigurations
|
||||
resourceNames:
|
||||
- peers.kilo.squat.ai
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kilo-peer-validation
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kilo-peer-validation
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: kilo
|
||||
name: kilo-peer-validation
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: kilo-peer-validation
|
||||
namespace: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kilo-peer-validation
|
||||
namespace: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kilo-peer-validation
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: kilo
|
||||
name: kilo-peer-validation
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: cert-gen
|
||||
namespace: kilo
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: kilo-peer-validation
|
||||
initContainers:
|
||||
- name: create
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0
|
||||
args:
|
||||
- create
|
||||
- --namespace=kilo
|
||||
- --secret-name=peer-validation-webhook-tls
|
||||
- --host=peer-validation,peer-validation.kilo.svc
|
||||
- --key-name=tls.key
|
||||
- --cert-name=tls.crt
|
||||
containers:
|
||||
- name: patch
|
||||
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0
|
||||
args:
|
||||
- patch
|
||||
- --webhook-name=peers.kilo.squat.ai
|
||||
- --secret-name=peer-validation-webhook-tls
|
||||
- --namespace=kilo
|
||||
- --patch-mutating=false
|
||||
restartPolicy: OnFailure
|
||||
backoffLimit: 4
|
56
manifests/wg-exporter-role-kube-prometheus.yaml
Normal file
56
manifests/wg-exporter-role-kube-prometheus.yaml
Normal file
@ -0,0 +1,56 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: prometheus
|
||||
app.kubernetes.io/name: prometheus
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 2.26.0
|
||||
name: prometheus-k8s
|
||||
namespace: kilo
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: prometheus
|
||||
app.kubernetes.io/name: prometheus
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 2.26.0
|
||||
name: prometheus-k8s
|
||||
namespace: kilo
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: prometheus-k8s
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus-k8s
|
||||
namespace: monitoring
|
67
manifests/wg-exporter.yaml
Normal file
67
manifests/wg-exporter.yaml
Normal file
@ -0,0 +1,67 @@
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PodMonitor
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: wg-exporter
|
||||
app.kubernetes.io/part-of: kilo
|
||||
name: wg-exporter
|
||||
namespace: kilo
|
||||
spec:
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- kilo
|
||||
podMetricsEndpoints:
|
||||
- interval: 15s
|
||||
port: metrics
|
||||
path: /metrics
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/part-of: kilo
|
||||
app.kubernetes.io/name: wg-exporter
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: wg-exporter
|
||||
app.kubernetes.io/part-of: kilo
|
||||
name: wg-exporter
|
||||
namespace: kilo
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: wg-exporter
|
||||
app.kubernetes.io/part-of: kilo
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: wg-exporter
|
||||
app.kubernetes.io/part-of: kilo
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- -a
|
||||
- -i=kilo0
|
||||
- -p=9586
|
||||
image: mindflavor/prometheus-wireguard-exporter
|
||||
name: wg-exporter
|
||||
ports:
|
||||
- containerPort: 9586
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
securityContext:
|
||||
privileged: true
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
volumeMounts:
|
||||
- name: wireguard
|
||||
mountPath: /var/run/wireguard
|
||||
volumes:
|
||||
- name: wireguard
|
||||
hostPath:
|
||||
path: /var/run/wireguard
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
111 pkg/encapsulation/cilium.go (new file)
@ -0,0 +1,111 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package encapsulation

import (
    "fmt"
    "net"
    "sync"

    "github.com/vishvananda/netlink"

    "github.com/squat/kilo/pkg/iptables"
)

const ciliumDeviceName = "cilium_host"

type cilium struct {
    iface    int
    strategy Strategy
    ch       chan netlink.LinkUpdate
    done     chan struct{}
    // mu guards updates to the iface field.
    mu sync.Mutex
}

// NewCilium returns an encapsulator that uses Cilium.
func NewCilium(strategy Strategy) Encapsulator {
    return &cilium{
        ch:       make(chan netlink.LinkUpdate),
        done:     make(chan struct{}),
        strategy: strategy,
    }
}

// CleanUp closes the done channel.
func (f *cilium) CleanUp() error {
    close(f.done)
    return nil
}

// Gw returns the correct gateway IP associated with the given node.
func (f *cilium) Gw(_, _ net.IP, subnet *net.IPNet) net.IP {
    return subnet.IP
}

// Index returns the index of the Cilium interface.
func (f *cilium) Index() int {
    f.mu.Lock()
    defer f.mu.Unlock()
    return f.iface
}

// Init finds the Cilium interface index.
func (f *cilium) Init(_ int) error {
    if err := netlink.LinkSubscribe(f.ch, f.done); err != nil {
        return fmt.Errorf("failed to subscribe to updates to %s: %v", ciliumDeviceName, err)
    }
    go func() {
        var lu netlink.LinkUpdate
        for {
            select {
            case lu = <-f.ch:
                if lu.Attrs().Name == ciliumDeviceName {
                    f.mu.Lock()
                    f.iface = lu.Attrs().Index
                    f.mu.Unlock()
                }
            case <-f.done:
                return
            }
        }
    }()
    i, err := netlink.LinkByName(ciliumDeviceName)
    if _, ok := err.(netlink.LinkNotFoundError); ok {
        return nil
    }
    if err != nil {
        return fmt.Errorf("failed to query for Cilium interface: %v", err)
    }
    f.mu.Lock()
    f.iface = i.Attrs().Index
    f.mu.Unlock()
    return nil
}

// Rules is a no-op.
func (f *cilium) Rules(_ []*net.IPNet) []iptables.Rule {
    return nil
}

// Set is a no-op.
func (f *cilium) Set(_ *net.IPNet) error {
    return nil
}

// Strategy returns the configured strategy for encapsulation.
func (f *cilium) Strategy() Strategy {
    return f.strategy
}
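Init above has to cope with cilium_host not existing yet, so it subscribes to netlink link updates and records the interface index as soon as the device shows up, while also checking whether it already exists. The same pattern in isolation, as a hedged sketch built from the vishvananda/netlink calls used above (the device name is an arbitrary example; the function blocks until the link appears):

package main

import (
    "fmt"

    "github.com/vishvananda/netlink"
)

// watchForLink prints the index of the named link, whether it already
// exists or is only created after the watch starts.
func watchForLink(name string) error {
    ch := make(chan netlink.LinkUpdate)
    done := make(chan struct{})
    defer close(done)
    // Subscribe before looking the link up so that a link created in
    // between cannot be missed.
    if err := netlink.LinkSubscribe(ch, done); err != nil {
        return err
    }
    if l, err := netlink.LinkByName(name); err == nil {
        fmt.Println("index:", l.Attrs().Index)
        return nil
    }
    for lu := range ch {
        if lu.Attrs().Name == name {
            fmt.Println("index:", lu.Attrs().Index)
            return nil
        }
    }
    return nil
}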
@ -56,6 +56,8 @@ func (f *flannel) Gw(_, _ net.IP, subnet *net.IPNet) net.IP {

 // Index returns the index of the Flannel interface.
 func (f *flannel) Index() int {
+	f.mu.Lock()
+	defer f.mu.Unlock()
 	return f.iface
 }

@ -74,7 +74,7 @@ func (i *ipip) Rules(nodes []*net.IPNet) []iptables.Rule {
 	rules = append(rules, iptables.NewIPv6Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: jump to IPIP chain", "-j", "KILO-IPIP"))
 	for _, n := range nodes {
 		// Accept encapsulated traffic from peers.
-		rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(n.IP)), "filter", "KILO-IPIP", "-s", n.String(), "-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-j", "ACCEPT"))
+		rules = append(rules, iptables.NewRule(iptables.GetProtocol(n.IP), "filter", "KILO-IPIP", "-s", n.String(), "-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-j", "ACCEPT"))
 	}
 	// Drop all other IPIP traffic.
 	rules = append(rules, iptables.NewIPv4Rule("filter", "INPUT", "-p", proto, "-m", "comment", "--comment", "Kilo: reject other IPIP traffic", "-j", "DROP"))

@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build cgo
 // +build cgo

 package encapsulation

@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build !cgo
 // +build !cgo

 package encapsulation
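The //go:build lines added above are the build-constraint syntax introduced in Go 1.17; gofmt keeps them in sync with the legacy // +build form so the files still build with older toolchains. The cgo-only variant therefore carries both:

//go:build cgo
// +build cgo

package encapsulation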
@ -16,15 +16,33 @@ package iptables

 import (
 	"fmt"
+	"io"
 	"net"
+	"os"
 	"sync"
 	"time"

 	"github.com/coreos/go-iptables/iptables"
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
 )

+const ipv6ModuleDisabledPath = "/sys/module/ipv6/parameters/disable"
+
+func ipv6Disabled() (bool, error) {
+	f, err := os.Open(ipv6ModuleDisabledPath)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+	disabled := make([]byte, 1)
+	if _, err = io.ReadFull(f, disabled); err != nil {
+		return false, err
+	}
+	return disabled[0] == '1', nil
+}
+
 // Protocol represents an IP protocol.
 type Protocol byte
@ -36,11 +54,11 @@
 )

 // GetProtocol will return a protocol from the length of an IP address.
-func GetProtocol(length int) Protocol {
-	if length == net.IPv6len {
-		return ProtocolIPv6
+func GetProtocol(ip net.IP) Protocol {
+	if len(ip) == net.IPv4len || ip.To4() != nil {
+		return ProtocolIPv4
 	}
-	return ProtocolIPv4
+	return ProtocolIPv6
 }

 // Client represents any type that can administer iptables rules.
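The old signature inferred the protocol from the byte length of the address, which misclassifies IPv4 addresses held in Go's 16-byte representation: net.ParseIP returns a 16-byte slice even for dotted-quad input. The new version asks the address itself. A small illustrative sketch of the difference (not part of the repository):

package main

import (
    "fmt"
    "net"
)

func main() {
    ip := net.ParseIP("10.4.0.1")
    // ParseIP stores IPv4 addresses in a 16-byte, IPv4-in-IPv6 form,
    // so a pure length check would report IPv6 here.
    fmt.Println(len(ip))         // 16
    fmt.Println(ip.To4() != nil) // true: it is really an IPv4 address
}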
@ -203,6 +221,7 @@ type Controller struct {
 	errors       chan error
 	logger       log.Logger
 	resyncPeriod time.Duration
+	registerer   prometheus.Registerer

 	sync.Mutex
 	rules []Rule
@ -234,6 +253,12 @@ func WithClients(v4, v6 Client) ControllerOption {
 	}
 }

+func WithRegisterer(registerer prometheus.Registerer) ControllerOption {
+	return func(c *Controller) {
+		c.registerer = registerer
+	}
+}
+
 // New generates a new iptables rules controller.
 // If no options are given, IPv4 and IPv6 clients
 // will be instantiated using the regular iptables backend.
@ -250,14 +275,23 @@ func New(opts ...ControllerOption) (*Controller, error) {
 		if err != nil {
 			return nil, fmt.Errorf("failed to create iptables IPv4 client: %v", err)
 		}
-		c.v4 = v4
+		c.v4 = wrapWithMetrics(v4, "IPv4", c.registerer)
 	}
 	if c.v6 == nil {
-		v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
+		disabled, err := ipv6Disabled()
 		if err != nil {
-			return nil, fmt.Errorf("failed to create iptables IPv6 client: %v", err)
+			return nil, fmt.Errorf("failed to check IPv6 status: %v", err)
 		}
+		if disabled {
+			level.Info(c.logger).Log("msg", "IPv6 is disabled in the kernel; disabling the IPv6 iptables controller")
+			c.v6 = &fakeClient{}
+		} else {
+			v6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
+			if err != nil {
+				return nil, fmt.Errorf("failed to create iptables IPv6 client: %v", err)
+			}
+			c.v6 = wrapWithMetrics(v6, "IPv6", c.registerer)
+		}
-		c.v6 = v6
 	}
 	return c, nil
 }
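With these changes a caller can opt into per-operation metrics by passing a Prometheus registerer, and when IPv6 is disabled in the kernel the controller logs that fact and substitutes the in-memory fake client rather than failing. A hedged construction sketch using only the options shown above (it needs a Linux host with iptables available, since the default clients talk to the real backend):

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/squat/kilo/pkg/iptables"
)

func main() {
    // WithRegisterer wires up the kilo_iptables_operations_total counter;
    // passing no registerer leaves the clients unwrapped.
    controller, err := iptables.New(iptables.WithRegisterer(prometheus.NewRegistry()))
    if err != nil {
        log.Fatal(err)
    }
    _ = controller
}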
115 pkg/iptables/metrics.go (new file)
@ -0,0 +1,115 @@
// Copyright 2022 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package iptables

import (
    "github.com/prometheus/client_golang/prometheus"
)

type metricsClientWrapper struct {
    client           Client
    operationCounter *prometheus.CounterVec
}

func wrapWithMetrics(client Client, protocol string, registerer prometheus.Registerer) Client {
    if registerer == nil {
        return client
    }

    labelNames := []string{
        "operation",
        "table",
        "chain",
    }
    counter := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name:        "kilo_iptables_operations_total",
        Help:        "Number of iptables operations.",
        ConstLabels: prometheus.Labels{"protocol": protocol},
    }, labelNames)
    registerer.MustRegister(counter)
    return &metricsClientWrapper{client, counter}
}

func (m *metricsClientWrapper) AppendUnique(table string, chain string, rule ...string) error {
    m.operationCounter.With(prometheus.Labels{
        "operation": "AppendUnique",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.AppendUnique(table, chain, rule...)
}

func (m *metricsClientWrapper) Delete(table string, chain string, rule ...string) error {
    m.operationCounter.With(prometheus.Labels{
        "operation": "Delete",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.Delete(table, chain, rule...)
}

func (m *metricsClientWrapper) Exists(table string, chain string, rule ...string) (bool, error) {
    m.operationCounter.With(prometheus.Labels{
        "operation": "Exists",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.Exists(table, chain, rule...)
}

func (m *metricsClientWrapper) List(table string, chain string) ([]string, error) {
    m.operationCounter.With(prometheus.Labels{
        "operation": "List",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.List(table, chain)
}

func (m *metricsClientWrapper) ClearChain(table string, chain string) error {
    m.operationCounter.With(prometheus.Labels{
        "operation": "ClearChain",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.ClearChain(table, chain)
}

func (m *metricsClientWrapper) DeleteChain(table string, chain string) error {
    m.operationCounter.With(prometheus.Labels{
        "operation": "DeleteChain",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.DeleteChain(table, chain)
}

func (m *metricsClientWrapper) NewChain(table string, chain string) error {
    m.operationCounter.With(prometheus.Labels{
        "operation": "NewChain",
        "table":     table,
        "chain":     chain,
    }).Inc()
    return m.client.NewChain(table, chain)
}

func (m *metricsClientWrapper) ListChains(table string) ([]string, error) {
    m.operationCounter.With(prometheus.Labels{
        "operation": "ListChains",
        "table":     table,
        "chain":     "*",
    }).Inc()
    return m.client.ListChains(table)
}
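wrapWithMetrics is a plain decorator: it registers one CounterVec per protocol and increments it with operation, table, and chain labels before delegating to the wrapped Client. Because both the wrapper and fakeClient are unexported, the sketch below is written as if it lived inside the package (a hypothetical test-style snippet; it assumes the in-memory fakeClient is usable at its zero value, which New itself relies on when IPv6 is disabled):

package iptables

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// exampleWrapWithMetrics shows the counter produced by the wrapper.
func exampleWrapWithMetrics() {
    reg := prometheus.NewRegistry()
    c := wrapWithMetrics(&fakeClient{}, "IPv4", reg)

    // Every call through the wrapper bumps kilo_iptables_operations_total.
    _ = c.NewChain("filter", "KILO-EXAMPLE")
    _ = c.AppendUnique("filter", "KILO-EXAMPLE", "-j", "ACCEPT")

    mfs, _ := reg.Gather()
    for _, mf := range mfs {
        // One metric family with two label combinations (NewChain, AppendUnique).
        fmt.Println(mf.GetName(), "series:", len(mf.GetMetric()))
    }
}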
@ -48,6 +48,7 @@ var PeerShortNames = []string{"peer"}
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Cluster

 // Peer is a WireGuard peer that should have access to the VPN.
 type Peer struct {
@ -1,6 +1,7 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

-// Copyright 2021 the Kilo authors
+// Copyright 2022 the Kilo authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@ -25,13 +25,15 @@ import (
 	"strings"
 	"time"

 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
+	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
 	v1 "k8s.io/api/core/v1"
 	apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/validation"
 	v1informers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
 	v1listers "k8s.io/client-go/listers/core/v1"
@ -67,6 +69,8 @@ const (
 	jsonRemovePatch = `{"op": "remove", "path": "%s"}`
 )

+var logger = log.NewNopLogger()
+
 type backend struct {
 	nodes *nodeBackend
 	peers *peerBackend
@ -99,10 +103,12 @@ type peerBackend struct {
 }

 // New creates a new instance of a mesh.Backend.
-func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string) mesh.Backend {
+func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string, l log.Logger) mesh.Backend {
 	ni := v1informers.NewNodeInformer(c, 5*time.Minute, nil)
 	pi := v1alpha1informers.NewPeerInformer(kc, 5*time.Minute, nil)

+	logger = l
+
 	return &backend{
 		&nodeBackend{
 			client: c,
@ -122,7 +128,7 @@ func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Inter
 }

 // CleanUp removes configuration applied to the backend.
-func (nb *nodeBackend) CleanUp(name string) error {
+func (nb *nodeBackend) CleanUp(ctx context.Context, name string) error {
 	patch := []byte("[" + strings.Join([]string{
 		fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(endpointAnnotationKey, "/", jsonPatchSlash, 1))),
 		fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(internalIPAnnotationKey, "/", jsonPatchSlash, 1))),
@ -132,7 +138,7 @@ func (nb *nodeBackend) CleanUp(name string) error {
 		fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(discoveredEndpointsKey, "/", jsonPatchSlash, 1))),
 		fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(granularityKey, "/", jsonPatchSlash, 1))),
 	}, ",") + "]")
-	if _, err := nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil {
+	if _, err := nb.client.CoreV1().Nodes().Patch(ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil {
 		return fmt.Errorf("failed to patch node: %v", err)
 	}
 	return nil
@ -149,9 +155,9 @@ func (nb *nodeBackend) Get(name string) (*mesh.Node, error) {

 // Init initializes the backend; for this backend that means
 // syncing the informer cache.
-func (nb *nodeBackend) Init(stop <-chan struct{}) error {
-	go nb.informer.Run(stop)
-	if ok := cache.WaitForCacheSync(stop, func() bool {
+func (nb *nodeBackend) Init(ctx context.Context) error {
+	go nb.informer.Run(ctx.Done())
+	if ok := cache.WaitForCacheSync(ctx.Done(), func() bool {
 		return nb.informer.HasSynced()
 	}); !ok {
 		return errors.New("failed to sync node cache")
@ -206,7 +212,7 @@ func (nb *nodeBackend) List() ([]*mesh.Node, error) {
 }

 // Set sets the fields of a node.
-func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
+func (nb *nodeBackend) Set(ctx context.Context, name string, node *mesh.Node) error {
 	old, err := nb.lister.Get(name)
 	if err != nil {
 		return fmt.Errorf("failed to find node: %v", err)
@ -218,7 +224,7 @@ func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
 	} else {
 		n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
 	}
-	n.ObjectMeta.Annotations[keyAnnotationKey] = string(node.Key)
+	n.ObjectMeta.Annotations[keyAnnotationKey] = node.Key.String()
 	n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
 	if node.WireGuardIP == nil {
 		n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = ""
@ -247,7 +253,7 @@ func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
 	if err != nil {
 		return fmt.Errorf("failed to create patch for node %q: %v", n.Name, err)
 	}
-	if _, err = nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
+	if _, err = nb.client.CoreV1().Nodes().Patch(ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
 		return fmt.Errorf("failed to patch node: %v", err)
 	}
 	return nil
@ -276,9 +282,9 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
location = node.ObjectMeta.Labels[topologyLabel]
|
||||
}
|
||||
// Allow the endpoint to be overridden.
|
||||
endpoint := parseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
|
||||
endpoint := wireguard.ParseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
|
||||
if endpoint == nil {
|
||||
endpoint = parseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
|
||||
endpoint = wireguard.ParseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
|
||||
}
|
||||
// Allow the internal IP to be overridden.
|
||||
internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey])
|
||||
@ -292,13 +298,11 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
internalIP = nil
|
||||
}
|
||||
// Set Wireguard PersistentKeepalive setting for the node.
|
||||
var persistentKeepalive int64
|
||||
if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; !ok {
|
||||
persistentKeepalive = 0
|
||||
} else {
|
||||
if persistentKeepalive, err = strconv.ParseInt(keepAlive, 10, 64); err != nil {
|
||||
persistentKeepalive = 0
|
||||
}
|
||||
var persistentKeepalive time.Duration
|
||||
if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; ok {
|
||||
// We can ignore the error, because p will be set to 0 if an error occurs.
|
||||
p, _ := strconv.ParseInt(keepAlive, 10, 64)
|
||||
persistentKeepalive = time.Duration(p) * time.Second
|
||||
}
|
||||
var lastSeen int64
|
||||
if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok {
|
||||
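The rewritten keepalive handling converts the annotation value (whole seconds) into a time.Duration and quietly treats missing or malformed input as zero. A tiny self-contained sketch of that conversion; the annotation key used here is illustrative:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// keepaliveFromAnnotation converts an annotation value in seconds to a
// time.Duration; missing or malformed values yield 0, matching the diff above.
func keepaliveFromAnnotation(annotations map[string]string, key string) time.Duration {
	v, ok := annotations[key]
	if !ok {
		return 0
	}
	s, _ := strconv.ParseInt(v, 10, 64) // s is 0 on error.
	return time.Duration(s) * time.Second
}

func main() {
	a := map[string]string{"kilo.squat.ai/persistent-keepalive": "25"}
	fmt.Println(keepaliveFromAnnotation(a, "kilo.squat.ai/persistent-keepalive")) // 25s
	fmt.Println(keepaliveFromAnnotation(a, "missing"))                            // 0s
}
```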
@ -308,7 +312,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
lastSeen = 0
|
||||
}
|
||||
}
|
||||
var discoveredEndpoints map[string]*wireguard.Endpoint
|
||||
var discoveredEndpoints map[string]*net.UDPAddr
|
||||
if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok {
|
||||
err := json.Unmarshal([]byte(de), &discoveredEndpoints)
|
||||
if err != nil {
|
||||
@ -316,11 +320,11 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
}
|
||||
}
|
||||
// Set allowed IPs for a location.
|
||||
var allowedLocationIPs []*net.IPNet
|
||||
var allowedLocationIPs []net.IPNet
|
||||
if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok {
|
||||
for _, ip := range strings.Split(str, ",") {
|
||||
if ipnet := normalizeIP(ip); ipnet != nil {
|
||||
allowedLocationIPs = append(allowedLocationIPs, ipnet)
|
||||
allowedLocationIPs = append(allowedLocationIPs, *ipnet)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -335,6 +339,9 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO log some error or warning.
|
||||
key, _ := wgtypes.ParseKey(node.ObjectMeta.Annotations[keyAnnotationKey])
|
||||
|
||||
return &mesh.Node{
|
||||
// Endpoint and InternalIP should only ever fail to parse if the
|
||||
// remote node's agent has not yet set its IP address;
|
||||
@ -345,12 +352,12 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
|
||||
Endpoint: endpoint,
|
||||
NoInternalIP: noInternalIP,
|
||||
InternalIP: internalIP,
|
||||
Key: []byte(node.ObjectMeta.Annotations[keyAnnotationKey]),
|
||||
Key: key,
|
||||
LastSeen: lastSeen,
|
||||
Leader: leader,
|
||||
Location: location,
|
||||
Name: node.Name,
|
||||
PersistentKeepalive: int(persistentKeepalive),
|
||||
PersistentKeepalive: persistentKeepalive,
|
||||
Subnet: subnet,
|
||||
// WireGuardIP can fail to parse if the node is not a leader or if
|
||||
// the node's agent has not yet reconciled. In either case, the IP
|
||||
@ -367,14 +374,14 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
|
||||
if peer == nil {
|
||||
return nil
|
||||
}
|
||||
var aips []*net.IPNet
|
||||
var aips []net.IPNet
|
||||
for _, aip := range peer.Spec.AllowedIPs {
|
||||
aip := normalizeIP(aip)
|
||||
// Skip any invalid IPs.
|
||||
if aip == nil {
|
||||
continue
|
||||
}
|
||||
aips = append(aips, aip)
|
||||
aips = append(aips, *aip)
|
||||
}
|
||||
var endpoint *wireguard.Endpoint
|
||||
if peer.Spec.Endpoint != nil {
|
||||
@ -384,42 +391,47 @@ func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
|
||||
} else {
|
||||
ip = ip.To16()
|
||||
}
|
||||
if peer.Spec.Endpoint.Port > 0 && (ip != nil || peer.Spec.Endpoint.DNS != "") {
|
||||
endpoint = &wireguard.Endpoint{
|
||||
DNSOrIP: wireguard.DNSOrIP{
|
||||
DNS: peer.Spec.Endpoint.DNS,
|
||||
IP: ip,
|
||||
},
|
||||
Port: peer.Spec.Endpoint.Port,
|
||||
if peer.Spec.Endpoint.Port > 0 {
|
||||
if ip != nil {
|
||||
endpoint = wireguard.NewEndpoint(ip, int(peer.Spec.Endpoint.Port))
|
||||
}
|
||||
if peer.Spec.Endpoint.DNS != "" {
|
||||
endpoint = wireguard.ParseEndpoint(fmt.Sprintf("%s:%d", peer.Spec.Endpoint.DNS, peer.Spec.Endpoint.Port))
|
||||
}
|
||||
}
|
||||
}
|
||||
var key []byte
|
||||
if len(peer.Spec.PublicKey) > 0 {
|
||||
key = []byte(peer.Spec.PublicKey)
|
||||
|
||||
key, err := wgtypes.ParseKey(peer.Spec.PublicKey)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
|
||||
}
|
||||
var psk []byte
|
||||
if len(peer.Spec.PresharedKey) > 0 {
|
||||
psk = []byte(peer.Spec.PresharedKey)
|
||||
var psk *wgtypes.Key
|
||||
if k, err := wgtypes.ParseKey(peer.Spec.PresharedKey); err != nil {
|
||||
// Leave psk nil to avoid setting the preshared key to the zero value wgtypes.Key{}.
|
||||
psk = nil
|
||||
} else {
|
||||
psk = &k
|
||||
}
|
||||
var pka int
|
||||
var pka time.Duration
|
||||
if peer.Spec.PersistentKeepalive > 0 {
|
||||
pka = peer.Spec.PersistentKeepalive
|
||||
pka = time.Duration(peer.Spec.PersistentKeepalive) * time.Second
|
||||
}
|
||||
return &mesh.Peer{
|
||||
Name: peer.Name,
|
||||
Peer: wireguard.Peer{
|
||||
AllowedIPs: aips,
|
||||
Endpoint: endpoint,
|
||||
PersistentKeepalive: pka,
|
||||
PresharedKey: psk,
|
||||
PublicKey: key,
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: aips,
|
||||
PersistentKeepaliveInterval: &pka,
|
||||
PresharedKey: psk,
|
||||
PublicKey: key,
|
||||
},
|
||||
Endpoint: endpoint,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CleanUp removes configuration applied to the backend.
|
||||
func (pb *peerBackend) CleanUp(name string) error {
|
||||
func (pb *peerBackend) CleanUp(_ context.Context, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
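In the translatePeer hunk above, the peer now carries a wgtypes.PeerConfig directly: the keepalive is a *time.Duration and the preshared key a *wgtypes.Key, so "unset" is expressed by a nil pointer rather than a zero value. A hedged sketch of building such a config with the real wgctrl types (addresses and keepalive are example values):

```go
package main

import (
	"fmt"
	"net"
	"time"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	priv, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	_, allowed, err := net.ParseCIDR("10.5.0.1/32")
	if err != nil {
		panic(err)
	}
	pka := 25 * time.Second

	// PeerConfig takes pointers for the keepalive interval and preshared
	// key; leaving PresharedKey nil means "no PSK" rather than a zero key.
	peer := wgtypes.PeerConfig{
		PublicKey:                   priv.PublicKey(),
		AllowedIPs:                  []net.IPNet{*allowed},
		PersistentKeepaliveInterval: &pka,
	}
	fmt.Printf("peer %s keepalive %s\n", peer.PublicKey, *peer.PersistentKeepaliveInterval)
}
```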
@ -434,14 +446,14 @@ func (pb *peerBackend) Get(name string) (*mesh.Peer, error) {
|
||||
|
||||
// Init initializes the backend; for this backend that means
|
||||
// syncing the informer cache.
|
||||
func (pb *peerBackend) Init(stop <-chan struct{}) error {
|
||||
func (pb *peerBackend) Init(ctx context.Context) error {
|
||||
// Check the presence of the CRD peers.kilo.squat.ai.
|
||||
if _, err := pb.extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), strings.Join([]string{v1alpha1.PeerPlural, v1alpha1.GroupName}, "."), metav1.GetOptions{}); err != nil {
|
||||
if _, err := pb.extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, strings.Join([]string{v1alpha1.PeerPlural, v1alpha1.GroupName}, "."), metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("CRD is not present: %v", err)
|
||||
}
|
||||
|
||||
go pb.informer.Run(stop)
|
||||
if ok := cache.WaitForCacheSync(stop, func() bool {
|
||||
go pb.informer.Run(ctx.Done())
|
||||
if ok := cache.WaitForCacheSync(ctx.Done(), func() bool {
|
||||
return pb.informer.HasSynced()
|
||||
}); !ok {
|
||||
return errors.New("failed to sync peer cache")
|
||||
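peerBackend.Init first verifies that the peers CRD is installed before syncing its informer. A sketch of that check with the apiextensions clientset; it assumes a reachable kubeconfig at the default location, and the CRD name is taken from the diff:

```go
package main

import (
	"context"
	"fmt"

	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

// hasPeerCRD reports whether the peers.kilo.squat.ai CRD exists in the cluster.
func hasPeerCRD(ctx context.Context, c apiextensionsclient.Interface) error {
	if _, err := c.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, "peers.kilo.squat.ai", metav1.GetOptions{}); err != nil {
		return fmt.Errorf("CRD is not present: %v", err)
	}
	return nil
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := apiextensionsclient.NewForConfigOrDie(config)
	if err := hasPeerCRD(context.Background(), client); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("peers CRD found")
}
```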
@ -500,7 +512,7 @@ func (pb *peerBackend) List() ([]*mesh.Peer, error) {
|
||||
}
|
||||
|
||||
// Set sets the fields of a peer.
|
||||
func (pb *peerBackend) Set(name string, peer *mesh.Peer) error {
|
||||
func (pb *peerBackend) Set(ctx context.Context, name string, peer *mesh.Peer) error {
|
||||
old, err := pb.lister.Get(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find peer: %v", err)
|
||||
@ -511,22 +523,26 @@ func (pb *peerBackend) Set(name string, peer *mesh.Peer) error {
|
||||
p.Spec.AllowedIPs[i] = peer.AllowedIPs[i].String()
|
||||
}
|
||||
if peer.Endpoint != nil {
|
||||
var ip string
|
||||
if peer.Endpoint.IP != nil {
|
||||
ip = peer.Endpoint.IP.String()
|
||||
}
|
||||
p.Spec.Endpoint = &v1alpha1.PeerEndpoint{
|
||||
DNSOrIP: v1alpha1.DNSOrIP{
|
||||
IP: ip,
|
||||
DNS: peer.Endpoint.DNS,
|
||||
IP: peer.Endpoint.IP().String(),
|
||||
DNS: peer.Endpoint.DNS(),
|
||||
},
|
||||
Port: peer.Endpoint.Port,
|
||||
Port: uint32(peer.Endpoint.Port()),
|
||||
}
|
||||
}
|
||||
p.Spec.PersistentKeepalive = peer.PersistentKeepalive
|
||||
p.Spec.PresharedKey = string(peer.PresharedKey)
|
||||
p.Spec.PublicKey = string(peer.PublicKey)
|
||||
if _, err = pb.client.KiloV1alpha1().Peers().Update(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
|
||||
if peer.PersistentKeepaliveInterval == nil {
|
||||
p.Spec.PersistentKeepalive = 0
|
||||
} else {
|
||||
p.Spec.PersistentKeepalive = int(*peer.PersistentKeepaliveInterval / time.Second)
|
||||
}
|
||||
if peer.PresharedKey == nil {
|
||||
p.Spec.PresharedKey = ""
|
||||
} else {
|
||||
p.Spec.PresharedKey = peer.PresharedKey.String()
|
||||
}
|
||||
p.Spec.PublicKey = peer.PublicKey.String()
|
||||
if _, err = pb.client.KiloV1alpha1().Peers().Update(ctx, p, metav1.UpdateOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to update peer: %v", err)
|
||||
}
|
||||
return nil
|
||||
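The Set hunk above performs the inverse translation, folding the wgctrl pointer types back into plain spec fields: a nil keepalive becomes 0 seconds and a nil preshared key becomes the empty string. A short sketch of just those two conversions, with helper names chosen here for illustration:

```go
package main

import (
	"fmt"
	"time"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// specKeepalive converts an optional wgctrl keepalive into whole seconds.
func specKeepalive(d *time.Duration) int {
	if d == nil {
		return 0
	}
	return int(*d / time.Second)
}

// specPresharedKey converts an optional preshared key into its string form.
func specPresharedKey(k *wgtypes.Key) string {
	if k == nil {
		return ""
	}
	return k.String()
}

func main() {
	pka := 25 * time.Second
	psk, err := wgtypes.GenerateKey()
	if err != nil {
		panic(err)
	}
	fmt.Println(specKeepalive(&pka), specKeepalive(nil))                 // 25 0
	fmt.Println(len(specPresharedKey(&psk)), specPresharedKey(nil) == "") // 44 true
}
```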
@ -549,35 +565,3 @@ func normalizeIP(ip string) *net.IPNet {
|
||||
ipNet.IP = i.To16()
|
||||
return ipNet
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string) *wireguard.Endpoint {
|
||||
if len(endpoint) == 0 {
|
||||
return nil
|
||||
}
|
||||
parts := strings.Split(endpoint, ":")
|
||||
if len(parts) < 2 {
|
||||
return nil
|
||||
}
|
||||
portRaw := parts[len(parts)-1]
|
||||
hostRaw := strings.Trim(strings.Join(parts[:len(parts)-1], ":"), "[]")
|
||||
port, err := strconv.ParseUint(portRaw, 10, 32)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(validation.IsValidPortNum(int(port))) != 0 {
|
||||
return nil
|
||||
}
|
||||
ip := net.ParseIP(hostRaw)
|
||||
if ip == nil {
|
||||
if len(validation.IsDNS1123Subdomain(hostRaw)) == 0 {
|
||||
return &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{DNS: hostRaw}, Port: uint32(port)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
ip = ip4
|
||||
} else {
|
||||
ip = ip.To16()
|
||||
}
|
||||
return &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: ip}, Port: uint32(port)}
|
||||
}
|
||||
|
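The bespoke parseEndpoint above is deleted in favor of wireguard.ParseEndpoint in pkg/wireguard. The tricky part, splitting off the port while respecting bracketed IPv6 literals, is something the standard library already handles; the sketch below shows that approach and is not the package's actual implementation:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitEndpoint separates "host:port" into its parts, accepting
// bracketed IPv6 literals such as "[ff02::114]:51820".
func splitEndpoint(endpoint string) (host string, port uint16, err error) {
	h, p, err := net.SplitHostPort(endpoint)
	if err != nil {
		return "", 0, err
	}
	n, err := strconv.ParseUint(p, 10, 16)
	if err != nil {
		return "", 0, err
	}
	return h, uint16(n), nil
}

func main() {
	for _, e := range []string{"10.0.0.1:51820", "[ff02::114]:51820", "example.com:51821"} {
		h, p, err := splitEndpoint(e)
		fmt.Println(h, p, err)
	}
}
```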
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -17,8 +17,10 @@ package k8s
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kylelemons/godebug/pretty"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
|
||||
@ -26,6 +28,30 @@ import (
|
||||
"github.com/squat/kilo/pkg/wireguard"
|
||||
)
|
||||
|
||||
func mustKey() (k wgtypes.Key) {
|
||||
var err error
|
||||
if k, err = wgtypes.GeneratePrivateKey(); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mustPSKKey() (key *wgtypes.Key) {
|
||||
if k, err := wgtypes.GenerateKey(); err != nil {
|
||||
panic(err.Error())
|
||||
} else {
|
||||
key = &k
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
fooKey = mustKey()
|
||||
pskKey = mustPSKKey()
|
||||
second = time.Second
|
||||
zero = time.Duration(0)
|
||||
)
|
||||
|
||||
func TestTranslateNode(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
@ -54,8 +80,19 @@ func TestTranslateNode(t *testing.T) {
|
||||
internalIPAnnotationKey: "10.0.0.2/32",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort),
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2").To4(), Mask: net.CIDRMask(32, 32)},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid ips with ipv6",
|
||||
annotations: map[string]string{
|
||||
endpointAnnotationKey: "[ff10::10]:51820",
|
||||
internalIPAnnotationKey: "ff60::10/64",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("ff10::10").To16(), mesh.DefaultKiloPort),
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("ff60::10").To16(), Mask: net.CIDRMask(64, 128)},
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -68,7 +105,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
name: "normalize subnet",
|
||||
annotations: map[string]string{},
|
||||
out: &mesh.Node{
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0").To4(), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
subnet: "10.2.0.1/24",
|
||||
},
|
||||
@ -76,7 +113,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
name: "valid subnet",
|
||||
annotations: map[string]string{},
|
||||
out: &mesh.Node{
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
subnet: "10.2.1.0/24",
|
||||
},
|
||||
@ -108,7 +145,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
forceEndpointAnnotationKey: "-10.0.0.2:51821",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -118,7 +155,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
forceEndpointAnnotationKey: "10.0.0.2:51821",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.2")}, Port: 51821},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -127,7 +164,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
persistentKeepaliveKey: "25",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
PersistentKeepalive: 25,
|
||||
PersistentKeepalive: 25 * time.Second,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -137,7 +174,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
forceInternalIPAnnotationKey: "-10.1.0.2/24",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(24, 32)},
|
||||
NoInternalIP: false,
|
||||
},
|
||||
},
|
||||
@ -148,7 +185,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
forceInternalIPAnnotationKey: "10.1.0.2/24",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(24, 32)},
|
||||
NoInternalIP: false,
|
||||
},
|
||||
},
|
||||
@ -166,7 +203,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
forceEndpointAnnotationKey: "10.0.0.2:51821",
|
||||
forceInternalIPAnnotationKey: "10.1.0.2/32",
|
||||
internalIPAnnotationKey: "10.1.0.1/32",
|
||||
keyAnnotationKey: "foo",
|
||||
keyAnnotationKey: fooKey.String(),
|
||||
lastSeenAnnotationKey: "1000000000",
|
||||
leaderAnnotationKey: "",
|
||||
locationAnnotationKey: "b",
|
||||
@ -177,14 +214,45 @@ func TestTranslateNode(t *testing.T) {
|
||||
RegionLabelKey: "a",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.2")}, Port: 51821},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.2").To4(), 51821),
|
||||
NoInternalIP: false,
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(32, 32)},
|
||||
Key: []byte("foo"),
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(32, 32)},
|
||||
Key: fooKey,
|
||||
LastSeen: 1000000000,
|
||||
Leader: true,
|
||||
Location: "b",
|
||||
PersistentKeepalive: 25,
|
||||
PersistentKeepalive: 25 * time.Second,
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0").To4(), Mask: net.CIDRMask(24, 32)},
|
||||
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1").To4(), Mask: net.CIDRMask(16, 32)},
|
||||
},
|
||||
subnet: "10.2.1.0/24",
|
||||
},
|
||||
{
|
||||
name: "complete with ipv6",
|
||||
annotations: map[string]string{
|
||||
endpointAnnotationKey: "10.0.0.1:51820",
|
||||
forceEndpointAnnotationKey: "[1100::10]:51821",
|
||||
forceInternalIPAnnotationKey: "10.1.0.2/32",
|
||||
internalIPAnnotationKey: "10.1.0.1/32",
|
||||
keyAnnotationKey: fooKey.String(),
|
||||
lastSeenAnnotationKey: "1000000000",
|
||||
leaderAnnotationKey: "",
|
||||
locationAnnotationKey: "b",
|
||||
persistentKeepaliveKey: "25",
|
||||
wireGuardIPAnnotationKey: "10.4.0.1/16",
|
||||
},
|
||||
labels: map[string]string{
|
||||
RegionLabelKey: "a",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("1100::10"), 51821),
|
||||
NoInternalIP: false,
|
||||
InternalIP: &net.IPNet{IP: net.ParseIP("10.1.0.2"), Mask: net.CIDRMask(32, 32)},
|
||||
Key: fooKey,
|
||||
LastSeen: 1000000000,
|
||||
Leader: true,
|
||||
Location: "b",
|
||||
PersistentKeepalive: 25 * time.Second,
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
|
||||
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
|
||||
},
|
||||
@ -195,7 +263,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
annotations: map[string]string{
|
||||
endpointAnnotationKey: "10.0.0.1:51820",
|
||||
internalIPAnnotationKey: "",
|
||||
keyAnnotationKey: "foo",
|
||||
keyAnnotationKey: fooKey.String(),
|
||||
lastSeenAnnotationKey: "1000000000",
|
||||
locationAnnotationKey: "b",
|
||||
persistentKeepaliveKey: "25",
|
||||
@ -205,13 +273,13 @@ func TestTranslateNode(t *testing.T) {
|
||||
RegionLabelKey: "a",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: 51820},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820),
|
||||
InternalIP: nil,
|
||||
Key: []byte("foo"),
|
||||
Key: fooKey,
|
||||
LastSeen: 1000000000,
|
||||
Leader: false,
|
||||
Location: "b",
|
||||
PersistentKeepalive: 25,
|
||||
PersistentKeepalive: 25 * time.Second,
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
|
||||
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
|
||||
},
|
||||
@ -223,7 +291,7 @@ func TestTranslateNode(t *testing.T) {
|
||||
endpointAnnotationKey: "10.0.0.1:51820",
|
||||
internalIPAnnotationKey: "10.1.0.1/32",
|
||||
forceInternalIPAnnotationKey: "",
|
||||
keyAnnotationKey: "foo",
|
||||
keyAnnotationKey: fooKey.String(),
|
||||
lastSeenAnnotationKey: "1000000000",
|
||||
locationAnnotationKey: "b",
|
||||
persistentKeepaliveKey: "25",
|
||||
@ -233,14 +301,14 @@ func TestTranslateNode(t *testing.T) {
|
||||
RegionLabelKey: "a",
|
||||
},
|
||||
out: &mesh.Node{
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: 51820},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820),
|
||||
NoInternalIP: true,
|
||||
InternalIP: nil,
|
||||
Key: []byte("foo"),
|
||||
Key: fooKey,
|
||||
LastSeen: 1000000000,
|
||||
Leader: false,
|
||||
Location: "b",
|
||||
PersistentKeepalive: 25,
|
||||
PersistentKeepalive: 25 * time.Second,
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
|
||||
WireGuardIP: &net.IPNet{IP: net.ParseIP("10.4.0.1"), Mask: net.CIDRMask(16, 32)},
|
||||
},
|
||||
@ -266,7 +334,13 @@ func TestTranslatePeer(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ips",
|
||||
@ -276,7 +350,13 @@ func TestTranslatePeer(t *testing.T) {
|
||||
"foo",
|
||||
},
|
||||
},
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid ips",
|
||||
@ -288,9 +368,12 @@ func TestTranslatePeer(t *testing.T) {
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)},
|
||||
},
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -305,7 +388,13 @@ func TestTranslatePeer(t *testing.T) {
|
||||
Port: mesh.DefaultKiloPort,
|
||||
},
|
||||
},
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only endpoint port",
|
||||
@ -314,7 +403,13 @@ func TestTranslatePeer(t *testing.T) {
|
||||
Port: mesh.DefaultKiloPort,
|
||||
},
|
||||
},
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid endpoint ip",
|
||||
@ -328,10 +423,29 @@ func TestTranslatePeer(t *testing.T) {
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
Endpoint: &wireguard.Endpoint{
|
||||
DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")},
|
||||
Port: mesh.DefaultKiloPort,
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("10.0.0.1").To4(), mesh.DefaultKiloPort),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid endpoint ipv6",
|
||||
spec: v1alpha1.PeerSpec{
|
||||
Endpoint: &v1alpha1.PeerEndpoint{
|
||||
DNSOrIP: v1alpha1.DNSOrIP{
|
||||
IP: "ff60::2",
|
||||
},
|
||||
Port: mesh.DefaultKiloPort,
|
||||
},
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("ff60::2").To16(), mesh.DefaultKiloPort),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -347,9 +461,9 @@ func TestTranslatePeer(t *testing.T) {
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
Endpoint: &wireguard.Endpoint{
|
||||
DNSOrIP: wireguard.DNSOrIP{DNS: "example.com"},
|
||||
Port: mesh.DefaultKiloPort,
|
||||
Endpoint: wireguard.ParseEndpoint("example.com:51820"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -359,16 +473,25 @@ func TestTranslatePeer(t *testing.T) {
|
||||
spec: v1alpha1.PeerSpec{
|
||||
PublicKey: "",
|
||||
},
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid key",
|
||||
spec: v1alpha1.PeerSpec{
|
||||
PublicKey: "foo",
|
||||
PublicKey: fooKey.String(),
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("foo"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: fooKey,
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -377,7 +500,13 @@ func TestTranslatePeer(t *testing.T) {
|
||||
spec: v1alpha1.PeerSpec{
|
||||
PersistentKeepalive: -1,
|
||||
},
|
||||
out: &mesh.Peer{},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid keepalive",
|
||||
@ -386,18 +515,23 @@ func TestTranslatePeer(t *testing.T) {
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PersistentKeepalive: 1,
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &second,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid preshared key",
|
||||
spec: v1alpha1.PeerSpec{
|
||||
PresharedKey: "psk",
|
||||
PresharedKey: pskKey.String(),
|
||||
},
|
||||
out: &mesh.Peer{
|
||||
Peer: wireguard.Peer{
|
||||
PresharedKey: []byte("psk"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: &zero,
|
||||
PresharedKey: pskKey,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -410,52 +544,3 @@ func TestTranslatePeer(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseEndpoint(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
endpoint string
|
||||
out *wireguard.Endpoint
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
endpoint: "",
|
||||
out: nil,
|
||||
},
|
||||
{
|
||||
name: "invalid IP",
|
||||
endpoint: "10.0.0.:51820",
|
||||
out: nil,
|
||||
},
|
||||
{
|
||||
name: "invalid hostname",
|
||||
endpoint: "foo-:51820",
|
||||
out: nil,
|
||||
},
|
||||
{
|
||||
name: "invalid port",
|
||||
endpoint: "10.0.0.1:100000000",
|
||||
out: nil,
|
||||
},
|
||||
{
|
||||
name: "valid IP",
|
||||
endpoint: "10.0.0.1:51820",
|
||||
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("10.0.0.1")}, Port: mesh.DefaultKiloPort},
|
||||
},
|
||||
{
|
||||
name: "valid IPv6",
|
||||
endpoint: "[ff02::114]:51820",
|
||||
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("ff02::114")}, Port: mesh.DefaultKiloPort},
|
||||
},
|
||||
{
|
||||
name: "valid hostname",
|
||||
endpoint: "foo:51821",
|
||||
out: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{DNS: "foo"}, Port: 51821},
|
||||
},
|
||||
} {
|
||||
endpoint := parseEndpoint(tc.endpoint)
|
||||
if diff := pretty.Compare(endpoint, tc.out); diff != "" {
|
||||
t.Errorf("test case %q: got diff: %v", tc.name, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -18,6 +18,7 @@ package versioned
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
kilov1alpha1 "github.com/squat/kilo/pkg/k8s/clientset/versioned/typed/kilo/v1alpha1"
|
||||
discovery "k8s.io/client-go/discovery"
|
||||
@ -53,22 +54,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
|
||||
// NewForConfig creates a new Clientset for the given config.
|
||||
// If config's RateLimiter is not set and QPS and Burst are acceptable,
|
||||
// NewForConfig will generate a rate-limiter in configShallowCopy.
|
||||
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
|
||||
// where httpClient was generated with rest.HTTPClientFor(c).
|
||||
func NewForConfig(c *rest.Config) (*Clientset, error) {
|
||||
configShallowCopy := *c
|
||||
|
||||
if configShallowCopy.UserAgent == "" {
|
||||
configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
||||
// share the transport between all clients
|
||||
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewForConfigAndClient(&configShallowCopy, httpClient)
|
||||
}
|
||||
|
||||
// NewForConfigAndClient creates a new Clientset for the given config and http client.
|
||||
// Note the http client provided takes precedence over the configured transport values.
|
||||
// If config's RateLimiter is not set and QPS and Burst are acceptable,
|
||||
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
|
||||
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
|
||||
configShallowCopy := *c
|
||||
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
|
||||
if configShallowCopy.Burst <= 0 {
|
||||
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
|
||||
}
|
||||
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
|
||||
}
|
||||
|
||||
var cs Clientset
|
||||
var err error
|
||||
cs.kiloV1alpha1, err = kilov1alpha1.NewForConfig(&configShallowCopy)
|
||||
cs.kiloV1alpha1, err = kilov1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
|
||||
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
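The regenerated clientset now builds one http.Client with rest.HTTPClientFor and hands it to NewForConfigAndClient, so all typed clients share a single transport and connection pool. The same constructor pair exists on client-go's core clientset; a minimal sketch, assuming a reachable kubeconfig at the default path:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// One HTTP client (and therefore one transport) is shared by every
	// typed client created from it.
	httpClient, err := rest.HTTPClientFor(config)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfigAndClient(config, httpClient)
	if err != nil {
		panic(err)
	}
	fmt.Printf("clientset ready: %T\n", clientset)
}
```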
@ -78,11 +102,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
|
||||
// NewForConfigOrDie creates a new Clientset for the given config and
|
||||
// panics if there is an error in the config.
|
||||
func NewForConfigOrDie(c *rest.Config) *Clientset {
|
||||
var cs Clientset
|
||||
cs.kiloV1alpha1 = kilov1alpha1.NewForConfigOrDie(c)
|
||||
|
||||
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
|
||||
return &cs
|
||||
cs, err := NewForConfig(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return cs
|
||||
}
|
||||
|
||||
// New creates a new Clientset for the given RESTClient.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -72,7 +72,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
|
||||
return c.tracker
|
||||
}
|
||||
|
||||
var _ clientset.Interface = &Clientset{}
|
||||
var (
|
||||
_ clientset.Interface = &Clientset{}
|
||||
_ testing.FakeClient = &Clientset{}
|
||||
)
|
||||
|
||||
// KiloV1alpha1 retrieves the KiloV1alpha1Client
|
||||
func (c *Clientset) KiloV1alpha1() kilov1alpha1.KiloV1alpha1Interface {
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -97,7 +97,7 @@ func (c *FakePeers) Update(ctx context.Context, peer *v1alpha1.Peer, opts v1.Upd
|
||||
// Delete takes name of the peer and deletes it. Returns an error if one occurs.
|
||||
func (c *FakePeers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewRootDeleteAction(peersResource, name), &v1alpha1.Peer{})
|
||||
Invokes(testing.NewRootDeleteActionWithOptions(peersResource, name, opts), &v1alpha1.Peer{})
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -17,6 +17,8 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
v1alpha1 "github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
|
||||
"github.com/squat/kilo/pkg/k8s/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
@ -37,12 +39,28 @@ func (c *KiloV1alpha1Client) Peers() PeerInterface {
|
||||
}
|
||||
|
||||
// NewForConfig creates a new KiloV1alpha1Client for the given config.
|
||||
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
|
||||
// where httpClient was generated with rest.HTTPClientFor(c).
|
||||
func NewForConfig(c *rest.Config) (*KiloV1alpha1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := rest.RESTClientFor(&config)
|
||||
httpClient, err := rest.HTTPClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewForConfigAndClient(&config, httpClient)
|
||||
}
|
||||
|
||||
// NewForConfigAndClient creates a new KiloV1alpha1Client for the given config and http client.
|
||||
// Note the http client provided takes precedence over the configured transport values.
|
||||
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*KiloV1alpha1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := rest.RESTClientForConfigAndClient(&config, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2021 the Kilo authors
|
||||
// Copyright 2022 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -15,9 +15,12 @@
|
||||
package mesh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
|
||||
"github.com/squat/kilo/pkg/wireguard"
|
||||
)
|
||||
|
||||
@ -55,7 +58,7 @@ const (
|
||||
// Node represents a node in the network.
|
||||
type Node struct {
|
||||
Endpoint *wireguard.Endpoint
|
||||
Key []byte
|
||||
Key wgtypes.Key
|
||||
NoInternalIP bool
|
||||
InternalIP *net.IPNet
|
||||
// LastSeen is a Unix time for the last time
|
||||
@ -66,18 +69,23 @@ type Node struct {
|
||||
Leader bool
|
||||
Location string
|
||||
Name string
|
||||
PersistentKeepalive int
|
||||
PersistentKeepalive time.Duration
|
||||
Subnet *net.IPNet
|
||||
WireGuardIP *net.IPNet
|
||||
DiscoveredEndpoints map[string]*wireguard.Endpoint
|
||||
AllowedLocationIPs []*net.IPNet
|
||||
// DiscoveredEndpoints cannot be DNS endpoints, only net.UDPAddr.
|
||||
DiscoveredEndpoints map[string]*net.UDPAddr
|
||||
AllowedLocationIPs []net.IPNet
|
||||
Granularity Granularity
|
||||
}
|
||||
|
||||
// Ready indicates whether or not the node is ready.
|
||||
func (n *Node) Ready() bool {
|
||||
// Nodes that are not leaders will not have WireGuardIPs, so it is not required.
|
||||
return n != nil && n.Endpoint != nil && !(n.Endpoint.IP == nil && n.Endpoint.DNS == "") && n.Endpoint.Port != 0 && n.Key != nil && n.Subnet != nil && time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second)
|
||||
return n != nil &&
|
||||
n.Endpoint.Ready() &&
|
||||
n.Key != wgtypes.Key{} &&
|
||||
n.Subnet != nil &&
|
||||
time.Now().Unix()-n.LastSeen < int64(checkInPeriod)*2/int64(time.Second)
|
||||
}
|
||||
|
||||
// Peer represents a peer in the network.
|
||||
@ -92,7 +100,10 @@ type Peer struct {
|
||||
// will not declare their endpoint and instead allow it to be
|
||||
// discovered.
|
||||
func (p *Peer) Ready() bool {
|
||||
return p != nil && p.AllowedIPs != nil && len(p.AllowedIPs) != 0 && p.PublicKey != nil
|
||||
return p != nil &&
|
||||
p.AllowedIPs != nil &&
|
||||
len(p.AllowedIPs) != 0 &&
|
||||
p.PublicKey != wgtypes.Key{} // If Key was not set, it will be wgtypes.Key{}.
|
||||
}
|
||||
|
||||
// EventType describes what kind of an action an event represents.
|
||||
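With keys typed as wgtypes.Key rather than []byte, "unset" is the zero value instead of nil, which is why the Ready methods above compare against wgtypes.Key{}. A quick sketch of that check:

```go
package main

import (
	"fmt"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// keySet reports whether a key has been populated; an all-zero key means
// the node or peer has not published one yet.
func keySet(k wgtypes.Key) bool {
	return k != (wgtypes.Key{})
}

func main() {
	var unset wgtypes.Key
	k, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	fmt.Println(keySet(unset), keySet(k)) // false true
}
```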
@ -136,11 +147,11 @@ type Backend interface {
|
||||
// clean up any changes applied to the backend,
|
||||
// and watch for changes to nodes.
|
||||
type NodeBackend interface {
|
||||
CleanUp(string) error
|
||||
CleanUp(context.Context, string) error
|
||||
Get(string) (*Node, error)
|
||||
Init(<-chan struct{}) error
|
||||
Init(context.Context) error
|
||||
List() ([]*Node, error)
|
||||
Set(string, *Node) error
|
||||
Set(context.Context, string, *Node) error
|
||||
Watch() <-chan *NodeEvent
|
||||
}
|
||||
|
||||
@ -150,10 +161,10 @@ type NodeBackend interface {
|
||||
// clean up any changes applied to the backend,
|
||||
// and watch for changes to peers.
|
||||
type PeerBackend interface {
|
||||
CleanUp(string) error
|
||||
CleanUp(context.Context, string) error
|
||||
Get(string) (*Peer, error)
|
||||
Init(<-chan struct{}) error
|
||||
Init(context.Context) error
|
||||
List() ([]*Peer, error)
|
||||
Set(string, *Peer) error
|
||||
Set(context.Context, string, *Peer) error
|
||||
Watch() <-chan *PeerEvent
|
||||
}
|
||||
|
@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package mesh
|
||||
|
@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package mesh
|
||||
@ -59,6 +60,7 @@ func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error)
|
||||
ignore[oneAddressCIDR(ip.IP).String()] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var hostPriv, hostPub []*net.IPNet
|
||||
{
|
||||
// Check IPs to which hostname resolves first.
|
||||
@ -71,6 +73,9 @@ func getIP(hostname string, ignoreIfaces ...int) (*net.IPNet, *net.IPNet, error)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if isLocal(ip.IP) {
|
||||
continue
|
||||
}
|
||||
ip.Mask = mask
|
||||
if isPublic(ip.IP) {
|
||||
hostPub = append(hostPub, ip)
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -20,6 +20,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/awalterschulze/gographviz"
|
||||
|
||||
"github.com/squat/kilo/pkg/wireguard"
|
||||
)
|
||||
|
||||
@ -166,8 +167,9 @@ func nodeLabel(location, name string, cidr *net.IPNet, priv, wgIP net.IP, endpoi
|
||||
if wgIP != nil {
|
||||
label = append(label, wgIP.String())
|
||||
}
|
||||
if endpoint != nil {
|
||||
label = append(label, endpoint.String())
|
||||
str := endpoint.String()
|
||||
if str != "" {
|
||||
label = append(label, str)
|
||||
}
|
||||
return graphEscape(strings.Join(label, "\\n"))
|
||||
}
|
||||
|
350
pkg/mesh/mesh.go
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -12,12 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package mesh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
@ -29,6 +31,8 @@ import (
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/vishvananda/netlink"
|
||||
"golang.zx2c4.com/wireguard/wgctrl"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
|
||||
"github.com/squat/kilo/pkg/encapsulation"
|
||||
"github.com/squat/kilo/pkg/iproute"
|
||||
@ -42,34 +46,32 @@ const (
|
||||
kiloPath = "/var/lib/kilo"
|
||||
// privateKeyPath is the filepath where the WireGuard private key is stored.
|
||||
privateKeyPath = kiloPath + "/key"
|
||||
// confPath is the filepath where the WireGuard configuration is stored.
|
||||
confPath = kiloPath + "/conf"
|
||||
)
|
||||
|
||||
// Mesh is able to create Kilo network meshes.
|
||||
type Mesh struct {
|
||||
Backend
|
||||
cleanUpIface bool
|
||||
cni bool
|
||||
cniPath string
|
||||
enc encapsulation.Encapsulator
|
||||
externalIP *net.IPNet
|
||||
granularity Granularity
|
||||
hostname string
|
||||
internalIP *net.IPNet
|
||||
ipTables *iptables.Controller
|
||||
kiloIface int
|
||||
key []byte
|
||||
local bool
|
||||
port uint32
|
||||
priv []byte
|
||||
privIface int
|
||||
pub []byte
|
||||
resyncPeriod time.Duration
|
||||
stop chan struct{}
|
||||
subnet *net.IPNet
|
||||
table *route.Table
|
||||
wireGuardIP *net.IPNet
|
||||
cleanUpIface bool
|
||||
cni bool
|
||||
cniPath string
|
||||
enc encapsulation.Encapsulator
|
||||
externalIP *net.IPNet
|
||||
granularity Granularity
|
||||
hostname string
|
||||
internalIP *net.IPNet
|
||||
ipTables *iptables.Controller
|
||||
kiloIface int
|
||||
kiloIfaceName string
|
||||
local bool
|
||||
port int
|
||||
priv wgtypes.Key
|
||||
privIface int
|
||||
pub wgtypes.Key
|
||||
resyncPeriod time.Duration
|
||||
iptablesForwardRule bool
|
||||
subnet *net.IPNet
|
||||
table *route.Table
|
||||
wireGuardIP *net.IPNet
|
||||
|
||||
// nodes and peers are mutable fields in the struct
|
||||
// and need to be guarded.
|
||||
@ -86,23 +88,27 @@ type Mesh struct {
|
||||
}
|
||||
|
||||
// New returns a new Mesh instance.
|
||||
func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, logger log.Logger) (*Mesh, error) {
|
||||
func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port int, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, mtu uint, resyncPeriod time.Duration, prioritisePrivateAddr, iptablesForwardRule bool, logger log.Logger, registerer prometheus.Registerer) (*Mesh, error) {
|
||||
if err := os.MkdirAll(kiloPath, 0700); err != nil {
|
||||
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
|
||||
}
|
||||
private, err := ioutil.ReadFile(privateKeyPath)
|
||||
private = bytes.Trim(private, "\n")
|
||||
privateB, err := ioutil.ReadFile(privateKeyPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("failed to read private key file: %v", err)
|
||||
}
|
||||
privateB = bytes.Trim(privateB, "\n")
|
||||
private, err := wgtypes.ParseKey(string(privateB))
|
||||
if err != nil {
|
||||
level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
|
||||
if private, err = wireguard.GenKey(); err != nil {
|
||||
if private, err = wgtypes.GeneratePrivateKey(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
public, err := wireguard.PubKey(private)
|
||||
public := private.PublicKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := ioutil.WriteFile(privateKeyPath, private, 0600); err != nil {
|
||||
if err := ioutil.WriteFile(privateKeyPath, []byte(private.String()), 0600); err != nil {
|
||||
return nil, fmt.Errorf("failed to write private key to disk: %v", err)
|
||||
}
|
||||
cniIndex, err := cniDeviceIndex()
|
||||
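In the New constructor above the private key is now a wgtypes.Key: the key file is read and parsed, a fresh key is generated if that fails, and the key is written back in its string form. A self-contained sketch of that load-or-generate pattern; the file path here is illustrative, not Kilo's actual key path:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

// loadOrCreateKey returns the WireGuard private key stored at path,
// generating and persisting one if none can be read or parsed.
func loadOrCreateKey(path string) (wgtypes.Key, error) {
	b, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return wgtypes.Key{}, fmt.Errorf("failed to read private key file: %v", err)
	}
	key, err := wgtypes.ParseKey(string(bytes.TrimSpace(b)))
	if err != nil {
		if key, err = wgtypes.GeneratePrivateKey(); err != nil {
			return wgtypes.Key{}, err
		}
	}
	if err := os.WriteFile(path, []byte(key.String()), 0600); err != nil {
		return wgtypes.Key{}, fmt.Errorf("failed to write private key to disk: %v", err)
	}
	return key, nil
}

func main() {
	key, err := loadOrCreateKey("/tmp/kilo-example-key")
	if err != nil {
		panic(err)
	}
	fmt.Println("public key:", key.PublicKey())
}
```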
@ -143,34 +149,41 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
|
||||
enc = encapsulation.Noop(enc.Strategy())
|
||||
level.Debug(logger).Log("msg", "running without a private IP address")
|
||||
}
|
||||
var externalIP *net.IPNet
|
||||
if prioritisePrivateAddr && privateIP != nil {
|
||||
externalIP = privateIP
|
||||
} else {
|
||||
externalIP = publicIP
|
||||
}
|
||||
level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
|
||||
ipTables, err := iptables.New(iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
|
||||
ipTables, err := iptables.New(iptables.WithRegisterer(registerer), iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to IP tables controller: %v", err)
|
||||
}
|
||||
return &Mesh{
|
||||
Backend: backend,
|
||||
cleanUpIface: cleanUpIface,
|
||||
cni: cni,
|
||||
cniPath: cniPath,
|
||||
enc: enc,
|
||||
externalIP: publicIP,
|
||||
granularity: granularity,
|
||||
hostname: hostname,
|
||||
internalIP: privateIP,
|
||||
ipTables: ipTables,
|
||||
kiloIface: kiloIface,
|
||||
nodes: make(map[string]*Node),
|
||||
peers: make(map[string]*Peer),
|
||||
port: port,
|
||||
priv: private,
|
||||
privIface: privIface,
|
||||
pub: public,
|
||||
resyncPeriod: resyncPeriod,
|
||||
local: local,
|
||||
stop: make(chan struct{}),
|
||||
subnet: subnet,
|
||||
table: route.NewTable(),
|
||||
mesh := Mesh{
|
||||
Backend: backend,
|
||||
cleanUpIface: cleanUpIface,
|
||||
cni: cni,
|
||||
cniPath: cniPath,
|
||||
enc: enc,
|
||||
externalIP: externalIP,
|
||||
granularity: granularity,
|
||||
hostname: hostname,
|
||||
internalIP: privateIP,
|
||||
ipTables: ipTables,
|
||||
kiloIface: kiloIface,
|
||||
kiloIfaceName: iface,
|
||||
nodes: make(map[string]*Node),
|
||||
peers: make(map[string]*Peer),
|
||||
port: port,
|
||||
priv: private,
|
||||
privIface: privIface,
|
||||
pub: public,
|
||||
resyncPeriod: resyncPeriod,
|
||||
iptablesForwardRule: iptablesForwardRule,
|
||||
local: local,
|
||||
subnet: subnet,
|
||||
table: route.NewTable(),
|
||||
errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "kilo_errors_total",
|
||||
Help: "Number of errors that occurred while administering the mesh.",
|
||||
@ -192,12 +205,20 @@ func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularit
|
||||
Help: "Number of reconciliation attempts.",
|
||||
}),
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
registerer.MustRegister(
|
||||
mesh.errorCounter,
|
||||
mesh.leaderGuage,
|
||||
mesh.nodesGuage,
|
||||
mesh.peersGuage,
|
||||
mesh.reconcileCounter,
|
||||
)
|
||||
return &mesh, nil
|
||||
}
|
||||
|
||||
// Run starts the mesh.
|
||||
func (m *Mesh) Run() error {
|
||||
if err := m.Nodes().Init(m.stop); err != nil {
|
||||
func (m *Mesh) Run(ctx context.Context) error {
|
||||
if err := m.Nodes().Init(ctx); err != nil {
|
||||
return fmt.Errorf("failed to initialize node backend: %v", err)
|
||||
}
|
||||
// Try to set the CNI config quickly.
|
||||
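Run now accepts a context instead of relying on the struct's stop channel, so cancellation reaches the backends, the iptables controller, and the route table through ctx.Done(). A hedged sketch of how a caller might drive such a loop; it only mimics the shape of Mesh.Run, it is not the kg wiring:

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
	"time"
)

// run mimics the shape of Mesh.Run above: periodic work driven by a ticker,
// terminated when the caller's context is canceled.
func run(ctx context.Context) error {
	checkIn := time.NewTicker(2 * time.Second)
	defer checkIn.Stop()
	for {
		select {
		case <-checkIn.C:
			fmt.Println("checking in")
		case <-ctx.Done():
			// Replaces the old `case <-m.stop` branch.
			return nil
		}
	}
}

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()
	if err := run(ctx); err != nil {
		fmt.Println(err)
	}
}
```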
@ -209,14 +230,14 @@ func (m *Mesh) Run() error {
|
||||
level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
|
||||
}
|
||||
}
|
||||
if err := m.Peers().Init(m.stop); err != nil {
|
||||
if err := m.Peers().Init(ctx); err != nil {
|
||||
return fmt.Errorf("failed to initialize peer backend: %v", err)
|
||||
}
|
||||
ipTablesErrors, err := m.ipTables.Run(m.stop)
|
||||
ipTablesErrors, err := m.ipTables.Run(ctx.Done())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to watch for IP tables updates: %v", err)
|
||||
}
|
||||
routeErrors, err := m.table.Run(m.stop)
|
||||
routeErrors, err := m.table.Run(ctx.Done())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to watch for route table updates: %v", err)
|
||||
}
|
||||
@ -226,7 +247,7 @@ func (m *Mesh) Run() error {
|
||||
select {
|
||||
case err = <-ipTablesErrors:
|
||||
case err = <-routeErrors:
|
||||
case <-m.stop:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
@ -245,11 +266,11 @@ func (m *Mesh) Run() error {
|
||||
for {
|
||||
select {
|
||||
case ne = <-nw:
|
||||
m.syncNodes(ne)
|
||||
m.syncNodes(ctx, ne)
|
||||
case pe = <-pw:
|
||||
m.syncPeers(pe)
|
||||
case <-checkIn.C:
|
||||
m.checkIn()
|
||||
m.checkIn(ctx)
|
||||
checkIn.Reset(checkInPeriod)
|
||||
case <-resync.C:
|
||||
if m.cni {
|
||||
@ -257,18 +278,18 @@ func (m *Mesh) Run() error {
|
||||
}
|
||||
m.applyTopology()
|
||||
resync.Reset(m.resyncPeriod)
|
||||
case <-m.stop:
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Mesh) syncNodes(e *NodeEvent) {
|
||||
func (m *Mesh) syncNodes(ctx context.Context, e *NodeEvent) {
|
||||
logger := log.With(m.logger, "event", e.Type)
|
||||
level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
|
||||
if isSelf(m.hostname, e.Node) {
|
||||
level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
|
||||
m.handleLocal(e.Node)
|
||||
m.handleLocal(ctx, e.Node)
|
||||
return
|
||||
}
|
||||
var diff bool
|
||||
@ -305,7 +326,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
|
||||
var diff bool
|
||||
m.mu.Lock()
|
||||
// Peers are indexed by public key.
|
||||
key := string(e.Peer.PublicKey)
|
||||
key := e.Peer.PublicKey.String()
|
||||
if !e.Peer.Ready() {
|
||||
// Trace non ready peer with their presence in the mesh.
|
||||
_, ok := m.peers[key]
|
||||
@ -315,8 +336,8 @@ func (m *Mesh) syncPeers(e *PeerEvent) {
|
||||
case AddEvent:
|
||||
fallthrough
|
||||
case UpdateEvent:
|
||||
if e.Old != nil && key != string(e.Old.PublicKey) {
delete(m.peers, string(e.Old.PublicKey))
if e.Old != nil && key != e.Old.PublicKey.String() {
delete(m.peers, e.Old.PublicKey.String())
diff = true
}
if !peersAreEqual(m.peers[key], e.Peer) {
@ -336,7 +357,7 @@ func (m *Mesh) syncPeers(e *PeerEvent) {

// checkIn will try to update the local node's LastSeen timestamp
// in the backend.
func (m *Mesh) checkIn() {
func (m *Mesh) checkIn(ctx context.Context) {
m.mu.Lock()
defer m.mu.Unlock()
n := m.nodes[m.hostname]
@ -346,7 +367,7 @@ func (m *Mesh) checkIn() {
}
oldTime := n.LastSeen
n.LastSeen = time.Now().Unix()
if err := m.Nodes().Set(m.hostname, n); err != nil {
if err := m.Nodes().Set(ctx, m.hostname, n); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
m.errorCounter.WithLabelValues("checkin").Inc()
// Revert time.
@ -356,10 +377,12 @@ func (m *Mesh) checkIn() {
level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
}

func (m *Mesh) handleLocal(n *Node) {
func (m *Mesh) handleLocal(ctx context.Context, n *Node) {
// Allow the IPs to be overridden.
if n.Endpoint == nil || (n.Endpoint.DNS == "" && n.Endpoint.IP == nil) {
n.Endpoint = &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: m.externalIP.IP}, Port: m.port}
if !n.Endpoint.Ready() {
e := wireguard.NewEndpoint(m.externalIP.IP, m.port)
level.Info(m.logger).Log("msg", "overriding endpoint", "node", m.hostname, "old endpoint", n.Endpoint.String(), "new endpoint", e.String())
n.Endpoint = e
}
if n.InternalIP == nil && !n.NoInternalIP {
n.InternalIP = m.internalIP
@ -385,7 +408,7 @@ func (m *Mesh) handleLocal(n *Node) {
}
if !nodesAreEqual(n, local) {
level.Debug(m.logger).Log("msg", "local node differs from backend")
if err := m.Nodes().Set(m.hostname, local); err != nil {
if err := m.Nodes().Set(ctx, m.hostname, local); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
m.errorCounter.WithLabelValues("local").Inc()
return
@ -453,22 +476,26 @@ func (m *Mesh) applyTopology() {
m.errorCounter.WithLabelValues("apply").Inc()
return
}
// Find the old configuration.
oldConfDump, err := wireguard.ShowDump(link.Attrs().Name)

wgClient, err := wgctrl.New()
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
oldConf, err := wireguard.ParseDump(oldConfDump)
defer wgClient.Close()

// wgDevice is the current configuration of the wg interface.
wgDevice, err := wgClient.Device(m.kiloIfaceName)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
natEndpoints := discoverNATEndpoints(nodes, peers, oldConf, m.logger)

natEndpoints := discoverNATEndpoints(nodes, peers, wgDevice, m.logger)
nodes[m.hostname].DiscoveredEndpoints = natEndpoints
t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive, m.logger)
t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port(), m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive, m.logger)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
@ -480,19 +507,8 @@ func (m *Mesh) applyTopology() {
} else {
m.wireGuardIP = nil
}
conf := t.Conf()
buf, err := conf.Bytes()
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
if err := ioutil.WriteFile(confPath, buf, 0600); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
ipRules := t.Rules(m.cni)
ipRules := t.Rules(m.cni, m.iptablesForwardRule)

// If we are handling local routes, ensure the local
// tunnel has an IP address and IPIP traffic is allowed.
if m.enc.Strategy() != encapsulation.Never && m.local {
@ -508,7 +524,9 @@ func (m *Mesh) applyTopology() {
break
}
}
ipRules = append(ipRules, m.enc.Rules(cidrs)...)

ipRules = append(m.enc.Rules(cidrs), ipRules...)

// If we are handling local routes, ensure the local
// tunnel has an IP address.
if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
@ -531,10 +549,12 @@ func (m *Mesh) applyTopology() {
}
// Setting the WireGuard configuration interrupts existing connections
// so only set the configuration if it has changed.
equal := conf.Equal(oldConf)
conf := t.Conf()
equal, diff := conf.Equal(wgDevice)
if !equal {
level.Info(m.logger).Log("msg", "WireGuard configurations are different")
if err := wireguard.SetConf(link.Attrs().Name, confPath); err != nil {
level.Info(m.logger).Log("msg", "WireGuard configurations are different", "diff", diff)
level.Debug(m.logger).Log("msg", "changing wg config", "config", conf.WGConfig())
if err := wgClient.ConfigureDevice(m.kiloIfaceName, conf.WGConfig()); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
@ -563,23 +583,6 @@ func (m *Mesh) applyTopology() {
}
}

// RegisterMetrics registers Prometheus metrics on the given Prometheus
// registerer.
func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
r.MustRegister(
m.errorCounter,
m.leaderGuage,
m.nodesGuage,
m.peersGuage,
m.reconcileCounter,
)
}

// Stop stops the mesh.
func (m *Mesh) Stop() {
close(m.stop)
}

func (m *Mesh) cleanUp() {
if err := m.ipTables.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
@ -589,23 +592,27 @@ func (m *Mesh) cleanUp() {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := os.Remove(confPath); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if m.cleanUpIface {
if err := iproute.RemoveInterface(m.kiloIface); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
if err := m.Nodes().CleanUp(m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
{
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := m.Nodes().CleanUp(ctx, m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
if err := m.Peers().CleanUp(m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
{
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if err := m.Peers().CleanUp(ctx, m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
if err := m.enc.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
@ -620,12 +627,8 @@ func (m *Mesh) resolveEndpoints() error {
if !m.nodes[k].Ready() {
continue
}
// If the node is ready, then the endpoint is not nil
// but it may not have a DNS name.
if m.nodes[k].Endpoint.DNS == "" {
continue
}
if err := resolveEndpoint(m.nodes[k].Endpoint); err != nil {
// Resolve the Endpoint
if _, err := m.nodes[k].Endpoint.UDPAddr(true); err != nil {
return err
}
}
@ -636,33 +639,16 @@ func (m *Mesh) resolveEndpoints() error {
continue
}
// Peers may have nil endpoints.
if m.peers[k].Endpoint == nil || m.peers[k].Endpoint.DNS == "" {
if !m.peers[k].Endpoint.Ready() {
continue
}
if err := resolveEndpoint(m.peers[k].Endpoint); err != nil {
if _, err := m.peers[k].Endpoint.UDPAddr(true); err != nil {
return err
}
}
return nil
}

func resolveEndpoint(endpoint *wireguard.Endpoint) error {
ips, err := net.LookupIP(endpoint.DNS)
if err != nil {
return fmt.Errorf("failed to look up DNS name %q: %v", endpoint.DNS, err)
}
nets := make([]*net.IPNet, len(ips), len(ips))
for i := range ips {
nets[i] = oneAddressCIDR(ips[i])
}
sortIPs(nets)
if len(nets) == 0 {
return fmt.Errorf("did not find any addresses for DNS name %q", endpoint.DNS)
}
endpoint.IP = nets[0].IP
return nil
}

func isSelf(hostname string, node *Node) bool {
return node != nil && node.Name == hostname
}
@ -682,7 +668,18 @@ func nodesAreEqual(a, b *Node) bool {
// Ignore LastSeen when comparing equality we want to check if the nodes are
// equivalent. However, we do want to check if LastSeen has transitioned
// between valid and invalid.
return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive && discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) && ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) && a.Granularity == b.Granularity
return a.Key.String() == b.Key.String() &&
ipNetsEqual(a.WireGuardIP, b.WireGuardIP) &&
ipNetsEqual(a.InternalIP, b.InternalIP) &&
a.Leader == b.Leader &&
a.Location == b.Location &&
a.Name == b.Name &&
subnetsEqual(a.Subnet, b.Subnet) &&
a.Ready() == b.Ready() &&
a.PersistentKeepalive == b.PersistentKeepalive &&
discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) &&
ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) &&
a.Granularity == b.Granularity
}

func peersAreEqual(a, b *Peer) bool {
@ -701,11 +698,15 @@ func peersAreEqual(a, b *Peer) bool {
return false
}
for i := range a.AllowedIPs {
if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
if !ipNetsEqual(&a.AllowedIPs[i], &b.AllowedIPs[i]) {
return false
}
}
return string(a.PublicKey) == string(b.PublicKey) && string(a.PresharedKey) == string(b.PresharedKey) && a.PersistentKeepalive == b.PersistentKeepalive
return a.PublicKey.String() == b.PublicKey.String() &&
(a.PresharedKey == nil) == (b.PresharedKey == nil) &&
(a.PresharedKey == nil || a.PresharedKey.String() == b.PresharedKey.String()) &&
(a.PersistentKeepaliveInterval == nil) == (b.PersistentKeepaliveInterval == nil) &&
(a.PersistentKeepaliveInterval == nil || *a.PersistentKeepaliveInterval == *b.PersistentKeepaliveInterval)
}

func ipNetsEqual(a, b *net.IPNet) bool {
@ -721,12 +722,12 @@ func ipNetsEqual(a, b *net.IPNet) bool {
return a.IP.Equal(b.IP)
}

func ipNetSlicesEqual(a, b []*net.IPNet) bool {
func ipNetSlicesEqual(a, b []net.IPNet) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if !ipNetsEqual(a[i], b[i]) {
if !ipNetsEqual(&a[i], &b[i]) {
return false
}
}
@ -752,18 +753,31 @@ func subnetsEqual(a, b *net.IPNet) bool {
return true
}

func discoveredEndpointsAreEqual(a, b map[string]*wireguard.Endpoint) bool {
func udpAddrsEqual(a, b *net.UDPAddr) bool {
if a == nil && b == nil {
return true
}
if (a != nil) != (b != nil) {
return false
}
if a.Zone != b.Zone {
return false
}
if a.Port != b.Port {
return false
}
return a.IP.Equal(b.IP)
}

func discoveredEndpointsAreEqual(a, b map[string]*net.UDPAddr) bool {
if a == nil && b == nil {
return true
}
if len(a) != len(b) {
return false
}
for k := range a {
if !a[k].Equal(b[k], false) {
if !udpAddrsEqual(a[k], b[k]) {
return false
}
}
@ -779,24 +793,26 @@ func linkByIndex(index int) (netlink.Link, error) {
}

// discoverNATEndpoints uses the node's WireGuard configuration to returns a list of the most recently discovered endpoints for all nodes and peers behind NAT so that they can roam.
func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wireguard.Conf, logger log.Logger) map[string]*wireguard.Endpoint {
natEndpoints := make(map[string]*wireguard.Endpoint)
keys := make(map[string]*wireguard.Peer)
// Discovered endpionts will never be DNS names, because WireGuard will always resolve them to net.UDPAddr.
func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wgtypes.Device, logger log.Logger) map[string]*net.UDPAddr {
natEndpoints := make(map[string]*net.UDPAddr)
keys := make(map[string]wgtypes.Peer)
for i := range conf.Peers {
keys[string(conf.Peers[i].PublicKey)] = conf.Peers[i]
keys[conf.Peers[i].PublicKey.String()] = conf.Peers[i]
}
for _, n := range nodes {
if peer, ok := keys[string(n.Key)]; ok && n.PersistentKeepalive > 0 {
level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", n.Endpoint.Equal(peer.Endpoint, false), "latest-handshake", peer.LatestHandshake)
if (peer.LatestHandshake != time.Time{}) {
natEndpoints[string(n.Key)] = peer.Endpoint
if peer, ok := keys[n.Key.String()]; ok && n.PersistentKeepalive != time.Duration(0) {
level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", peer.Endpoint.String() == n.Endpoint.String(), "latest-handshake", peer.LastHandshakeTime)
// Don't update the endpoint, if there was never any handshake.
if !peer.LastHandshakeTime.Equal(time.Time{}) {
natEndpoints[n.Key.String()] = peer.Endpoint
}
}
}
for _, p := range peers {
if peer, ok := keys[string(p.PublicKey)]; ok && p.PersistentKeepalive > 0 {
if (peer.LatestHandshake != time.Time{}) {
natEndpoints[string(p.PublicKey)] = peer.Endpoint
if peer, ok := keys[p.PublicKey.String()]; ok && p.PersistentKeepaliveInterval != nil {
if !peer.LastHandshakeTime.Equal(time.Time{}) {
natEndpoints[p.PublicKey.String()] = peer.Endpoint
}
}
}
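The mesh.go hunks above replace the config-file-based flow (conf.Bytes, ioutil.WriteFile, wireguard.SetConf) with the wgctrl library: the current device state is read with wgClient.Device and the desired state is applied with wgClient.ConfigureDevice only when conf.Equal(wgDevice) reports a difference. The standalone sketch below shows that read-compare-apply pattern in isolation; the interface name "kilo0", the generated keys, and the keepalive value are placeholder assumptions, not values taken from the diff.

package main

import (
	"log"
	"time"

	"golang.zx2c4.com/wireguard/wgctrl"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	// Open a client to the kernel's WireGuard implementation.
	client, err := wgctrl.New()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Read the current state of the interface, as applyTopology now does.
	dev, err := client.Device("kilo0") // placeholder interface name
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("device %s currently has %d peers", dev.Name, len(dev.Peers))

	// Build the desired configuration and apply it in a single call.
	priv, _ := wgtypes.GeneratePrivateKey()
	peerPriv, _ := wgtypes.GeneratePrivateKey()
	port := 51820
	keepalive := 10 * time.Second
	cfg := wgtypes.Config{
		PrivateKey:   &priv,
		ListenPort:   &port,
		ReplacePeers: true,
		Peers: []wgtypes.PeerConfig{{
			PublicKey:                   peerPriv.PublicKey(),
			PersistentKeepaliveInterval: &keepalive,
			ReplaceAllowedIPs:           true,
		}},
	}
	if err := client.ConfigureDevice(dev.Name, cfg); err != nil {
		log.Fatal(err)
	}
}

Applying the whole wgtypes.Config through one ConfigureDevice call, as the new code does via conf.WGConfig(), is what makes the equality check worthwhile: the device is only touched when something actually changed, so existing WireGuard sessions are not interrupted.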
@ -19,9 +19,21 @@ import (
"testing"
"time"

"golang.zx2c4.com/wireguard/wgctrl/wgtypes"

"github.com/squat/kilo/pkg/wireguard"
)

func mustKey() wgtypes.Key {
if k, err := wgtypes.GeneratePrivateKey(); err != nil {
panic(err.Error())
} else {
return k
}
}

var key = mustKey()

func TestReady(t *testing.T) {
internalIP := oneAddressCIDR(net.ParseIP("1.1.1.1"))
externalIP := oneAddressCIDR(net.ParseIP("2.2.2.2"))
@ -44,7 +56,7 @@ func TestReady(t *testing.T) {
name: "empty endpoint",
node: &Node{
InternalIP: internalIP,
Key: []byte{},
Key: key,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
@ -52,9 +64,9 @@ func TestReady(t *testing.T) {
{
name: "empty endpoint IP",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{}, Port: DefaultKiloPort},
Endpoint: wireguard.NewEndpoint(nil, DefaultKiloPort),
InternalIP: internalIP,
Key: []byte{},
Key: wgtypes.Key{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
@ -62,9 +74,9 @@ func TestReady(t *testing.T) {
{
name: "empty endpoint port",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}},
Endpoint: wireguard.NewEndpoint(externalIP.IP, 0),
InternalIP: internalIP,
Key: []byte{},
Key: wgtypes.Key{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
@ -72,8 +84,8 @@ func TestReady(t *testing.T) {
{
name: "empty internal IP",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
Key: []byte{},
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort),
Key: wgtypes.Key{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
@ -81,7 +93,7 @@ func TestReady(t *testing.T) {
{
name: "empty key",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort),
InternalIP: internalIP,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
@ -90,18 +102,18 @@ func TestReady(t *testing.T) {
{
name: "empty subnet",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort),
InternalIP: internalIP,
Key: []byte{},
Key: wgtypes.Key{},
},
ready: false,
},
{
name: "valid",
node: &Node{
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: externalIP.IP}, Port: DefaultKiloPort},
Endpoint: wireguard.NewEndpoint(externalIP.IP, DefaultKiloPort),
InternalIP: internalIP,
Key: []byte{},
Key: key,
LastSeen: time.Now().Unix(),
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
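The TestReady hunks above swap the old byte-slice keys for typed wgtypes.Key values and build endpoints with wireguard.NewEndpoint. The following self-contained sketch only illustrates the wgtypes key helpers the new code relies on (GeneratePrivateKey, PublicKey, String, ParseKey); it is not part of the diff.

package main

import (
	"fmt"
	"log"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

func main() {
	// Generate a private key, as mustKey does in the test above.
	priv, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Derive the public key and round-trip it through its base64 string form,
	// which is the representation the mesh code now compares with Key.String().
	pub := priv.PublicKey()
	parsed, err := wgtypes.ParseKey(pub.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed == pub)        // true: keys are comparable array values
	fmt.Println(wgtypes.Key{} == pub) // false: the zero Key stands for "no key"
}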
@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package mesh
@ -39,7 +40,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
var gw net.IP
for _, segment := range t.segments {
if segment.location == t.location {
gw = enc.Gw(segment.endpoint.IP, segment.privateIPs[segment.leader], segment.cidrs[segment.leader])
gw = enc.Gw(t.updateEndpoint(segment.endpoint, segment.key, &segment.persistentKeepalive).IP(), segment.privateIPs[segment.leader], segment.cidrs[segment.leader])
break
}
}
@ -112,7 +113,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// we need to set routes for allowed location IPs over the leader in the current location.
for i := range segment.allowedLocationIPs {
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: segment.allowedLocationIPs[i],
Dst: &segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: gw,
LinkIndex: privIface,
@ -124,7 +125,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
for _, peer := range t.peers {
for i := range peer.AllowedIPs {
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: peer.AllowedIPs[i],
Dst: &peer.AllowedIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: gw,
LinkIndex: privIface,
@ -195,7 +196,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// equals the external IP. This means that the node
// is only accessible through an external IP and we
// cannot encapsulate traffic to an IP through the IP.
if segment.privateIPs == nil || segment.privateIPs[i].Equal(segment.endpoint.IP) {
if segment.privateIPs == nil || segment.privateIPs[i].Equal(t.updateEndpoint(segment.endpoint, segment.key, &segment.persistentKeepalive).IP()) {
continue
}
// Add routes to the private IPs of nodes in other segments.
@ -213,7 +214,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
// we need to set routes for allowed location IPs over the wg interface.
for i := range segment.allowedLocationIPs {
routes = append(routes, &netlink.Route{
Dst: segment.allowedLocationIPs[i],
Dst: &segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
@ -225,7 +226,7 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
for _, peer := range t.peers {
for i := range peer.AllowedIPs {
routes = append(routes, &netlink.Route{
Dst: peer.AllowedIPs[i],
Dst: &peer.AllowedIPs[i],
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
@ -234,6 +235,74 @@ func (t *Topology) Routes(kiloIfaceName string, kiloIface, privIface, tunlIface
return routes, rules
}

// PeerRoutes generates a slice of routes and rules for a given peer in the Topology.
func (t *Topology) PeerRoutes(name string, kiloIface int, additionalAllowedIPs []net.IPNet) ([]*netlink.Route, []*netlink.Rule) {
var routes []*netlink.Route
var rules []*netlink.Rule
for _, segment := range t.segments {
for i := range segment.cidrs {
// Add routes to the Pod CIDRs of nodes in other segments.
routes = append(routes, &netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
for i := range segment.privateIPs {
// Add routes to the private IPs of nodes in other segments.
routes = append(routes, &netlink.Route{
Dst: oneAddressCIDR(segment.privateIPs[i]),
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
// Add routes for the allowed location IPs of all segments.
for i := range segment.allowedLocationIPs {
routes = append(routes, &netlink.Route{
Dst: &segment.allowedLocationIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
routes = append(routes, &netlink.Route{
Dst: oneAddressCIDR(segment.wireGuardIP),
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
// Add routes for the allowed IPs of peers.
for _, peer := range t.peers {
// Don't add routes to ourselves.
if peer.Name == name {
continue
}
for i := range peer.AllowedIPs {
routes = append(routes, &netlink.Route{
Dst: &peer.AllowedIPs[i],
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
}
for i := range additionalAllowedIPs {
routes = append(routes, &netlink.Route{
Dst: &additionalAllowedIPs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: t.segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}

return routes, rules
}

func encapsulateRoute(route *netlink.Route, encapsulate encapsulation.Strategy, subnet *net.IPNet, tunlIface int) *netlink.Route {
if encapsulate == encapsulation.Always || (encapsulate == encapsulation.CrossSubnet && !subnet.Contains(route.Gw)) {
route.LinkIndex = tunlIface
@ -242,17 +311,45 @@ func encapsulateRoute(route *netlink.Route, encapsulate encapsulation.Strategy,
}

// Rules returns the iptables rules required by the local node.
func (t *Topology) Rules(cni bool) []iptables.Rule {
func (t *Topology) Rules(cni, iptablesForwardRule bool) []iptables.Rule {
var rules []iptables.Rule
rules = append(rules, iptables.NewIPv4Chain("nat", "KILO-NAT"))
rules = append(rules, iptables.NewIPv6Chain("nat", "KILO-NAT"))
if cni {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(t.subnet.IP)), "nat", "POSTROUTING", "-s", t.subnet.String(), "-m", "comment", "--comment", "Kilo: jump to KILO-NAT chain", "-j", "KILO-NAT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "nat", "POSTROUTING", "-s", t.subnet.String(), "-m", "comment", "--comment", "Kilo: jump to KILO-NAT chain", "-j", "KILO-NAT"))
// Some linux distros or docker will set forward DROP in the filter table.
// To still be able to have pod to pod communication we need to ALLOW packets from and to pod CIDRs within a location.
// Leader nodes will forward packets from all nodes within a location because they act as a gateway for them.
// Non leader nodes only need to allow packages from and to their own pod CIDR.
if iptablesForwardRule && t.leader {
for _, s := range t.segments {
if s.location == t.location {
// Make sure packets to and from pod cidrs are not dropped in the forward chain.
for _, c := range s.cidrs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from the pod subnet", "-s", c.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to the pod subnet", "-d", c.String(), "-j", "ACCEPT"))
}
// Make sure packets to and from allowed location IPs are not dropped in the forward chain.
for _, c := range s.allowedLocationIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from allowed location IPs", "-s", c.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to allowed location IPs", "-d", c.String(), "-j", "ACCEPT"))
}
// Make sure packets to and from private IPs are not dropped in the forward chain.
for _, c := range s.privateIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from private IPs", "-s", oneAddressCIDR(c).String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(c), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to private IPs", "-d", oneAddressCIDR(c).String(), "-j", "ACCEPT"))
}
}
}
} else if iptablesForwardRule {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets from the node's pod subnet", "-s", t.subnet.String(), "-j", "ACCEPT"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(t.subnet.IP), "filter", "FORWARD", "-m", "comment", "--comment", "Kilo: forward packets to the node's pod subnet", "-d", t.subnet.String(), "-j", "ACCEPT"))
}
}
for _, s := range t.segments {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(s.wireGuardIP)), "nat", "KILO-NAT", "-d", oneAddressCIDR(s.wireGuardIP).String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for WireGuared IPs", "-j", "RETURN"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(s.wireGuardIP), "nat", "KILO-NAT", "-d", oneAddressCIDR(s.wireGuardIP).String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for WireGuared IPs", "-j", "RETURN"))
for _, aip := range s.allowedIPs {
rules = append(rules, iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for known IPs", "-j", "RETURN"))
rules = append(rules, iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for known IPs", "-j", "RETURN"))
}
// Make sure packets to allowed location IPs go through the KILO-NAT chain, so they can be MASQUERADEd,
// Otherwise packets to these destinations will reach the destination, but never find their way back.
@ -260,7 +357,7 @@ func (t *Topology) Rules(cni bool) []iptables.Rule {
if t.location == s.location {
for _, alip := range s.allowedLocationIPs {
rules = append(rules,
iptables.NewRule(iptables.GetProtocol(len(alip.IP)), "nat", "POSTROUTING", "-d", alip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
iptables.NewRule(iptables.GetProtocol(alip.IP), "nat", "POSTROUTING", "-d", alip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
)
}
}
@ -268,8 +365,8 @@ func (t *Topology) Rules(cni bool) []iptables.Rule {
for _, p := range t.peers {
for _, aip := range p.AllowedIPs {
rules = append(rules,
iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "POSTROUTING", "-s", aip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
iptables.NewRule(iptables.GetProtocol(len(aip.IP)), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for peers", "-j", "RETURN"),
iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "POSTROUTING", "-s", aip.String(), "-m", "comment", "--comment", "Kilo: jump to NAT chain", "-j", "KILO-NAT"),
iptables.NewRule(iptables.GetProtocol(aip.IP), "nat", "KILO-NAT", "-d", aip.String(), "-m", "comment", "--comment", "Kilo: do not NAT packets destined for peers", "-j", "RETURN"),
)
}
}
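The new FORWARD rules in Rules above exist because some distributions and Docker set the filter table's FORWARD policy to DROP, which silently breaks the pod-to-pod traffic that a location leader has to forward. Only as a rough illustration, the rules for a single pod CIDR correspond to iptables invocations like the ones in this sketch; the CIDR is a placeholder and Kilo itself installs the rules through its pkg/iptables helpers rather than by shelling out.

package main

import (
	"log"
	"os/exec"
)

func main() {
	podCIDR := "10.2.1.0/24" // placeholder pod subnet of one node in the location

	rules := [][]string{
		// Accept forwarded packets coming from the pod subnet...
		{"-t", "filter", "-A", "FORWARD", "-s", podCIDR, "-m", "comment", "--comment", "Kilo: forward packets from the pod subnet", "-j", "ACCEPT"},
		// ...and packets destined for it, so a FORWARD DROP policy does not discard them.
		{"-t", "filter", "-A", "FORWARD", "-d", podCIDR, "-m", "comment", "--comment", "Kilo: forward packets to the pod subnet", "-j", "ACCEPT"},
	}
	for _, args := range rules {
		if out, err := exec.Command("iptables", args...).CombinedOutput(); err != nil {
			log.Fatalf("iptables %v failed: %v: %s", args, err, out)
		}
	}
}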
@ -75,7 +75,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -89,17 +89,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -132,17 +132,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -196,21 +196,21 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
@ -266,24 +266,24 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["d"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -309,7 +309,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -337,17 +337,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -394,17 +394,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -444,7 +444,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -458,17 +458,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -509,7 +509,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -523,17 +523,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -574,7 +574,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(LogicalGranularity, nodes["a"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -588,17 +588,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -639,17 +639,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -698,17 +698,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -782,21 +782,21 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: privIface,
|
||||
@ -868,21 +868,21 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: tunlIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: tunlIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: nodes["b"].InternalIP.IP,
|
||||
LinkIndex: tunlIface,
|
||||
@ -918,7 +918,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["a"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -946,17 +946,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -1004,17 +1004,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
@ -1055,7 +1055,7 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: nodes["b"].AllowedLocationIPs[0],
|
||||
Dst: &nodes["b"].AllowedLocationIPs[0],
|
||||
Flags: int(netlink.FLAG_ONLINK),
|
||||
Gw: mustTopoForGranularityAndHost(FullGranularity, nodes["c"].Name).segments[1].wireGuardIP,
|
||||
LinkIndex: kiloIface,
|
||||
@ -1069,17 +1069,17 @@ func TestRoutes(t *testing.T) {
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[0],
|
||||
Dst: &peers["a"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["a"].AllowedIPs[1],
|
||||
Dst: &peers["a"].AllowedIPs[1],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
{
|
||||
Dst: peers["b"].AllowedIPs[0],
|
||||
Dst: &peers["b"].AllowedIPs[0],
|
||||
LinkIndex: kiloIface,
|
||||
Protocol: unix.RTPROT_STATIC,
|
||||
},
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -18,9 +18,11 @@ import (
|
||||
"errors"
|
||||
"net"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
|
||||
"github.com/squat/kilo/pkg/wireguard"
|
||||
)
|
||||
@ -33,8 +35,8 @@ const (
|
||||
// Topology represents the logical structure of the overlay network.
|
||||
type Topology struct {
|
||||
// key is the private key of the node creating the topology.
|
||||
key []byte
|
||||
port uint32
|
||||
key wgtypes.Key
|
||||
port int
|
||||
// Location is the logical location of the local host.
|
||||
location string
|
||||
segments []*segment
|
||||
@ -47,7 +49,7 @@ type Topology struct {
|
||||
leader bool
|
||||
// persistentKeepalive is the interval in seconds of the emission
|
||||
// of keepalive packets by the local node to its peers.
|
||||
persistentKeepalive int
|
||||
persistentKeepalive time.Duration
|
||||
// privateIP is the private IP address of the local node.
|
||||
privateIP *net.IPNet
|
||||
// subnet is the Pod subnet of the local node.
|
||||
@ -59,15 +61,16 @@ type Topology struct {
|
||||
// is equal to the Kilo subnet.
|
||||
wireGuardCIDR *net.IPNet
|
||||
// discoveredEndpoints is the updated map of valid discovered Endpoints
|
||||
discoveredEndpoints map[string]*wireguard.Endpoint
|
||||
discoveredEndpoints map[string]*net.UDPAddr
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// segment represents one logical unit in the topology that is united by one common WireGuard IP.
|
||||
type segment struct {
|
||||
allowedIPs []*net.IPNet
|
||||
allowedIPs []net.IPNet
|
||||
endpoint *wireguard.Endpoint
|
||||
key []byte
|
||||
persistentKeepalive int
|
||||
key wgtypes.Key
|
||||
persistentKeepalive time.Duration
|
||||
// Location is the logical location of this segment.
|
||||
location string
|
||||
|
||||
@ -85,11 +88,11 @@ type segment struct {
|
||||
// allowedLocationIPs are not part of the cluster and are not peers.
|
||||
// They are directly routable from nodes within the segment.
|
||||
// A classic example is a printer that ought to be routable from other locations.
|
||||
allowedLocationIPs []*net.IPNet
|
||||
allowedLocationIPs []net.IPNet
|
||||
}
|
||||
|
||||
// NewTopology creates a new Topology struct from a given set of nodes and peers.
|
||||
func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int, logger log.Logger) (*Topology, error) {
|
||||
func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port int, key wgtypes.Key, subnet *net.IPNet, persistentKeepalive time.Duration, logger log.Logger) (*Topology, error) {
|
||||
if logger == nil {
|
||||
logger = log.NewNopLogger()
|
||||
}
|
||||
@ -120,7 +123,18 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
localLocation = nodeLocationPrefix + hostname
|
||||
}
|
||||
|
||||
t := Topology{key: key, port: port, hostname: hostname, location: localLocation, persistentKeepalive: persistentKeepalive, privateIP: nodes[hostname].InternalIP, subnet: nodes[hostname].Subnet, wireGuardCIDR: subnet, discoveredEndpoints: make(map[string]*wireguard.Endpoint), logger: logger}
|
||||
t := Topology{
|
||||
key: key,
|
||||
port: port,
|
||||
hostname: hostname,
|
||||
location: localLocation,
|
||||
persistentKeepalive: persistentKeepalive,
|
||||
privateIP: nodes[hostname].InternalIP,
|
||||
subnet: nodes[hostname].Subnet,
|
||||
wireGuardCIDR: subnet,
|
||||
discoveredEndpoints: make(map[string]*net.UDPAddr),
|
||||
logger: logger,
|
||||
}
|
||||
for location := range topoMap {
|
||||
// Sort the location so the result is stable.
|
||||
sort.Slice(topoMap[location], func(i, j int) bool {
|
||||
@ -130,9 +144,9 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
if location == localLocation && topoMap[location][leader].Name == hostname {
|
||||
t.leader = true
|
||||
}
|
||||
var allowedIPs []*net.IPNet
|
||||
var allowedIPs []net.IPNet
|
||||
allowedLocationIPsMap := make(map[string]struct{})
|
||||
var allowedLocationIPs []*net.IPNet
|
||||
var allowedLocationIPs []net.IPNet
|
||||
var cidrs []*net.IPNet
|
||||
var hostnames []string
|
||||
var privateIPs []net.IP
|
||||
@ -142,7 +156,9 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
// - the node's WireGuard IP
|
||||
// - the node's internal IP
|
||||
// - IPs that were specified by the allowed-location-ips annotation
|
||||
allowedIPs = append(allowedIPs, node.Subnet)
|
||||
if node.Subnet != nil {
|
||||
allowedIPs = append(allowedIPs, *node.Subnet)
|
||||
}
|
||||
for _, ip := range node.AllowedLocationIPs {
|
||||
if _, ok := allowedLocationIPsMap[ip.String()]; !ok {
|
||||
allowedLocationIPs = append(allowedLocationIPs, ip)
|
||||
@ -150,7 +166,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
}
|
||||
}
|
||||
if node.InternalIP != nil {
|
||||
allowedIPs = append(allowedIPs, oneAddressCIDR(node.InternalIP.IP))
|
||||
allowedIPs = append(allowedIPs, *oneAddressCIDR(node.InternalIP.IP))
|
||||
privateIPs = append(privateIPs, node.InternalIP.IP)
|
||||
}
|
||||
cidrs = append(cidrs, node.Subnet)
|
||||
@ -172,6 +188,8 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
privateIPs: privateIPs,
|
||||
allowedLocationIPs: allowedLocationIPs,
|
||||
})
|
||||
level.Debug(t.logger).Log("msg", "generated segment", "location", location, "allowedIPs", allowedIPs, "endpoint", topoMap[location][leader].Endpoint, "cidrs", cidrs, "hostnames", hostnames, "leader", leader, "privateIPs", privateIPs, "allowedLocationIPs", allowedLocationIPs)
|
||||
|
||||
}
|
||||
// Sort the Topology segments so the result is stable.
|
||||
sort.Slice(t.segments, func(i, j int) bool {
|
||||
@ -200,7 +218,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
return nil, errors.New("failed to allocate an IP address; ran out of IP addresses")
|
||||
}
|
||||
segment.wireGuardIP = ipNet.IP
|
||||
segment.allowedIPs = append(segment.allowedIPs, oneAddressCIDR(ipNet.IP))
|
||||
segment.allowedIPs = append(segment.allowedIPs, *oneAddressCIDR(ipNet.IP))
|
||||
if t.leader && segment.location == t.location {
|
||||
t.wireGuardCIDR = &net.IPNet{IP: ipNet.IP, Mask: subnet.Mask}
|
||||
}
|
||||
@ -218,14 +236,15 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
|
||||
segment.allowedLocationIPs = t.filterAllowedLocationIPs(segment.allowedLocationIPs, segment.location)
|
||||
}
|
||||
|
||||
level.Debug(t.logger).Log("msg", "generated topology", "location", t.location, "hostname", t.hostname, "wireGuardIP", t.wireGuardCIDR, "privateIP", t.privateIP, "subnet", t.subnet, "leader", t.leader)
|
||||
return &t, nil
|
||||
}
|
||||
|
||||
func intersect(n1, n2 *net.IPNet) bool {
|
||||
func intersect(n1, n2 net.IPNet) bool {
|
||||
return n1.Contains(n2.IP) || n2.Contains(n1.IP)
|
||||
}
|
||||
|
||||
func (t *Topology) filterAllowedLocationIPs(ips []*net.IPNet, location string) (ret []*net.IPNet) {
|
||||
func (t *Topology) filterAllowedLocationIPs(ips []net.IPNet, location string) (ret []net.IPNet) {
|
||||
CheckIPs:
|
||||
for _, ip := range ips {
|
||||
for _, s := range t.segments {
|
||||
@ -267,14 +286,14 @@ CheckIPs:
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key []byte, persistentKeepalive int) *wireguard.Endpoint {
|
||||
func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key wgtypes.Key, persistentKeepalive *time.Duration) *wireguard.Endpoint {
|
||||
// Do not update non-nat peers
|
||||
if persistentKeepalive == 0 {
|
||||
if persistentKeepalive == nil || *persistentKeepalive == time.Duration(0) {
|
||||
return endpoint
|
||||
}
|
||||
e, ok := t.discoveredEndpoints[string(key)]
|
||||
e, ok := t.discoveredEndpoints[key.String()]
|
||||
if ok {
|
||||
return e
|
||||
return wireguard.NewEndpointFromUDPAddr(e)
|
||||
}
|
||||
return endpoint
|
||||
}
|
||||
@ -282,30 +301,37 @@ func (t *Topology) updateEndpoint(endpoint *wireguard.Endpoint, key []byte, pers
|
||||
// Conf generates a WireGuard configuration file for a given Topology.
|
||||
func (t *Topology) Conf() *wireguard.Conf {
|
||||
c := &wireguard.Conf{
|
||||
Interface: &wireguard.Interface{
|
||||
PrivateKey: t.key,
|
||||
ListenPort: t.port,
|
||||
Config: wgtypes.Config{
|
||||
PrivateKey: &t.key,
|
||||
ListenPort: &t.port,
|
||||
ReplacePeers: true,
|
||||
},
|
||||
}
|
||||
for _, s := range t.segments {
|
||||
if s.location == t.location {
|
||||
continue
|
||||
}
|
||||
peer := &wireguard.Peer{
|
||||
AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...),
|
||||
Endpoint: t.updateEndpoint(s.endpoint, s.key, s.persistentKeepalive),
|
||||
PersistentKeepalive: t.persistentKeepalive,
|
||||
PublicKey: s.key,
|
||||
peer := wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...),
|
||||
PersistentKeepaliveInterval: &t.persistentKeepalive,
|
||||
PublicKey: s.key,
|
||||
ReplaceAllowedIPs: true,
|
||||
},
|
||||
Endpoint: t.updateEndpoint(s.endpoint, s.key, &s.persistentKeepalive),
|
||||
}
|
||||
c.Peers = append(c.Peers, peer)
|
||||
}
|
||||
for _, p := range t.peers {
|
||||
peer := &wireguard.Peer{
|
||||
AllowedIPs: p.AllowedIPs,
|
||||
Endpoint: t.updateEndpoint(p.Endpoint, p.PublicKey, p.PersistentKeepalive),
|
||||
PersistentKeepalive: t.persistentKeepalive,
|
||||
PresharedKey: p.PresharedKey,
|
||||
PublicKey: p.PublicKey,
|
||||
peer := wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: p.AllowedIPs,
|
||||
PersistentKeepaliveInterval: &t.persistentKeepalive,
|
||||
PresharedKey: p.PresharedKey,
|
||||
PublicKey: p.PublicKey,
|
||||
ReplaceAllowedIPs: true,
|
||||
},
|
||||
Endpoint: t.updateEndpoint(p.Endpoint, p.PublicKey, p.PersistentKeepaliveInterval),
|
||||
}
|
||||
c.Peers = append(c.Peers, peer)
|
||||
}
|
||||
@ -319,34 +345,39 @@ func (t *Topology) AsPeer() *wireguard.Peer {
|
||||
if s.location != t.location {
|
||||
continue
|
||||
}
|
||||
return &wireguard.Peer{
|
||||
AllowedIPs: s.allowedIPs,
|
||||
Endpoint: s.endpoint,
|
||||
PublicKey: s.key,
|
||||
p := &wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: s.allowedIPs,
|
||||
PublicKey: s.key,
|
||||
},
|
||||
Endpoint: s.endpoint,
|
||||
}
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeerConf generates a WireGuard configuration file for a given peer in a Topology.
|
||||
func (t *Topology) PeerConf(name string) *wireguard.Conf {
|
||||
var pka int
|
||||
var psk []byte
|
||||
var pka *time.Duration
|
||||
var psk *wgtypes.Key
|
||||
for i := range t.peers {
|
||||
if t.peers[i].Name == name {
|
||||
pka = t.peers[i].PersistentKeepalive
|
||||
pka = t.peers[i].PersistentKeepaliveInterval
|
||||
psk = t.peers[i].PresharedKey
|
||||
break
|
||||
}
|
||||
}
|
||||
c := &wireguard.Conf{}
|
||||
for _, s := range t.segments {
|
||||
peer := &wireguard.Peer{
|
||||
AllowedIPs: s.allowedIPs,
|
||||
Endpoint: s.endpoint,
|
||||
PersistentKeepalive: pka,
|
||||
PresharedKey: psk,
|
||||
PublicKey: s.key,
|
||||
peer := wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: append(s.allowedIPs, s.allowedLocationIPs...),
|
||||
PersistentKeepaliveInterval: pka,
|
||||
PresharedKey: psk,
|
||||
PublicKey: s.key,
|
||||
},
|
||||
Endpoint: t.updateEndpoint(s.endpoint, s.key, &s.persistentKeepalive),
|
||||
}
|
||||
c.Peers = append(c.Peers, peer)
|
||||
}
|
||||
@ -354,11 +385,13 @@ func (t *Topology) PeerConf(name string) *wireguard.Conf {
|
||||
if t.peers[i].Name == name {
|
||||
continue
|
||||
}
|
||||
peer := &wireguard.Peer{
|
||||
AllowedIPs: t.peers[i].AllowedIPs,
|
||||
PersistentKeepalive: pka,
|
||||
PublicKey: t.peers[i].PublicKey,
|
||||
Endpoint: t.peers[i].Endpoint,
|
||||
peer := wireguard.Peer{
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: t.peers[i].AllowedIPs,
|
||||
PersistentKeepaliveInterval: pka,
|
||||
PublicKey: t.peers[i].PublicKey,
|
||||
},
|
||||
Endpoint: t.updateEndpoint(t.peers[i].Endpoint, t.peers[i].PublicKey, t.peers[i].PersistentKeepaliveInterval),
|
||||
}
|
||||
c.Peers = append(c.Peers, peer)
|
||||
}
|
||||
@ -379,13 +412,13 @@ func findLeader(nodes []*Node) int {
|
||||
var leaders, public []int
|
||||
for i := range nodes {
|
||||
if nodes[i].Leader {
|
||||
if isPublic(nodes[i].Endpoint.IP) {
|
||||
if isPublic(nodes[i].Endpoint.IP()) {
|
||||
return i
|
||||
}
|
||||
leaders = append(leaders, i)
|
||||
|
||||
}
|
||||
if isPublic(nodes[i].Endpoint.IP) {
|
||||
if nodes[i].Endpoint.IP() != nil && isPublic(nodes[i].Endpoint.IP()) {
|
||||
public = append(public, i)
|
||||
}
|
||||
}
|
||||
@ -405,10 +438,12 @@ func deduplicatePeerIPs(peers []*Peer) []*Peer {
|
||||
p := Peer{
|
||||
Name: peer.Name,
|
||||
Peer: wireguard.Peer{
|
||||
Endpoint: peer.Endpoint,
|
||||
PersistentKeepalive: peer.PersistentKeepalive,
|
||||
PresharedKey: peer.PresharedKey,
|
||||
PublicKey: peer.PublicKey,
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PersistentKeepaliveInterval: peer.PersistentKeepaliveInterval,
|
||||
PresharedKey: peer.PresharedKey,
|
||||
PublicKey: peer.PublicKey,
|
||||
},
|
||||
Endpoint: peer.Endpoint,
|
||||
},
|
||||
}
|
||||
for _, ip := range peer.AllowedIPs {
|
||||
|
@ -16,30 +16,35 @@ package mesh
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/kylelemons/godebug/pretty"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
|
||||
"github.com/squat/kilo/pkg/wireguard"
|
||||
)
|
||||
|
||||
func allowedIPs(ips ...string) string {
|
||||
return strings.Join(ips, ", ")
|
||||
}
|
||||
|
||||
func mustParseCIDR(s string) (r *net.IPNet) {
|
||||
func mustParseCIDR(s string) (r net.IPNet) {
|
||||
if _, ip, err := net.ParseCIDR(s); err != nil {
|
||||
panic("failed to parse CIDR")
|
||||
} else {
|
||||
r = ip
|
||||
r = *ip
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setup(t *testing.T) (map[string]*Node, map[string]*Peer, []byte, uint32) {
|
||||
key := []byte("private")
|
||||
var (
|
||||
key1 = wgtypes.Key{'k', 'e', 'y', '1'}
|
||||
key2 = wgtypes.Key{'k', 'e', 'y', '2'}
|
||||
key3 = wgtypes.Key{'k', 'e', 'y', '3'}
|
||||
key4 = wgtypes.Key{'k', 'e', 'y', '4'}
|
||||
key5 = wgtypes.Key{'k', 'e', 'y', '5'}
|
||||
)
|
||||
|
||||
func setup(t *testing.T) (map[string]*Node, map[string]*Peer, wgtypes.Key, int) {
|
||||
key := wgtypes.Key{'p', 'r', 'i', 'v'}
|
||||
e1 := &net.IPNet{IP: net.ParseIP("10.1.0.1").To4(), Mask: net.CIDRMask(16, 32)}
|
||||
e2 := &net.IPNet{IP: net.ParseIP("10.1.0.2").To4(), Mask: net.CIDRMask(16, 32)}
|
||||
e3 := &net.IPNet{IP: net.ParseIP("10.1.0.3").To4(), Mask: net.CIDRMask(16, 32)}
|
||||
@ -50,62 +55,63 @@ func setup(t *testing.T) (map[string]*Node, map[string]*Peer, []byte, uint32) {
|
||||
nodes := map[string]*Node{
|
||||
"a": {
|
||||
Name: "a",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort),
|
||||
InternalIP: i1,
|
||||
Location: "1",
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Key: []byte("key1"),
|
||||
Key: key1,
|
||||
PersistentKeepalive: 25,
|
||||
},
|
||||
"b": {
|
||||
Name: "b",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort),
|
||||
InternalIP: i1,
|
||||
Location: "2",
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.2.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Key: []byte("key2"),
|
||||
AllowedLocationIPs: []*net.IPNet{i3},
|
||||
Key: key2,
|
||||
AllowedLocationIPs: []net.IPNet{*i3},
|
||||
},
|
||||
"c": {
|
||||
Name: "c",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e3.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e3.IP, DefaultKiloPort),
|
||||
InternalIP: i2,
|
||||
// Same location as node b.
|
||||
Location: "2",
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.3.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Key: []byte("key3"),
|
||||
Key: key3,
|
||||
},
|
||||
"d": {
|
||||
Name: "d",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e4.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e4.IP, DefaultKiloPort),
|
||||
// Same location as node a, but without private IP
|
||||
Location: "1",
|
||||
Subnet: &net.IPNet{IP: net.ParseIP("10.2.4.0"), Mask: net.CIDRMask(24, 32)},
|
||||
Key: []byte("key4"),
|
||||
Key: key4,
|
||||
},
|
||||
}
|
||||
peers := map[string]*Peer{
|
||||
"a": {
|
||||
Name: "a",
|
||||
Peer: wireguard.Peer{
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.5.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.5.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.5.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.5.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
PublicKey: key4,
|
||||
},
|
||||
PublicKey: []byte("key4"),
|
||||
},
|
||||
},
|
||||
"b": {
|
||||
Name: "b",
|
||||
Peer: wireguard.Peer{
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.5.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.5.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
PublicKey: key5,
|
||||
},
|
||||
Endpoint: &wireguard.Endpoint{
|
||||
DNSOrIP: wireguard.DNSOrIP{IP: net.ParseIP("192.168.0.1")},
|
||||
Port: DefaultKiloPort,
|
||||
},
|
||||
PublicKey: []byte("key5"),
|
||||
Endpoint: wireguard.NewEndpoint(net.ParseIP("192.168.0.1"), DefaultKiloPort),
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -138,7 +144,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -149,7 +155,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -161,7 +167,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -189,7 +195,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -200,7 +206,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -212,7 +218,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -240,7 +246,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: DefaultKiloSubnet,
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -251,7 +257,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, *nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -263,7 +269,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -291,7 +297,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -302,7 +308,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -314,7 +320,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["c"].Endpoint,
|
||||
key: nodes["c"].Key,
|
||||
persistentKeepalive: nodes["c"].PersistentKeepalive,
|
||||
@ -325,7 +331,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w3,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -353,7 +359,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -364,7 +370,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -376,7 +382,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["c"].Endpoint,
|
||||
key: nodes["c"].Key,
|
||||
persistentKeepalive: nodes["c"].PersistentKeepalive,
|
||||
@ -387,7 +393,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w3,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -415,7 +421,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -426,7 +432,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -438,7 +444,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["c"].Endpoint,
|
||||
key: nodes["c"].Key,
|
||||
persistentKeepalive: nodes["c"].PersistentKeepalive,
|
||||
@ -449,7 +455,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w3,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -477,7 +483,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardCIDR: &net.IPNet{IP: w4, Mask: net.CIDRMask(16, 32)},
|
||||
segments: []*segment{
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["a"].Subnet, *nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["a"].Endpoint,
|
||||
key: nodes["a"].Key,
|
||||
persistentKeepalive: nodes["a"].PersistentKeepalive,
|
||||
@ -488,7 +494,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w1,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["b"].Subnet, *nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["b"].Endpoint,
|
||||
key: nodes["b"].Key,
|
||||
persistentKeepalive: nodes["b"].PersistentKeepalive,
|
||||
@ -500,7 +506,7 @@ func TestNewTopology(t *testing.T) {
|
||||
allowedLocationIPs: nodes["b"].AllowedLocationIPs,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["c"].Subnet, *nodes["c"].InternalIP, {IP: w3, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["c"].Endpoint,
|
||||
key: nodes["c"].Key,
|
||||
persistentKeepalive: nodes["c"].PersistentKeepalive,
|
||||
@ -511,7 +517,7 @@ func TestNewTopology(t *testing.T) {
|
||||
wireGuardIP: w3,
|
||||
},
|
||||
{
|
||||
allowedIPs: []*net.IPNet{nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
allowedIPs: []net.IPNet{*nodes["d"].Subnet, {IP: w4, Mask: net.CIDRMask(32, 32)}},
|
||||
endpoint: nodes["d"].Endpoint,
|
||||
key: nodes["d"].Key,
|
||||
persistentKeepalive: nodes["d"].PersistentKeepalive,
|
||||
@ -539,7 +545,7 @@ func TestNewTopology(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int) *Topology {
|
||||
func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port int, key wgtypes.Key, subnet *net.IPNet, persistentKeepalive time.Duration) *Topology {
|
||||
topo, err := NewTopology(nodes, peers, granularity, hostname, port, key, subnet, persistentKeepalive, nil)
|
||||
if err != nil {
|
||||
t.Errorf("failed to generate Topology: %v", err)
|
||||
@ -547,211 +553,6 @@ func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, gran
|
||||
return topo
|
||||
}
|
||||
|
||||
func TestConf(t *testing.T) {
|
||||
nodes, peers, key, port := setup(t)
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
topology *Topology
|
||||
result string
|
||||
}{
|
||||
{
|
||||
name: "logical from a",
|
||||
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key2
|
||||
Endpoint = 10.1.0.2:51820
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32, 192.168.178.3/32
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
PersistentKeepalive = 25
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "logical from b",
|
||||
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key1
|
||||
Endpoint = 10.1.0.1:51820
|
||||
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "logical from c",
|
||||
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key1
|
||||
Endpoint = 10.1.0.1:51820
|
||||
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.3/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "full from a",
|
||||
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key2
|
||||
Endpoint = 10.1.0.2:51820
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32, 192.168.178.3/32
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key3
|
||||
Endpoint = 10.1.0.3:51820
|
||||
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
PersistentKeepalive = 25
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
PersistentKeepalive = 25
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "full from b",
|
||||
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key1
|
||||
Endpoint = 10.1.0.1:51820
|
||||
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key3
|
||||
Endpoint = 10.1.0.3:51820
|
||||
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
`,
|
||||
},
|
||||
{
|
||||
name: "full from c",
|
||||
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
|
||||
result: `[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key1
|
||||
Endpoint = 10.1.0.1:51820
|
||||
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key2
|
||||
Endpoint = 10.1.0.2:51820
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32, 192.168.178.3/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
Endpoint = 10.1.0.4:51820
|
||||
AllowedIPs = 10.2.4.0/24, 10.4.0.4/32
|
||||
|
||||
[Peer]
|
||||
PublicKey = key4
|
||||
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
|
||||
|
||||
[Peer]
|
||||
PublicKey = key5
|
||||
Endpoint = 192.168.0.1:51820
|
||||
AllowedIPs = 10.5.0.3/24
|
||||
`,
|
||||
},
|
||||
} {
|
||||
conf := tc.topology.Conf()
|
||||
if !conf.Equal(wireguard.Parse([]byte(tc.result))) {
|
||||
buf, err := conf.Bytes()
|
||||
if err != nil {
|
||||
t.Errorf("test case %q: failed to render conf: %v", tc.name, err)
|
||||
}
|
||||
t.Errorf("test case %q: expected %s got %s", tc.name, tc.result, string(buf))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindLeader(t *testing.T) {
|
||||
ip, e1, err := net.ParseCIDR("10.0.0.1/32")
|
||||
if err != nil {
|
||||
@ -767,24 +568,24 @@ func TestFindLeader(t *testing.T) {
|
||||
nodes := []*Node{
|
||||
{
|
||||
Name: "a",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort),
|
||||
},
|
||||
{
|
||||
Name: "b",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort),
|
||||
},
|
||||
{
|
||||
Name: "c",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort),
|
||||
},
|
||||
{
|
||||
Name: "d",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e1.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e1.IP, DefaultKiloPort),
|
||||
Leader: true,
|
||||
},
|
||||
{
|
||||
Name: "2",
|
||||
Endpoint: &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: e2.IP}, Port: DefaultKiloPort},
|
||||
Endpoint: wireguard.NewEndpoint(e2.IP, DefaultKiloPort),
|
||||
Leader: true,
|
||||
},
|
||||
}
|
||||
@ -840,31 +641,38 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
p1 := &Peer{
|
||||
Name: "1",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key1"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
|
||||
PublicKey: key1,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p2 := &Peer{
|
||||
Name: "2",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key2"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key2,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
p3 := &Peer{
|
||||
Name: "3",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key3"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key3,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -872,10 +680,12 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
p4 := &Peer{
|
||||
Name: "4",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key4"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key4,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -898,9 +708,11 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
{
|
||||
Name: "2",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key2"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key2,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -914,9 +726,11 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
{
|
||||
Name: "1",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key1"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key1,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -930,19 +744,25 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
{
|
||||
Name: "2",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key2"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "1",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key1"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "4",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key4"),
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key4,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -954,19 +774,23 @@ func TestDeduplicatePeerIPs(t *testing.T) {
|
||||
{
|
||||
Name: "4",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key4"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key4,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.3"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "1",
|
||||
Peer: wireguard.Peer{
|
||||
PublicKey: []byte("key1"),
|
||||
AllowedIPs: []*net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
PeerConfig: wgtypes.PeerConfig{
|
||||
PublicKey: key1,
|
||||
AllowedIPs: []net.IPNet{
|
||||
{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
|
||||
{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -985,12 +809,12 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
topo := mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive)
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
allowedLocationIPs map[int][]*net.IPNet
|
||||
result map[int][]*net.IPNet
|
||||
allowedLocationIPs map[int][]net.IPNet
|
||||
result map[int][]net.IPNet
|
||||
}{
|
||||
{
|
||||
name: "nothing to filter",
|
||||
allowedLocationIPs: map[int][]*net.IPNet{
|
||||
allowedLocationIPs: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.4/32"),
|
||||
},
|
||||
@ -1002,7 +826,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
mustParseCIDR("192.168.178.7/32"),
|
||||
},
|
||||
},
|
||||
result: map[int][]*net.IPNet{
|
||||
result: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.4/32"),
|
||||
},
|
||||
@ -1017,7 +841,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "intersections between segments",
|
||||
allowedLocationIPs: map[int][]*net.IPNet{
|
||||
allowedLocationIPs: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.4/32"),
|
||||
mustParseCIDR("192.168.178.8/32"),
|
||||
@ -1031,7 +855,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
mustParseCIDR("192.168.178.4/32"),
|
||||
},
|
||||
},
|
||||
result: map[int][]*net.IPNet{
|
||||
result: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.8/32"),
|
||||
},
|
||||
@ -1047,7 +871,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "intersections with wireGuardCIDR",
|
||||
allowedLocationIPs: map[int][]*net.IPNet{
|
||||
allowedLocationIPs: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("10.4.0.1/32"),
|
||||
mustParseCIDR("192.168.178.8/32"),
|
||||
@ -1060,7 +884,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
mustParseCIDR("192.168.178.7/32"),
|
||||
},
|
||||
},
|
||||
result: map[int][]*net.IPNet{
|
||||
result: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.8/32"),
|
||||
},
|
||||
@ -1075,7 +899,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "intersections with more than one allowedLocationIPs",
|
||||
allowedLocationIPs: map[int][]*net.IPNet{
|
||||
allowedLocationIPs: map[int][]net.IPNet{
|
||||
0: {
|
||||
mustParseCIDR("192.168.178.8/32"),
|
||||
},
|
||||
@ -1086,7 +910,7 @@ func TestFilterAllowedIPs(t *testing.T) {
|
||||
mustParseCIDR("192.168.178.7/24"),
|
||||
},
|
||||
},
|
||||
result: map[int][]*net.IPNet{
|
||||
result: map[int][]net.IPNet{
|
||||
0: {},
|
||||
1: {},
|
||||
2: {
|
||||
|
@ -15,16 +15,15 @@
|
||||
package wireguard
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
)
|
||||
|
||||
@ -32,10 +31,6 @@ type section string
|
||||
type key string
|
||||
|
||||
const (
|
||||
separator = "="
|
||||
dumpSeparator = "\t"
|
||||
dumpNone = "(none)"
|
||||
dumpOff = "off"
|
||||
interfaceSection section = "Interface"
|
||||
peerSection section = "Peer"
|
||||
listenPortKey key = "ListenPort"
|
||||
@ -47,56 +42,209 @@ const (
|
||||
publicKeyKey key = "PublicKey"
|
||||
)
|
||||
|
||||
type dumpInterfaceIndex int
|
||||
|
||||
const (
|
||||
dumpInterfacePrivateKeyIndex = iota
|
||||
dumpInterfacePublicKeyIndex
|
||||
dumpInterfaceListenPortIndex
|
||||
dumpInterfaceFWMarkIndex
|
||||
dumpInterfaceLen
|
||||
)
|
||||
|
||||
type dumpPeerIndex int
|
||||
|
||||
const (
|
||||
dumpPeerPublicKeyIndex = iota
|
||||
dumpPeerPresharedKeyIndex
|
||||
dumpPeerEndpointIndex
|
||||
dumpPeerAllowedIPsIndex
|
||||
dumpPeerLatestHandshakeIndex
|
||||
dumpPeerTransferRXIndex
|
||||
dumpPeerTransferTXIndex
|
||||
dumpPeerPersistentKeepaliveIndex
|
||||
dumpPeerLen
|
||||
)
|
||||
|
||||
// Conf represents a WireGuard configuration file.
|
||||
type Conf struct {
|
||||
Interface *Interface
|
||||
Peers []*Peer
|
||||
wgtypes.Config
|
||||
// The Peers field is shadowed because every Peer needs the Endpoint field that contains a DNS endpoint.
|
||||
Peers []Peer
|
||||
}
|
||||
|
||||
// Interface represents the `interface` section of a WireGuard configuration.
|
||||
type Interface struct {
|
||||
ListenPort uint32
|
||||
PrivateKey []byte
|
||||
// WGConfig returns a wgtypes.Config from a Conf.
func (c *Conf) WGConfig() wgtypes.Config {
|
||||
if c == nil {
|
||||
// The empty Config will do nothing when applied.
|
||||
return wgtypes.Config{}
|
||||
}
|
||||
r := c.Config
|
||||
wgPs := make([]wgtypes.PeerConfig, len(c.Peers))
|
||||
for i, p := range c.Peers {
|
||||
wgPs[i] = p.PeerConfig
|
||||
if p.Endpoint.Resolved() {
|
||||
// We can ignore the error because the line above already checked that the Endpoint is resolved.
|
||||
wgPs[i].Endpoint, _ = p.Endpoint.UDPAddr(false)
|
||||
}
|
||||
wgPs[i].ReplaceAllowedIPs = true
|
||||
}
|
||||
r.Peers = wgPs
|
||||
r.ReplacePeers = true
|
||||
return r
|
||||
}
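The replace flags that WGConfig sets are what make it safe to hand the result straight to wgctrl. Below is a minimal sketch of what it produces for a Conf with one resolved peer; the key bytes and the address are placeholders, not values from this change.

// Sketch: inspect the wgtypes.Config that WGConfig produces.
package main

import (
	"fmt"
	"net"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"

	"github.com/squat/kilo/pkg/wireguard"
)

func main() {
	c := &wireguard.Conf{
		Peers: []wireguard.Peer{
			{
				PeerConfig: wgtypes.PeerConfig{PublicKey: wgtypes.Key{'k', 'e', 'y'}},
				Endpoint:   wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820),
			},
		},
	}
	cfg := c.WGConfig()
	// Peers are flattened to wgtypes.PeerConfig, the resolved Endpoint is copied
	// over as a *net.UDPAddr, and the replace flags are set so stale peers and
	// allowed IPs on the device are dropped when the config is applied.
	fmt.Println(cfg.ReplacePeers)               // true
	fmt.Println(cfg.Peers[0].ReplaceAllowedIPs) // true
	fmt.Println(cfg.Peers[0].Endpoint)          // 10.0.0.1:51820
}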
|
||||
|
||||
// Endpoint represents a WireGuard endpoint.
|
||||
type Endpoint struct {
|
||||
udpAddr *net.UDPAddr
|
||||
addr string
|
||||
}
|
||||
|
||||
// ParseEndpoint returns an Endpoint from a string.
|
||||
// The input should look like "10.0.0.0:100", "[ff10::10]:100"
|
||||
// or "example.com:100".
|
||||
func ParseEndpoint(endpoint string) *Endpoint {
|
||||
if len(endpoint) == 0 {
|
||||
return nil
|
||||
}
|
||||
hostRaw, portRaw, err := net.SplitHostPort(endpoint)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
port, err := strconv.ParseUint(portRaw, 10, 32)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(validation.IsValidPortNum(int(port))) != 0 {
|
||||
return nil
|
||||
}
|
||||
ip := net.ParseIP(hostRaw)
|
||||
if ip == nil {
|
||||
if len(validation.IsDNS1123Subdomain(hostRaw)) == 0 {
|
||||
return &Endpoint{
|
||||
addr: endpoint,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// ResolveUDPAddr will not resolve the endpoint as long as a valid IP and port are given.
// This should be the case here.
|
||||
u, err := net.ResolveUDPAddr("udp", endpoint)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
u.IP = cutIP(u.IP)
|
||||
return &Endpoint{
|
||||
udpAddr: u,
|
||||
}
|
||||
}
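ParseEndpoint accepts either an IP-and-port or a DN-and-port, and only the IP form comes back already resolved. A small usage sketch of that difference; the addresses are illustrative.

// Sketch: the two input shapes ParseEndpoint accepts and how they differ.
package main

import (
	"fmt"

	"github.com/squat/kilo/pkg/wireguard"
)

func main() {
	byIP := wireguard.ParseEndpoint("10.0.0.1:51820")
	byDNS := wireguard.ParseEndpoint("vpn.example.com:51820")

	// An IP endpoint is resolved immediately; a DNS endpoint is not.
	fmt.Println(byIP.Resolved(), byIP.HasDNS())   // true false
	fmt.Println(byDNS.Resolved(), byDNS.HasDNS()) // false true

	// UDPAddr(false) refuses to resolve the DN; UDPAddr(true) would look it up.
	if _, err := byDNS.UDPAddr(false); err != nil {
		fmt.Println("not resolved yet:", err)
	}

	fmt.Println(byIP.String(), byDNS.Port()) // 10.0.0.1:51820 51820
}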
|
||||
|
||||
// NewEndpointFromUDPAddr returns an Endpoint from a net.UDPAddr.
|
||||
func NewEndpointFromUDPAddr(u *net.UDPAddr) *Endpoint {
|
||||
if u != nil {
|
||||
u.IP = cutIP(u.IP)
|
||||
}
|
||||
return &Endpoint{
|
||||
udpAddr: u,
|
||||
}
|
||||
}
|
||||
|
||||
// NewEndpoint returns an Endpoint from a net.IP and port.
|
||||
func NewEndpoint(ip net.IP, port int) *Endpoint {
|
||||
return &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: cutIP(ip),
|
||||
Port: port,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Ready returns true if the Endpoint is ready.
// Ready means that an IP or DN and a port exist.
func (e *Endpoint) Ready() bool {
|
||||
if e == nil {
|
||||
return false
|
||||
}
|
||||
return (e.udpAddr != nil && e.udpAddr.IP != nil && e.udpAddr.Port > 0) || len(e.addr) > 0
|
||||
}
|
||||
|
||||
// Port returns the port of the Endpoint.
|
||||
func (e *Endpoint) Port() int {
|
||||
if !e.Ready() {
|
||||
return 0
|
||||
}
|
||||
if e.udpAddr != nil {
|
||||
return e.udpAddr.Port
|
||||
}
|
||||
// We can ignore the errors here because the returned port will be "".
// That results in port 0 after the conversion to an int.
_, p, _ := net.SplitHostPort(e.addr)
|
||||
port, _ := strconv.ParseUint(p, 10, 32)
|
||||
return int(port)
|
||||
}
|
||||
|
||||
// HasDNS returns true if the endpoint has a DN.
|
||||
func (e *Endpoint) HasDNS() bool {
|
||||
return e != nil && e.addr != ""
|
||||
}
|
||||
|
||||
// DNS returns the DN of the Endpoint.
|
||||
func (e *Endpoint) DNS() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
_, s, _ := net.SplitHostPort(e.addr)
|
||||
return s
|
||||
}
|
||||
|
||||
// Resolved returns true if the DN of the Endpoint was resolved
// or if the Endpoint already holds a resolved address.
func (e *Endpoint) Resolved() bool {
|
||||
return e != nil && e.udpAddr != nil
|
||||
}
|
||||
|
||||
// UDPAddr returns the UDPAddr of the Endpoint. If resolve is false,
// UDPAddr will not try to resolve the DN if the Endpoint is not yet resolved.
func (e *Endpoint) UDPAddr(resolve bool) (*net.UDPAddr, error) {
|
||||
if !e.Ready() {
|
||||
return nil, errors.New("endpoint is not ready")
|
||||
}
|
||||
if e.udpAddr != nil {
|
||||
// Make a copy of the UDPAddr to protect it from modification outside this package.
|
||||
h := *e.udpAddr
|
||||
return &h, nil
|
||||
}
|
||||
if !resolve {
|
||||
return nil, errors.New("endpoint is not resolved")
|
||||
}
|
||||
var err error
|
||||
if e.udpAddr, err = net.ResolveUDPAddr("udp", e.addr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Make a copy of the UDPAddr to protect it from modification outside this package.
|
||||
h := *e.udpAddr
|
||||
return &h, nil
|
||||
}
|
||||
|
||||
// IP returns the IP address of the Endpoint or nil.
func (e *Endpoint) IP() net.IP {
|
||||
if !e.Resolved() {
|
||||
return nil
|
||||
}
|
||||
return e.udpAddr.IP
|
||||
}
|
||||
|
||||
// String returns the Endpoint as a string.
// If a DN exists, it takes precedence over the resolved endpoint.
func (e *Endpoint) String() string {
|
||||
return e.StringOpt(true)
|
||||
}
|
||||
|
||||
// StringOpt will return the string of the Endpoint.
|
||||
// If dnsFirst is false, the resolved Endpoint will
|
||||
// take precedence over the DN.
|
||||
func (e *Endpoint) StringOpt(dnsFirst bool) string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
if e.udpAddr != nil && (!dnsFirst || e.addr == "") {
|
||||
return e.udpAddr.String()
|
||||
}
|
||||
return e.addr
|
||||
}
|
||||
|
||||
// Equal returns true if the Endpoints are equal.
// If dnsFirst is false, the DN will only be compared if
|
||||
// the IPs are nil.
|
||||
func (e *Endpoint) Equal(b *Endpoint, dnsFirst bool) bool {
|
||||
return e.StringOpt(dnsFirst) == b.StringOpt(dnsFirst)
|
||||
}
|
||||
|
||||
// Peer represents a `peer` section of a WireGuard configuration.
|
||||
type Peer struct {
|
||||
AllowedIPs []*net.IPNet
|
||||
Endpoint *Endpoint
|
||||
PersistentKeepalive int
|
||||
PresharedKey []byte
|
||||
PublicKey []byte
|
||||
// The following fields are part of the runtime information, not the configuration.
|
||||
LatestHandshake time.Time
|
||||
wgtypes.PeerConfig
|
||||
Endpoint *Endpoint
|
||||
}
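With wgtypes.PeerConfig embedded, a Peer is now the standard wgtypes fields plus the shadowing Endpoint, which is kept outside PeerConfig so it can hold a DN until it is resolved. A construction sketch; the key and addresses are placeholders.

// Sketch: building a Peer in the new wgtypes-based shape.
package main

import (
	"fmt"
	"net"
	"time"

	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"

	"github.com/squat/kilo/pkg/wireguard"
)

func main() {
	key, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	keepalive := 25 * time.Second
	peer := wireguard.Peer{
		PeerConfig: wgtypes.PeerConfig{
			PublicKey:                   key.PublicKey(),
			PersistentKeepaliveInterval: &keepalive,
			AllowedIPs: []net.IPNet{
				{IP: net.ParseIP("10.5.0.1").To4(), Mask: net.CIDRMask(24, 32)},
			},
		},
		// The endpoint may be a DN, so it lives next to, not inside, PeerConfig.
		Endpoint: wireguard.ParseEndpoint("vpn.example.com:51820"),
	}
	fmt.Println(peer.PublicKey, peer.Endpoint)
}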
|
||||
|
||||
// DeduplicateIPs eliminates duplicate allowed IPs.
|
||||
func (p *Peer) DeduplicateIPs() {
|
||||
var ips []*net.IPNet
|
||||
var ips []net.IPNet
|
||||
seen := make(map[string]struct{})
|
||||
for _, ip := range p.AllowedIPs {
|
||||
if _, ok := seen[ip.String()]; ok {
|
||||
@ -108,181 +256,27 @@ func (p *Peer) DeduplicateIPs() {
|
||||
p.AllowedIPs = ips
|
||||
}
|
||||
|
||||
// Endpoint represents an `endpoint` key of a `peer` section.
|
||||
type Endpoint struct {
|
||||
DNSOrIP
|
||||
Port uint32
|
||||
}
|
||||
|
||||
// String prints the string representation of the endpoint.
|
||||
func (e *Endpoint) String() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
dnsOrIP := e.DNSOrIP.String()
|
||||
if e.IP != nil && len(e.IP) == net.IPv6len {
|
||||
dnsOrIP = "[" + dnsOrIP + "]"
|
||||
}
|
||||
return dnsOrIP + ":" + strconv.FormatUint(uint64(e.Port), 10)
|
||||
}
|
||||
|
||||
// Equal compares two endpoints.
|
||||
func (e *Endpoint) Equal(b *Endpoint, DNSFirst bool) bool {
|
||||
if (e == nil) != (b == nil) {
|
||||
return false
|
||||
}
|
||||
if e != nil {
|
||||
if e.Port != b.Port {
|
||||
return false
|
||||
}
|
||||
if DNSFirst {
|
||||
// Check the DNS name first if it was resolved.
|
||||
if e.DNS != b.DNS {
|
||||
return false
|
||||
}
|
||||
if e.DNS == "" && !e.IP.Equal(b.IP) {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
// IPs take priority, so check them first.
|
||||
if !e.IP.Equal(b.IP) {
|
||||
return false
|
||||
}
|
||||
// Only check the DNS name if the IP is empty.
|
||||
if e.IP == nil && e.DNS != b.DNS {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// DNSOrIP represents either a DNS name or an IP address.
|
||||
// IPs, as they are more specific, are preferred.
|
||||
type DNSOrIP struct {
|
||||
DNS string
|
||||
IP net.IP
|
||||
}
|
||||
|
||||
// String prints the string representation of the struct.
|
||||
func (d DNSOrIP) String() string {
|
||||
if d.IP != nil {
|
||||
return d.IP.String()
|
||||
}
|
||||
return d.DNS
|
||||
}
|
||||
|
||||
// Parse parses a given WireGuard configuration file and produces a Conf struct.
|
||||
func Parse(buf []byte) *Conf {
|
||||
var (
|
||||
active section
|
||||
kv []string
|
||||
c Conf
|
||||
err error
|
||||
iface *Interface
|
||||
i int
|
||||
k key
|
||||
line, v string
|
||||
peer *Peer
|
||||
port uint64
|
||||
)
|
||||
s := bufio.NewScanner(bytes.NewBuffer(buf))
|
||||
for s.Scan() {
|
||||
line = strings.TrimSpace(s.Text())
|
||||
// Skip comments.
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
// Line is a section title.
|
||||
if strings.HasPrefix(line, "[") {
|
||||
if peer != nil {
|
||||
c.Peers = append(c.Peers, peer)
|
||||
peer = nil
|
||||
}
|
||||
if iface != nil {
|
||||
c.Interface = iface
|
||||
iface = nil
|
||||
}
|
||||
active = section(strings.TrimSpace(strings.Trim(line, "[]")))
|
||||
switch active {
|
||||
case interfaceSection:
|
||||
iface = new(Interface)
|
||||
case peerSection:
|
||||
peer = new(Peer)
|
||||
}
|
||||
continue
|
||||
}
|
||||
kv = strings.SplitN(line, separator, 2)
|
||||
if len(kv) != 2 {
|
||||
continue
|
||||
}
|
||||
k = key(strings.TrimSpace(kv[0]))
|
||||
v = strings.TrimSpace(kv[1])
|
||||
switch active {
|
||||
case interfaceSection:
|
||||
switch k {
|
||||
case listenPortKey:
|
||||
port, err = strconv.ParseUint(v, 10, 32)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
iface.ListenPort = uint32(port)
|
||||
case privateKeyKey:
|
||||
iface.PrivateKey = []byte(v)
|
||||
}
|
||||
case peerSection:
|
||||
switch k {
|
||||
case allowedIPsKey:
|
||||
err = peer.parseAllowedIPs(v)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
case endpointKey:
|
||||
err = peer.parseEndpoint(v)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
case persistentKeepaliveKey:
|
||||
i, err = strconv.Atoi(v)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
peer.PersistentKeepalive = i
|
||||
case presharedKeyKey:
|
||||
peer.PresharedKey = []byte(v)
|
||||
case publicKeyKey:
|
||||
peer.PublicKey = []byte(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
if peer != nil {
|
||||
c.Peers = append(c.Peers, peer)
|
||||
}
|
||||
if iface != nil {
|
||||
c.Interface = iface
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Bytes renders a WireGuard configuration to bytes.
|
||||
func (c *Conf) Bytes() ([]byte, error) {
|
||||
if c == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var err error
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 512))
|
||||
if c.Interface != nil {
|
||||
if c.PrivateKey != nil {
|
||||
if err = writeSection(buf, interfaceSection); err != nil {
|
||||
return nil, fmt.Errorf("failed to write interface: %v", err)
|
||||
}
|
||||
if err = writePKey(buf, privateKeyKey, c.Interface.PrivateKey); err != nil {
|
||||
if err = writePKey(buf, privateKeyKey, c.PrivateKey); err != nil {
|
||||
return nil, fmt.Errorf("failed to write private key: %v", err)
|
||||
}
|
||||
if err = writeValue(buf, listenPortKey, strconv.FormatUint(uint64(c.Interface.ListenPort), 10)); err != nil {
|
||||
if err = writeValue(buf, listenPortKey, strconv.Itoa(*c.ListenPort)); err != nil {
|
||||
return nil, fmt.Errorf("failed to write listen port: %v", err)
|
||||
}
|
||||
}
|
||||
for i, p := range c.Peers {
|
||||
// Add newlines to make the formatting nicer.
|
||||
if i == 0 && c.Interface != nil || i != 0 {
|
||||
if i == 0 && c.PrivateKey != nil || i != 0 {
|
||||
if err = buf.WriteByte('\n'); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -297,71 +291,97 @@ func (c *Conf) Bytes() ([]byte, error) {
|
||||
if err = writeEndpoint(buf, p.Endpoint); err != nil {
|
||||
return nil, fmt.Errorf("failed to write endpoint: %v", err)
|
||||
}
|
||||
if err = writeValue(buf, persistentKeepaliveKey, strconv.Itoa(p.PersistentKeepalive)); err != nil {
|
||||
if p.PersistentKeepaliveInterval == nil {
|
||||
p.PersistentKeepaliveInterval = new(time.Duration)
|
||||
}
|
||||
if err = writeValue(buf, persistentKeepaliveKey, strconv.FormatUint(uint64(*p.PersistentKeepaliveInterval/time.Second), 10)); err != nil {
|
||||
return nil, fmt.Errorf("failed to write persistent keepalive: %v", err)
|
||||
}
|
||||
if err = writePKey(buf, presharedKeyKey, p.PresharedKey); err != nil {
|
||||
return nil, fmt.Errorf("failed to write preshared key: %v", err)
|
||||
}
|
||||
if err = writePKey(buf, publicKeyKey, p.PublicKey); err != nil {
|
||||
if err = writePKey(buf, publicKeyKey, &p.PublicKey); err != nil {
|
||||
return nil, fmt.Errorf("failed to write public key: %v", err)
|
||||
}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Equal checks if two WireGuard configurations are equivalent.
|
||||
func (c *Conf) Equal(b *Conf) bool {
|
||||
if (c.Interface == nil) != (b.Interface == nil) {
|
||||
return false
|
||||
// Equal returns true if the Conf and wgtypes.Device are equal.
|
||||
func (c *Conf) Equal(d *wgtypes.Device) (bool, string) {
|
||||
if c == nil || d == nil {
|
||||
return c == nil && d == nil, "nil values"
|
||||
}
|
||||
if c.Interface != nil {
|
||||
if c.Interface.ListenPort != b.Interface.ListenPort || !bytes.Equal(c.Interface.PrivateKey, b.Interface.PrivateKey) {
|
||||
return false
|
||||
}
|
||||
if c.ListenPort == nil || *c.ListenPort != d.ListenPort {
|
||||
return false, fmt.Sprintf("port: old=%q, new=\"%v\"", d.ListenPort, c.ListenPort)
|
||||
}
|
||||
if len(c.Peers) != len(b.Peers) {
|
||||
return false
|
||||
if c.PrivateKey == nil || *c.PrivateKey != d.PrivateKey {
|
||||
return false, fmt.Sprintf("private key: old=\"%s...\", new=\"%s\"", d.PrivateKey.String()[0:5], c.PrivateKey.String()[0:5])
|
||||
}
|
||||
if len(c.Peers) != len(d.Peers) {
|
||||
return false, fmt.Sprintf("number of peers: old=%d, new=%d", len(d.Peers), len(c.Peers))
|
||||
}
|
||||
sortPeerConfigs(d.Peers)
|
||||
sortPeers(c.Peers)
|
||||
sortPeers(b.Peers)
|
||||
for i := range c.Peers {
|
||||
if len(c.Peers[i].AllowedIPs) != len(b.Peers[i].AllowedIPs) {
|
||||
return false
|
||||
if len(c.Peers[i].AllowedIPs) != len(d.Peers[i].AllowedIPs) {
|
||||
return false, fmt.Sprintf("Peer %d allowed IP length: old=%d, new=%d", i, len(d.Peers[i].AllowedIPs), len(c.Peers[i].AllowedIPs))
|
||||
}
|
||||
sortCIDRs(c.Peers[i].AllowedIPs)
|
||||
sortCIDRs(b.Peers[i].AllowedIPs)
|
||||
sortCIDRs(d.Peers[i].AllowedIPs)
|
||||
for j := range c.Peers[i].AllowedIPs {
|
||||
if c.Peers[i].AllowedIPs[j].String() != b.Peers[i].AllowedIPs[j].String() {
|
||||
return false
|
||||
if c.Peers[i].AllowedIPs[j].String() != d.Peers[i].AllowedIPs[j].String() {
|
||||
return false, fmt.Sprintf("Peer %d allowed IP: old=%q, new=%q", i, d.Peers[i].AllowedIPs[j].String(), c.Peers[i].AllowedIPs[j].String())
|
||||
}
|
||||
}
|
||||
if !c.Peers[i].Endpoint.Equal(b.Peers[i].Endpoint, false) {
|
||||
return false
|
||||
if c.Peers[i].Endpoint == nil || d.Peers[i].Endpoint == nil {
|
||||
return c.Peers[i].Endpoint == nil && d.Peers[i].Endpoint == nil, "peer endpoints: nil value"
|
||||
}
|
||||
if c.Peers[i].PersistentKeepalive != b.Peers[i].PersistentKeepalive || !bytes.Equal(c.Peers[i].PresharedKey, b.Peers[i].PresharedKey) || !bytes.Equal(c.Peers[i].PublicKey, b.Peers[i].PublicKey) {
|
||||
return false
|
||||
if c.Peers[i].Endpoint.StringOpt(false) != d.Peers[i].Endpoint.String() {
|
||||
return false, fmt.Sprintf("Peer %d endpoint: old=%q, new=%q", i, d.Peers[i].Endpoint.String(), c.Peers[i].Endpoint.StringOpt(false))
|
||||
}
|
||||
|
||||
pki := time.Duration(0)
|
||||
if p := c.Peers[i].PersistentKeepaliveInterval; p != nil {
|
||||
pki = *p
|
||||
}
|
||||
psk := wgtypes.Key{}
|
||||
if p := c.Peers[i].PresharedKey; p != nil {
|
||||
psk = *p
|
||||
}
|
||||
if pki != d.Peers[i].PersistentKeepaliveInterval || psk != d.Peers[i].PresharedKey || c.Peers[i].PublicKey != d.Peers[i].PublicKey {
|
||||
return false, "persistent keepalive or pershared key"
|
||||
}
|
||||
}
|
||||
return true
|
||||
return true, ""
|
||||
}
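Because Equal now diffs the desired Conf against the live wgtypes.Device and names the first difference, a caller can log why it is about to reconfigure. A rough sketch of that pattern; the interface name kilo0 and the logging are illustrative, not part of this change.

// Sketch: reconfigure a device only when the desired Conf has drifted from it.
package main

import (
	"log"

	"golang.zx2c4.com/wireguard/wgctrl"

	"github.com/squat/kilo/pkg/wireguard"
)

func reconcile(client *wgctrl.Client, iface string, desired *wireguard.Conf) error {
	device, err := client.Device(iface)
	if err != nil {
		return err
	}
	if equal, diff := desired.Equal(device); !equal {
		// diff names the first mismatch, e.g. "number of peers: old=2, new=3".
		log.Printf("WireGuard config of %s is stale (%s); reapplying", iface, diff)
		return client.ConfigureDevice(iface, desired.WGConfig())
	}
	return nil
}

func main() {
	client, err := wgctrl.New()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := reconcile(client, "kilo0", &wireguard.Conf{}); err != nil {
		log.Fatal(err)
	}
}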
|
||||
|
||||
func sortPeers(peers []*Peer) {
|
||||
func sortPeerConfigs(peers []wgtypes.Peer) {
|
||||
sort.Slice(peers, func(i, j int) bool {
|
||||
if bytes.Compare(peers[i].PublicKey, peers[j].PublicKey) < 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return peers[i].PublicKey.String() < peers[j].PublicKey.String()
|
||||
})
|
||||
}
|
||||
|
||||
func sortCIDRs(cidrs []*net.IPNet) {
|
||||
func sortPeers(peers []Peer) {
|
||||
sort.Slice(peers, func(i, j int) bool {
|
||||
return peers[i].PublicKey.String() < peers[j].PublicKey.String()
|
||||
})
|
||||
}
|
||||
|
||||
func sortCIDRs(cidrs []net.IPNet) {
|
||||
sort.Slice(cidrs, func(i, j int) bool {
|
||||
return cidrs[i].String() < cidrs[j].String()
|
||||
})
|
||||
}
|
||||
|
||||
func writeAllowedIPs(buf *bytes.Buffer, ais []*net.IPNet) error {
|
||||
func cutIP(ip net.IP) net.IP {
|
||||
if i4 := ip.To4(); i4 != nil {
|
||||
return i4
|
||||
}
|
||||
return ip.To16()
|
||||
}
|
||||
|
||||
func writeAllowedIPs(buf *bytes.Buffer, ais []net.IPNet) error {
|
||||
if len(ais) == 0 {
|
||||
return nil
|
||||
}
|
||||
@ -382,15 +402,16 @@ func writeAllowedIPs(buf *bytes.Buffer, ais []*net.IPNet) error {
|
||||
return buf.WriteByte('\n')
|
||||
}
|
||||
|
||||
func writePKey(buf *bytes.Buffer, k key, b []byte) error {
|
||||
if len(b) == 0 {
|
||||
func writePKey(buf *bytes.Buffer, k key, b *wgtypes.Key) error {
|
||||
// Print nothing if the public key was never initialized.
|
||||
if b == nil || (wgtypes.Key{}) == *b {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
if err = writeKey(buf, k); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = buf.Write(b); err != nil {
|
||||
if _, err = buf.Write([]byte(b.String())); err != nil {
|
||||
return err
|
||||
}
|
||||
return buf.WriteByte('\n')
|
||||
@ -408,14 +429,15 @@ func writeValue(buf *bytes.Buffer, k key, v string) error {
|
||||
}
|
||||
|
||||
func writeEndpoint(buf *bytes.Buffer, e *Endpoint) error {
|
||||
if e == nil {
|
||||
str := e.String()
|
||||
if str == "" {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
if err = writeKey(buf, endpointKey); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = buf.WriteString(e.String()); err != nil {
|
||||
if _, err = buf.WriteString(str); err != nil {
|
||||
return err
|
||||
}
|
||||
return buf.WriteByte('\n')
|
||||
@ -443,177 +465,3 @@ func writeKey(buf *bytes.Buffer, k key) error {
|
||||
_, err = buf.WriteString(" = ")
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
errParseEndpoint = errors.New("could not parse Endpoint")
|
||||
)
|
||||
|
||||
func (p *Peer) parseEndpoint(v string) error {
|
||||
var (
|
||||
kv []string
|
||||
err error
|
||||
ip, ip4 net.IP
|
||||
port uint64
|
||||
)
|
||||
kv = strings.Split(v, ":")
|
||||
if len(kv) < 2 {
|
||||
return errParseEndpoint
|
||||
}
|
||||
port, err = strconv.ParseUint(kv[len(kv)-1], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d := DNSOrIP{}
|
||||
ip = net.ParseIP(strings.Trim(strings.Join(kv[:len(kv)-1], ":"), "[]"))
|
||||
if ip == nil {
|
||||
if len(validation.IsDNS1123Subdomain(kv[0])) != 0 {
|
||||
return errParseEndpoint
|
||||
}
|
||||
d.DNS = kv[0]
|
||||
} else {
|
||||
if ip4 = ip.To4(); ip4 != nil {
|
||||
d.IP = ip4
|
||||
} else {
|
||||
d.IP = ip.To16()
|
||||
}
|
||||
}
|
||||
|
||||
p.Endpoint = &Endpoint{
|
||||
DNSOrIP: d,
|
||||
Port: uint32(port),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Peer) parseAllowedIPs(v string) error {
|
||||
var (
|
||||
ai *net.IPNet
|
||||
kv []string
|
||||
err error
|
||||
i int
|
||||
ip, ip4 net.IP
|
||||
)
|
||||
|
||||
kv = strings.Split(v, ",")
|
||||
for i = range kv {
|
||||
ip, ai, err = net.ParseCIDR(strings.TrimSpace(kv[i]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ip4 = ip.To4(); ip4 != nil {
|
||||
ip = ip4
|
||||
} else {
|
||||
ip = ip.To16()
|
||||
}
|
||||
ai.IP = ip
|
||||
p.AllowedIPs = append(p.AllowedIPs, ai)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseDump parses a given WireGuard dump and produces a Conf struct.
|
||||
func ParseDump(buf []byte) (*Conf, error) {
|
||||
// from man wg, show section:
|
||||
// If dump is specified, then several lines are printed;
|
||||
// the first contains in order separated by tab: private-key, public-key, listen-port, fw‐mark.
|
||||
// Subsequent lines are printed for each peer and contain in order separated by tab:
|
||||
// public-key, preshared-key, endpoint, allowed-ips, latest-handshake, transfer-rx, transfer-tx, persistent-keepalive.
|
||||
var (
|
||||
active section
|
||||
values []string
|
||||
c Conf
|
||||
err error
|
||||
iface *Interface
|
||||
peer *Peer
|
||||
port uint64
|
||||
sec int64
|
||||
pka int
|
||||
line int
|
||||
)
|
||||
// First line is Interface
|
||||
active = interfaceSection
|
||||
s := bufio.NewScanner(bytes.NewBuffer(buf))
|
||||
for s.Scan() {
|
||||
values = strings.Split(s.Text(), dumpSeparator)
|
||||
|
||||
switch active {
|
||||
case interfaceSection:
|
||||
if len(values) < dumpInterfaceLen {
|
||||
return nil, fmt.Errorf("invalid interface line: missing fields (%d < %d)", len(values), dumpInterfaceLen)
|
||||
}
|
||||
iface = new(Interface)
|
||||
for i := range values {
|
||||
switch i {
|
||||
case dumpInterfacePrivateKeyIndex:
|
||||
iface.PrivateKey = []byte(values[i])
|
||||
case dumpInterfaceListenPortIndex:
|
||||
port, err = strconv.ParseUint(values[i], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid interface line: error parsing listen-port: %w", err)
|
||||
}
|
||||
iface.ListenPort = uint32(port)
|
||||
}
|
||||
}
|
||||
c.Interface = iface
|
||||
// Next lines are Peers
|
||||
active = peerSection
|
||||
case peerSection:
|
||||
if len(values) < dumpPeerLen {
|
||||
return nil, fmt.Errorf("invalid peer line %d: missing fields (%d < %d)", line, len(values), dumpPeerLen)
|
||||
}
|
||||
peer = new(Peer)
|
||||
|
||||
for i := range values {
|
||||
switch i {
|
||||
case dumpPeerPublicKeyIndex:
|
||||
peer.PublicKey = []byte(values[i])
|
||||
case dumpPeerPresharedKeyIndex:
|
||||
if values[i] == dumpNone {
|
||||
continue
|
||||
}
|
||||
peer.PresharedKey = []byte(values[i])
|
||||
case dumpPeerEndpointIndex:
|
||||
if values[i] == dumpNone {
|
||||
continue
|
||||
}
|
||||
err = peer.parseEndpoint(values[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid peer line %d: error parsing endpoint: %w", line, err)
|
||||
}
|
||||
case dumpPeerAllowedIPsIndex:
|
||||
if values[i] == dumpNone {
|
||||
continue
|
||||
}
|
||||
err = peer.parseAllowedIPs(values[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid peer line %d: error parsing allowed-ips: %w", line, err)
|
||||
}
|
||||
case dumpPeerLatestHandshakeIndex:
|
||||
if values[i] == "0" {
|
||||
// Use go zero value, not unix 0 timestamp.
|
||||
peer.LatestHandshake = time.Time{}
|
||||
continue
|
||||
}
|
||||
sec, err = strconv.ParseInt(values[i], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid peer line %d: error parsing latest-handshake: %w", line, err)
|
||||
}
|
||||
peer.LatestHandshake = time.Unix(sec, 0)
|
||||
case dumpPeerPersistentKeepaliveIndex:
|
||||
if values[i] == dumpOff {
|
||||
continue
|
||||
}
|
||||
pka, err = strconv.Atoi(values[i])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid peer line %d: error parsing persistent-keepalive: %w", line, err)
|
||||
}
|
||||
peer.PersistentKeepalive = pka
|
||||
}
|
||||
}
|
||||
c.Peers = append(c.Peers, peer)
|
||||
peer = nil
|
||||
}
|
||||
line++
|
||||
}
|
||||
return &c, nil
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2019 the Kilo authors
|
||||
// Copyright 2021 the Kilo authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@ -21,336 +21,431 @@ import (
|
||||
"github.com/kylelemons/godebug/pretty"
|
||||
)
|
||||
|
||||
func TestCompareConf(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
func TestNewEndpoint(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
name string
|
||||
a []byte
|
||||
b []byte
|
||||
out bool
|
||||
ip net.IP
|
||||
port int
|
||||
out *Endpoint
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
a: []byte{},
|
||||
b: []byte{},
|
||||
out: true,
|
||||
name: "no ip, no port",
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "key and value order",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Interface]
|
||||
ListenPort = 51820
|
||||
PrivateKey = private
|
||||
|
||||
[Peer]
|
||||
PublicKey = key
|
||||
AllowedIPs = 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32, 10.2.2.0/24
|
||||
PresharedKey = psk
|
||||
Endpoint = 10.1.0.2:51820
|
||||
`),
|
||||
out: true,
|
||||
name: "only port",
|
||||
ip: nil,
|
||||
port: 99,
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 99,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "whitespace",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Interface]
|
||||
PrivateKey=private
|
||||
ListenPort=51820
|
||||
[Peer]
|
||||
Endpoint=10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey=key
|
||||
AllowedIPs=10.2.2.0/24,192.168.0.1/32,10.2.3.0/24,192.168.0.2/32,10.4.0.2/32
|
||||
`),
|
||||
out: true,
|
||||
name: "only ipv4",
|
||||
ip: net.ParseIP("10.0.0.0"),
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0").To4(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "missing key",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
out: false,
|
||||
name: "only ipv6",
|
||||
ip: net.ParseIP("ff50::10"),
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10").To16(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different value",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PublicKey = key2
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
out: false,
|
||||
name: "ipv4",
|
||||
ip: net.ParseIP("10.0.0.0"),
|
||||
port: 1000,
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0").To4(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "section order",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
|
||||
[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
`),
|
||||
out: true,
|
||||
name: "ipv6",
|
||||
ip: net.ParseIP("ff50::10"),
|
||||
port: 1000,
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10").To16(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "out of order peers",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk2
|
||||
PublicKey = key2
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk1
|
||||
PublicKey = key1
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk1
|
||||
PublicKey = key1
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk2
|
||||
PublicKey = key2
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "one empty",
|
||||
a: []byte(`[Interface]
|
||||
PrivateKey = private
|
||||
ListenPort = 51820
|
||||
|
||||
[Peer]
|
||||
Endpoint = 10.1.0.2:51820
|
||||
PresharedKey = psk
|
||||
PublicKey = key
|
||||
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
|
||||
`),
|
||||
b: []byte(``),
|
||||
out: false,
|
||||
name: "ipv6",
|
||||
ip: net.ParseIP("fc00:f853:ccd:e793::3"),
|
||||
port: 51820,
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("fc00:f853:ccd:e793::3").To16(),
|
||||
Port: 51820,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
equal := Parse(tc.a).Equal(Parse(tc.b))
|
||||
if equal != tc.out {
|
||||
t.Errorf("test case %q: expected %t, got %t", tc.name, tc.out, equal)
|
||||
out := NewEndpoint(tc.ip, tc.port)
|
||||
if diff := pretty.Compare(out, tc.out); diff != "" {
|
||||
t.Errorf("%d %s: got diff:\n%s\n", i, tc.name, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareEndpoint(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
a *Endpoint
|
||||
b *Endpoint
|
||||
dnsFirst bool
|
||||
out bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
a: nil,
|
||||
b: nil,
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "a nil",
|
||||
a: nil,
|
||||
b: &Endpoint{},
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "b nil",
|
||||
a: &Endpoint{},
|
||||
b: nil,
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "zero",
|
||||
a: &Endpoint{},
|
||||
b: &Endpoint{},
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "diff port",
|
||||
a: &Endpoint{Port: 1234},
|
||||
b: &Endpoint{Port: 5678},
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "same IP",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "diff IP",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1")}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2")}},
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "same IP ignore DNS",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "a"}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "b"}},
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "no IP check DNS",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "b"}},
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "no IP check DNS (same)",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "DNS first, ignore IP",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: "a"}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2"), DNS: "a"}},
|
||||
dnsFirst: true,
|
||||
out: true,
|
||||
},
|
||||
{
|
||||
name: "DNS first",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "a"}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{DNS: "b"}},
|
||||
dnsFirst: true,
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "DNS first, no DNS compare IP",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.2"), DNS: ""}},
|
||||
dnsFirst: true,
|
||||
out: false,
|
||||
},
|
||||
{
|
||||
name: "DNS first, no DNS compare IP (same)",
|
||||
a: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
|
||||
b: &Endpoint{Port: 1234, DNSOrIP: DNSOrIP{IP: net.ParseIP("192.168.0.1"), DNS: ""}},
|
||||
dnsFirst: true,
|
||||
out: true,
|
||||
},
|
||||
} {
|
||||
equal := tc.a.Equal(tc.b, tc.dnsFirst)
|
||||
if equal != tc.out {
|
||||
t.Errorf("test case %q: expected %t, got %t", tc.name, tc.out, equal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareDumpConf(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
func TestParseEndpoint(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
name string
|
||||
d []byte
|
||||
c []byte
|
||||
str string
|
||||
out *Endpoint
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
d: []byte{},
|
||||
c: []byte{},
|
||||
name: "no ip, no port",
|
||||
},
|
||||
{
|
||||
name: "redacted copy from wg output",
|
||||
d: []byte(`private B7qk8EMlob0nfado0ABM6HulUV607r4yqtBKjhap7S4= 51820 off
|
||||
key1 (none) 10.254.1.1:51820 100.64.1.0/24,192.168.0.125/32,10.4.0.1/32 1619012801 67048 34952 10
|
||||
key2 (none) 10.254.2.1:51820 100.64.4.0/24,10.69.76.55/32,100.64.3.0/24,10.66.25.131/32,10.4.0.2/32 1619013058 1134456 10077852 10`),
|
||||
c: []byte(`[Interface]
|
||||
ListenPort = 51820
|
||||
PrivateKey = private
|
||||
|
||||
[Peer]
|
||||
PublicKey = key1
|
||||
AllowedIPs = 100.64.1.0/24, 192.168.0.125/32, 10.4.0.1/32
|
||||
Endpoint = 10.254.1.1:51820
|
||||
PersistentKeepalive = 10
|
||||
|
||||
[Peer]
|
||||
PublicKey = key2
|
||||
AllowedIPs = 100.64.4.0/24, 10.69.76.55/32, 100.64.3.0/24, 10.66.25.131/32, 10.4.0.2/32
|
||||
Endpoint = 10.254.2.1:51820
|
||||
PersistentKeepalive = 10`),
|
||||
name: "only port",
|
||||
str: ":1000",
|
||||
},
|
||||
{
|
||||
name: "only ipv4",
|
||||
str: "10.0.0.0",
|
||||
},
|
||||
{
|
||||
name: "only ipv6",
|
||||
str: "ff50::10",
|
||||
},
|
||||
{
|
||||
name: "ipv4",
|
||||
str: "10.0.0.0:1000",
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0").To4(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
str: "[ff50::10]:1000",
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10").To16(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
|
||||
dumpConf, _ := ParseDump(tc.d)
|
||||
conf := Parse(tc.c)
|
||||
// Equal will ignore runtime fields and only compare configuration fields.
|
||||
if !dumpConf.Equal(conf) {
|
||||
diff := pretty.Compare(dumpConf, conf)
|
||||
t.Errorf("test case %q: got diff: %v", tc.name, diff)
|
||||
out := ParseEndpoint(tc.str)
|
||||
if diff := pretty.Compare(out, tc.out); diff != "" {
|
||||
t.Errorf("ParseEndpoint %s(%d): got diff:\n%s\n", tc.name, i, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewEndpointFromUDPAddr(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
name string
|
||||
u *net.UDPAddr
|
||||
out *Endpoint
|
||||
}{
|
||||
{
|
||||
name: "no ip, no port",
|
||||
out: &Endpoint{
|
||||
addr: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only port",
|
||||
u: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
addr: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only ipv4",
|
||||
u: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0").To4(),
|
||||
},
|
||||
addr: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only ipv6",
|
||||
u: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff60::10"),
|
||||
},
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff60::10").To16(),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv4",
|
||||
u: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
Port: 1000,
|
||||
},
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0").To4(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
u: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10"),
|
||||
Port: 1000,
|
||||
},
|
||||
out: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10").To16(),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
out := NewEndpointFromUDPAddr(tc.u)
|
||||
if diff := pretty.Compare(out, tc.out); diff != "" {
|
||||
t.Errorf("ParseEndpoint %s(%d): got diff:\n%s\n", tc.name, i, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReady(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
name string
|
||||
in *Endpoint
|
||||
r bool
|
||||
}{
|
||||
{
|
||||
name: "nil",
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "no ip, no port",
|
||||
in: &Endpoint{
|
||||
addr: "",
|
||||
udpAddr: &net.UDPAddr{},
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "only port",
|
||||
in: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "only ipv4",
|
||||
in: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "only ipv6",
|
||||
in: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff60::10"),
|
||||
},
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "ipv4",
|
||||
in: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
in: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
IP: net.ParseIP("ff50::10"),
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
} {
|
||||
if tc.r != tc.in.Ready() {
|
||||
t.Errorf("Endpoint.Ready() %s(%d): expected=%v\tgot=%v\n", tc.name, i, tc.r, tc.in.Ready())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
for i, tc := range []struct {
|
||||
name string
|
||||
a *Endpoint
|
||||
b *Endpoint
|
||||
df bool
|
||||
r bool
|
||||
}{
|
||||
{
|
||||
name: "nil dns last",
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "nil dns first",
|
||||
df: true,
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "equal: only port",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "not equal: only port",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
},
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1001,
|
||||
},
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "equal dns first",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "example.com:1000",
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "example.com:1000",
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "equal dns last",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "example.com:1000",
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "foo",
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
{
|
||||
name: "unequal dns first",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "example.com:1000",
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "foo",
|
||||
},
|
||||
df: true,
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "unequal dns last",
|
||||
a: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("10.0.0.0"),
|
||||
},
|
||||
addr: "foo",
|
||||
},
|
||||
b: &Endpoint{
|
||||
udpAddr: &net.UDPAddr{
|
||||
Port: 1000,
|
||||
IP: net.ParseIP("11.0.0.0"),
|
||||
},
|
||||
addr: "foo",
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "unequal dns last empty IP",
|
||||
a: &Endpoint{
|
||||
addr: "foo",
|
||||
},
|
||||
b: &Endpoint{
|
||||
addr: "bar",
|
||||
},
|
||||
r: false,
|
||||
},
|
||||
{
|
||||
name: "equal dns last empty IP",
|
||||
a: &Endpoint{
|
||||
addr: "foo",
|
||||
},
|
||||
b: &Endpoint{
|
||||
addr: "foo",
|
||||
},
|
||||
r: true,
|
||||
},
|
||||
} {
|
||||
if out := tc.a.Equal(tc.b, tc.df); out != tc.r {
|
||||
t.Errorf("ParseEndpoint %s(%d): expected: %v\tgot: %v\n", tc.name, i, tc.r, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
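The tests above exercise a small Endpoint API (NewEndpoint, ParseEndpoint, Ready, Equal). A short sketch of how it might be called, assuming the same package import path as elsewhere in this diff; the addresses are purely illustrative:

```go
package main

import (
	"fmt"
	"net"

	// Assumed import path; adjust to wherever the wireguard package lives.
	"github.com/squat/kilo/pkg/wireguard"
)

func main() {
	// Build the same endpoint two ways: from a "host:port" string and
	// from an IP and port pair.
	a := wireguard.ParseEndpoint("10.0.0.1:51820")
	b := wireguard.NewEndpoint(net.ParseIP("10.0.0.1"), 51820)

	// Ready reports whether the endpoint has both an IP and a port;
	// Equal's second argument controls whether DNS names are compared
	// before IP addresses.
	fmt.Println(a.Ready(), a.Equal(b, false))
}
```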
@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build linux
// +build linux

package wireguard

import (
    "bytes"
    "fmt"
    "os/exec"

    "github.com/vishvananda/netlink"
)
@@ -64,74 +63,3 @@ func New(name string, mtu uint) (int, bool, error) {
    }
    return link.Attrs().Index, true, nil
}

// Keys generates a WireGuard private and public key-pair.
func Keys() ([]byte, []byte, error) {
    private, err := GenKey()
    if err != nil {
        return nil, nil, fmt.Errorf("failed to generate private key: %v", err)
    }
    public, err := PubKey(private)
    return private, public, err
}

// GenKey generates a WireGuard private key.
func GenKey() ([]byte, error) {
    key, err := exec.Command("wg", "genkey").Output()
    return bytes.Trim(key, "\n"), err
}

// PubKey generates a WireGuard public key for a given private key.
func PubKey(key []byte) ([]byte, error) {
    cmd := exec.Command("wg", "pubkey")
    stdin, err := cmd.StdinPipe()
    if err != nil {
        return nil, fmt.Errorf("failed to open pipe to stdin: %v", err)
    }

    go func() {
        defer stdin.Close()
        stdin.Write(key)
    }()

    public, err := cmd.Output()
    if err != nil {
        return nil, fmt.Errorf("failed to generate public key: %v", err)
    }
    return bytes.Trim(public, "\n"), nil
}

// SetConf applies a WireGuard configuration file to the given interface.
func SetConf(iface string, path string) error {
    cmd := exec.Command("wg", "setconf", iface, path)
    var stderr bytes.Buffer
    cmd.Stderr = &stderr
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("failed to apply the WireGuard configuration: %s", stderr.String())
    }
    return nil
}

// ShowConf gets the WireGuard configuration for the given interface.
func ShowConf(iface string) ([]byte, error) {
    cmd := exec.Command("wg", "showconf", iface)
    var stderr, stdout bytes.Buffer
    cmd.Stderr = &stderr
    cmd.Stdout = &stdout
    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("failed to read the WireGuard configuration: %s", stderr.String())
    }
    return stdout.Bytes(), nil
}

// ShowDump gets the WireGuard configuration and runtime information for the given interface.
func ShowDump(iface string) ([]byte, error) {
    cmd := exec.Command("wg", "show", iface, "dump")
    var stderr, stdout bytes.Buffer
    cmd.Stderr = &stderr
    cmd.Stdout = &stdout
    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("failed to read the WireGuard dump output: %s", stderr.String())
    }
    return stdout.Bytes(), nil
}
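The hunk header above suggests this diff removes (or relocates) these wg-wrapping helpers from this file, so the following sketch only illustrates the pre-change API; the import path, interface name, and configuration path are assumptions:

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path; adjust to wherever the wireguard package lives.
	"github.com/squat/kilo/pkg/wireguard"
)

func main() {
	// Keys shells out to `wg genkey` and `wg pubkey` to build a key-pair.
	private, public, err := wireguard.Keys()
	if err != nil {
		log.Fatalf("failed to generate keys: %v", err)
	}
	fmt.Printf("generated %d-byte private and %d-byte public keys\n", len(private), len(public))

	// SetConf applies a configuration file that was written to disk
	// beforehand; both the interface name and the path are illustrative.
	if err := wireguard.SetConf("kilo0", "/var/lib/kilo/conf"); err != nil {
		log.Fatal(err)
	}
}
```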
3 tools.go
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build tools
// +build tools

package main

import (
    _ "github.com/campoy/embedmd"
    _ "golang.org/x/lint/golint"
    _ "honnef.co/go/tools/cmd/staticcheck"
    _ "k8s.io/code-generator/cmd/client-gen"
    _ "k8s.io/code-generator/cmd/deepcopy-gen"
    _ "k8s.io/code-generator/cmd/informer-gen"
2 vendor/github.com/BurntSushi/toml/.gitignore generated vendored Normal file
@@ -0,0 +1,2 @@
|
||||
toml.test
|
||||
/toml-test
|
1 vendor/github.com/BurntSushi/toml/COMPATIBLE generated vendored Normal file
@@ -0,0 +1 @@
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
21 vendor/github.com/BurntSushi/toml/COPYING generated vendored Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 TOML authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
220 vendor/github.com/BurntSushi/toml/README.md generated vendored Normal file
@@ -0,0 +1,220 @@
|
||||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
Documentation: https://godocs.io/github.com/BurntSushi/toml
|
||||
|
||||
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
v0.4.0`).
|
||||
|
||||
This library requires Go 1.13 or newer; install it with:
|
||||
|
||||
$ go get github.com/BurntSushi/toml
|
||||
|
||||
It also comes with a TOML validator CLI tool:
|
||||
|
||||
$ go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
$ tomlv some-toml-file.toml
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles XML and
|
||||
JSON. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
Cats = [ "Cauchy", "Plato" ]
|
||||
Pi = 3.14
|
||||
Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
```
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
Beware that, like most other decoders, **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
To target TOML specifically you can implement the `UnmarshalTOML` interface in
a similar way.
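A rough sketch of what such an `UnmarshalTOML` implementation could look like; the type and field names are illustrative, and the value is assumed to arrive as a `map[string]interface{}` with `int64` integers, which is how this decoder represents tables:

```go
type serverConfig struct {
	Port int
}

// UnmarshalTOML is called with the already-parsed value for this table,
// so the decoding is done by hand instead of via reflection.
func (c *serverConfig) UnmarshalTOML(data interface{}) error {
	m, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("expected a table, got %T", data)
	}
	if p, ok := m["port"].(int64); ok {
		c.Port = int(p)
	}
	return nil
}
```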
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
||||
|
511 vendor/github.com/BurntSushi/toml/decode.go generated vendored Normal file
@@ -0,0 +1,511 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
//
|
||||
// This type can be used for any value, which will cause decoding to be delayed.
|
||||
// You can use the PrimitiveDecode() function to "manually" decode these values.
|
||||
//
|
||||
// NOTE: The underlying representation of a `Primitive` value is subject to
|
||||
// change. Do not rely on it.
|
||||
//
|
||||
// NOTE: Primitive values are still parsed, so using them will only avoid the
|
||||
// overhead of reflection. They can be useful when you don't know the exact type
|
||||
// of TOML data until runtime.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decoder decodes TOML data.
|
||||
//
|
||||
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
|
||||
// used interchangeably).
|
||||
//
|
||||
// TOML table arrays correspond to either a slice of structs or a slice of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
|
||||
// in the local timezone.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond to the
|
||||
// obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the TextUnmarshaler
|
||||
// interface, in which case any primitive TOML value (floats, strings, integers,
|
||||
// booleans, datetimes) will be converted to a []byte and given to the value's
|
||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||
// time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
||||
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
||||
// that don't match the key name exactly (see the example). A case insensitive
|
||||
// match to struct names will be tried if an exact match can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there may
|
||||
// exist TOML values that cannot be placed into your representation, and there
|
||||
// may be parts of your representation that do not correspond to TOML values.
|
||||
// This loose mapping can be made stricter by using the IsDefined and/or
|
||||
// Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder does not handle cyclic types. Decode will not terminate if a
|
||||
// cyclic type is passed.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
// NewDecoder creates a new Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// TODO: have the parser read from io.Reader? Or at the very least, make
// it read from []byte rather than string
|
||||
data, err := ioutil.ReadAll(dec.r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
|
||||
p, err := parse(string(data))
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// Decode the TOML data in to the pointer v.
|
||||
//
|
||||
// See the documentation on Decoder for a description of the decoding process.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at path and decode it for you.
|
||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
// Special case. Look for a `Primitive` value.
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// TODO:
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
|
||||
// array. In particular, the unmarshaler should only be applied to primitive
|
||||
// TOML values. But at this point, it will be applied to all kinds of values
|
||||
// and produce an incorrect error whenever those values are hashes or arrays
|
||||
// (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("cannot write unexported field %s.%s",
|
||||
rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
if k := rv.Type().Key().Kind(); k != reflect.String {
|
||||
return fmt.Errorf(
|
||||
"toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
k, rv.Type())
|
||||
}
|
||||
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
if l := datav.Len(); l != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
l := data.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
|
18 vendor/github.com/BurntSushi/toml/decode_go116.go generated vendored Normal file
@@ -0,0 +1,18 @@
|
||||
// +build go1.16
|
||||
|
||||
package toml
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
// DecodeFS is just like Decode, except it will automatically read the contents
|
||||
// of the file at `path` from a fs.FS instance.
|
||||
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
|
||||
fp, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
123 vendor/github.com/BurntSushi/toml/decode_meta.go generated vendored Normal file
@@ -0,0 +1,123 @@
|
||||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not be
|
||||
// inferable via reflection. In particular, whether a key has been defined and
|
||||
// the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
}
|
||||
|
||||
// IsDefined reports if the key exists in the TOML data.
|
||||
//
|
||||
// The key should be specified hierarchically, for example to access the TOML
|
||||
// key "a.b.c" you would use:
|
||||
//
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that does
|
||||
// not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
|
||||
// values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string { return strings.Join(k, ".") }
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return `"` + quotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
//
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific. The list will have the same
|
||||
// order as the keys appeared in the TOML data.
|
||||
//
|
||||
// All keys returned are non-empty.
|
||||
func (md *MetaData) Keys() []Key {
|
||||
return md.keys
|
||||
}
|
||||
|
||||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
// and so no keys will be considered decoded.
|
||||
//
|
||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
||||
// that do not have a concrete type in your representation.
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
33 vendor/github.com/BurntSushi/toml/deprecated.go generated vendored Normal file
@@ -0,0 +1,33 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use the identical encoding.TextMarshaler instead. It is defined here to
|
||||
// support Go 1.1 and older.
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use the identical encoding.TextUnmarshaler instead. It is defined here to
|
||||
// support Go 1.1 and older.
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use NewDecoder(reader).Decode(&v) instead.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
return NewDecoder(r).Decode(v)
|
||||
}
|
Some files were not shown because too many files have changed in this diff.