*: add complete CNI support

This commit enables Kilo to work as an independent networking provider.
It does so by leveraging CNI: Kilo ships the CNI plugins it needs and
takes care of all networking in the cluster.

Add-on compatibility for Calico, Flannel, etc. will be re-introduced
shortly.
Lucas Servén Marín 2019-05-07 01:49:55 +02:00
parent 72991949ac
commit b3a3c37e0a
12 changed files with 439 additions and 77 deletions


@ -1,6 +1,12 @@
FROM alpine AS cni
RUN apk add --no-cache curl && \
curl -Lo cni.tar.gz https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz && \
tar -xf cni.tar.gz
FROM alpine
MAINTAINER squat <lserven@gmail.com>
RUN echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
apk add --no-cache ipset iptables wireguard-tools@testing
COPY --from=cni bridge host-local loopback portmap /opt/cni/bin/
COPY bin/kg /opt/bin/
ENTRYPOINT ["/opt/bin/kg"]
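
The build stage above fetches the upstream CNI plugins release, and the final image copies only the plugins Kilo needs (bridge, host-local, loopback, portmap) into /opt/cni/bin. A quick, hypothetical sanity check that the plugins actually ship in the image (the image tag is illustrative):
```shell
# Hypothetical check: list the CNI plugins bundled in the Kilo image.
docker run --rm --entrypoint ls squat/kilo /opt/cni/bin
# Expected output: bridge  host-local  loopback  portmap
```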


@ -18,7 +18,7 @@ Kilo's design allows clients to VPN to a cluster in order to securely access ser
Kilo uses [WireGuard](https://www.wireguard.com/), a performant and secure VPN, to create a mesh between the different logical locations in a cluster.
The Kilo agent, `kg`, runs on every node in the cluster, setting up the public and private keys for the VPN as well as the necessary rules to route packets between locations.
Kilo can operate as an add-on complimenting the cluster-networking solution currently installed on a cluster.
Kilo can operate both as a complete, independent networking provider and as an add-on complementing the cluster-networking solution currently installed on a cluster.
This means that if a cluster uses, for example, Calico for networking, Kilo can be installed on top to enable pools of nodes in different locations to join; Kilo will take care of the network between locations, while Calico will take care of the network within locations.
## Installing on Kubernetes


@ -75,6 +75,8 @@ var (
// Main is the principal function for the binary, wrapped only by `main` for convenience.
func Main() error {
backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
cni := flag.Bool("cni", true, "Should Kilo manage the node's CNI configuration.")
cniPath := flag.String("cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
encapsulate := flag.String("encapsulate", string(mesh.AlwaysEncapsulate), fmt.Sprintf("When should Kilo encapsulate packets within a location. Possible values: %s", availableEncapsulations))
granularity := flag.String("mesh-granularity", string(mesh.DataCenterGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
kubeconfig := flag.String("kubeconfig", "", "Path to kubeconfig.")
@ -159,7 +161,7 @@ func Main() error {
return fmt.Errorf("backend %v unknown; possible values are: %s", *backend, availableBackends)
}
m, err := mesh.New(b, e, gr, *hostname, uint32(port), s, *local, log.With(logger, "component", "kilo"))
m, err := mesh.New(b, e, gr, *hostname, uint32(port), s, *local, *cni, *cniPath, log.With(logger, "component", "kilo"))
if err != nil {
return fmt.Errorf("failed to create Kilo mesh: %v", err)
}
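
Taken together, the new flags mean CNI management is on by default and can be pointed at a custom config path. A minimal sketch of an invocation, assuming the defaults shown above (the kubeconfig path is only an example):
```shell
# Sketch: run the agent with the new CNI flags spelled out explicitly.
# --cni defaults to true and --cni-path to mesh.DefaultCNIPath.
kg \
  --kubeconfig=/etc/kubernetes/kubeconfig \
  --cni=true \
  --cni-path=/etc/cni/net.d/10-kilo.conflist
```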


@ -59,3 +59,10 @@ for ip in $(kgctl showconf peer $PEER | grep AllowedIPs | cut -f 3- -d ' ' | tr
sudo ip route add $ip dev $IFACE
done
```
Once the routes are in place, the connection to the cluster can be tested.
For example, try connecting to the API server:
```shell
curl -k https://10.0.27.179:6443
```


@ -1,4 +1,36 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
@ -72,6 +104,36 @@ spec:
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
@ -80,3 +142,9 @@ spec:
- name: kubeconfig
hostPath:
path: /etc/kubernetes/kubeconfig
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/kubernetes/cni/net.d
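
With this manifest applied, the install-cni init container copies the bundled plugins onto the host and renders the ConfigMap's cni-conf.json to 10-kilo.conflist. One hedged way to verify the result on a node (the host paths come from the hostPath volumes above):
```shell
# On a node running the DaemonSet: confirm the plugins and config were installed.
ls /opt/cni/bin
# bridge  host-local  loopback  portmap  ...
cat /etc/kubernetes/cni/net.d/10-kilo.conflist
```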


@ -1,4 +1,36 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
@ -68,9 +100,36 @@ spec:
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /etc/kubernetes
readOnly: true
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
@ -83,3 +142,9 @@ spec:
items:
- key: kubeconfig.conf
path: kubeconfig
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/kubernetes/cni/net.d


@ -1,4 +1,36 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
@ -68,9 +100,36 @@ spec:
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /etc/kubernetes
readOnly: true
initContainers:
- name: install-cni
image: squat/kilo
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
@ -80,3 +139,9 @@ spec:
- name: kubeconfig
configMap:
name: kubeconfig-in-cluster
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/kubernetes/cni/net.d


@ -245,10 +245,15 @@ func (c *Controller) CleanUp() error {
// when traffic between nodes must be encapsulated.
func EncapsulateRules(nodes []*net.IPNet) []Rule {
var rules []Rule
rules = append(rules, &chain{"filter", "KILO-IPIP", nil})
rules = append(rules, &rule{"filter", "INPUT", []string{"-m", "comment", "--comment", "Kilo: jump to IPIP chain", "-p", "4", "-j", "KILO-IPIP"}, nil})
for _, n := range nodes {
// Accept encapsulated traffic from peers.
rules = append(rules, &rule{"filter", "INPUT", []string{"-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-s", n.IP.String(), "-p", "4", "-j", "ACCEPT"}, nil})
rules = append(rules, &rule{"filter", "KILO-IPIP", []string{"-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-s", n.IP.String(), "-j", "ACCEPT"}, nil})
}
// Drop all other IPIP traffic.
rules = append(rules, &rule{"filter", "INPUT", []string{"-m", "comment", "--comment", "Kilo: reject other IPIP traffic", "-p", "4", "-j", "DROP"}, nil})
return rules
}
@ -268,20 +273,27 @@ func ForwardRules(subnets ...*net.IPNet) []Rule {
}
// MasqueradeRules returns a set of iptables rules that are necessary
// when traffic must be masqueraded for Kilo.
func MasqueradeRules(subnet, localPodSubnet *net.IPNet, remotePodSubnet []*net.IPNet) []Rule {
// to NAT traffic from the local Pod subnet to the Internet and out of the Kilo interface.
func MasqueradeRules(kilo, private, localPodSubnet *net.IPNet, remotePodSubnet, peers []*net.IPNet) []Rule {
var rules []Rule
rules = append(rules, &chain{"mangle", "KILO-MARK", nil})
rules = append(rules, &rule{"mangle", "PREROUTING", []string{"-m", "comment", "--comment", "Kilo: jump to mark chain", "-i", "kilo+", "-j", "KILO-MARK"}, nil})
rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: do not mark packets destined for the local Pod subnet", "-d", localPodSubnet.String(), "-j", "RETURN"}, nil})
if subnet != nil {
rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: do not mark packets destined for the local private subnet", "-d", subnet.String(), "-j", "RETURN"}, nil})
}
rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: remaining packets should be marked for NAT", "-j", "MARK", "--set-xmark", "0x1107/0x1107"}, nil})
rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: NAT packets from Kilo interface", "-m", "mark", "--mark", "0x1107/0x1107", "-j", "MASQUERADE"}, nil})
rules = append(rules, &chain{"nat", "KILO-NAT", nil})
// NAT packets from Kilo interface.
rules = append(rules, &rule{"mangle", "PREROUTING", []string{"-m", "comment", "--comment", "Kilo: jump to mark chain", "-i", "kilo+", "-j", "MARK", "--set-xmark", "0x1107/0x1107"}, nil})
rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: NAT packets from Kilo interface", "-m", "mark", "--mark", "0x1107/0x1107", "-j", "KILO-NAT"}, nil})
// NAT packets from pod subnet.
rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: jump to NAT chain", "-s", localPodSubnet.String(), "-j", "KILO-NAT"}, nil})
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: do not NAT packets destined for the local Pod subnet", "-d", localPodSubnet.String(), "-j", "RETURN"}, nil})
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: do not NAT packets destined for the Kilo subnet", "-d", kilo.String(), "-j", "RETURN"}, nil})
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: do not NAT packets destined for the local private IP", "-d", private.String(), "-j", "RETURN"}, nil})
for _, r := range remotePodSubnet {
rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: NAT packets from local pod subnet to remote pod subnets", "-s", localPodSubnet.String(), "-d", r.String(), "-j", "MASQUERADE"}, nil})
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: do not NAT packets from local pod subnet to remote pod subnets", "-s", localPodSubnet.String(), "-d", r.String(), "-j", "RETURN"}, nil})
}
for _, p := range peers {
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: do not NAT packets from local pod subnet to peers", "-s", localPodSubnet.String(), "-d", p.String(), "-j", "RETURN"}, nil})
}
rules = append(rules, &rule{"nat", "KILO-NAT", []string{"-m", "comment", "--comment", "Kilo: NAT remaining packets", "-j", "MASQUERADE"}, nil})
return rules
}
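
For orientation, the KILO-NAT chain built by the new MasqueradeRules translates to roughly the following iptables commands; all addresses are illustrative placeholders, not values produced by the code:
```shell
# Sketch of the rules for a node with Kilo subnet 10.4.0.0/16, private IP
# 192.168.0.10/32, local Pod subnet 10.2.1.0/24, one remote Pod subnet
# (10.2.2.0/24), and one WireGuard peer (10.5.0.1/32).
iptables -t nat    -N KILO-NAT
iptables -t mangle -A PREROUTING  -m comment --comment "Kilo: jump to mark chain" -i kilo+ -j MARK --set-xmark 0x1107/0x1107
iptables -t nat    -A POSTROUTING -m comment --comment "Kilo: NAT packets from Kilo interface" -m mark --mark 0x1107/0x1107 -j KILO-NAT
iptables -t nat    -A POSTROUTING -m comment --comment "Kilo: jump to NAT chain" -s 10.2.1.0/24 -j KILO-NAT
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: do not NAT packets destined for the local Pod subnet" -d 10.2.1.0/24 -j RETURN
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: do not NAT packets destined for the Kilo subnet" -d 10.4.0.0/16 -j RETURN
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: do not NAT packets destined for the local private IP" -d 192.168.0.10/32 -j RETURN
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: do not NAT packets from local pod subnet to remote pod subnets" -s 10.2.1.0/24 -d 10.2.2.0/24 -j RETURN
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: do not NAT packets from local pod subnet to peers" -s 10.2.1.0/24 -d 10.5.0.1/32 -j RETURN
iptables -t nat    -A KILO-NAT -m comment --comment "Kilo: NAT remaining packets" -j MASQUERADE
```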


@ -142,7 +142,7 @@ func (nb *nodeBackend) Init(stop <-chan struct{}) error {
if ok := cache.WaitForCacheSync(stop, func() bool {
return nb.informer.HasSynced()
}); !ok {
return errors.New("failed to start sync node cache")
return errors.New("failed to sync node cache")
}
nb.informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
@ -357,7 +357,7 @@ func (pb *peerBackend) Init(stop <-chan struct{}) error {
if ok := cache.WaitForCacheSync(stop, func() bool {
return pb.informer.HasSynced()
}); !ok {
return errors.New("failed to start sync peer cache")
return errors.New("failed to sync peer cache")
}
pb.informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{

pkg/mesh/cni.go (new file, 142 lines)

@ -0,0 +1,142 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types"
ipamallocator "github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator"
"github.com/go-kit/kit/log/level"
)
// updateCNIConfig will try to update the local node's CNI config.
func (m *Mesh) updateCNIConfig() {
m.mu.Lock()
n := m.nodes[m.hostname]
m.mu.Unlock()
if n == nil || n.Subnet == nil {
level.Debug(m.logger).Log("msg", "local node does not have a valid subnet assigned")
return
}
cidr, err := getCIDRFromCNI(m.cniPath)
if err != nil {
level.Warn(m.logger).Log("msg", "failed to get CIDR from CNI file; overwriting it", "err", err.Error())
}
if ipNetsEqual(cidr, n.Subnet) {
return
}
if cidr == nil {
level.Info(m.logger).Log("msg", "CIDR in CNI file is empty")
} else {
level.Info(m.logger).Log("msg", "CIDR in CNI file is not empty; overwriting", "old", cidr.String(), "new", n.Subnet.String())
}
if err := setCIDRInCNI(m.cniPath, n.Subnet); err != nil {
level.Warn(m.logger).Log("msg", "failed to set CIDR in CNI file", "err", err.Error())
}
}
// getCIDRFromCNI finds the CIDR for the node from the CNI configuration file.
func getCIDRFromCNI(path string) (*net.IPNet, error) {
var cidr net.IPNet
var ic *ipamallocator.IPAMConfig
cl, err := libcni.ConfListFromFile(path)
if err != nil {
return nil, fmt.Errorf("failed to read CNI config list file: %v", err)
}
for _, conf := range cl.Plugins {
if conf.Network.IPAM.Type != "" {
ic, _, err = ipamallocator.LoadIPAMConfig(conf.Bytes, "")
if err != nil {
return nil, fmt.Errorf("failed to read IPAM config from CNI config list file: %v", err)
}
for _, set := range ic.Ranges {
for _, r := range set {
cidr = net.IPNet(r.Subnet)
if (&cidr).String() == "" {
continue
}
// Return the first subnet we find.
return &cidr, nil
}
}
}
}
return nil, nil
}
// setCIDRInCNI sets the CIDR allocated to the node in the CNI configuration file.
func setCIDRInCNI(path string, cidr *net.IPNet) error {
f, err := ioutil.ReadFile(path)
if err != nil {
return fmt.Errorf("failed to read CNI config list file: %v", err)
}
raw := make(map[string]interface{})
if err := json.Unmarshal(f, &raw); err != nil {
return fmt.Errorf("failed to parse CNI config file: %v", err)
}
if _, ok := raw["plugins"]; !ok {
return errors.New("failed to find plugins in CNI config file")
}
plugins, ok := raw["plugins"].([]interface{})
if !ok {
return errors.New("failed to parse plugins in CNI config file")
}
var found bool
for i := range plugins {
p, ok := plugins[i].(map[string]interface{})
if !ok {
return fmt.Errorf("failed to parse plugin %d in CNI config file", i)
}
if _, ok := p["ipam"]; !ok {
continue
}
ipam, ok := p["ipam"].(map[string]interface{})
if !ok {
return errors.New("failed to parse IPAM configuration in CNI config file")
}
ipam["ranges"] = []ipamallocator.RangeSet{
{
{
Subnet: types.IPNet(*cidr),
},
},
}
found = true
}
if !found {
return errors.New("failed to set subnet CIDR in CNI config file; file appears invalid")
}
buf, err := json.Marshal(raw)
if err != nil {
return fmt.Errorf("failed to marshal CNI config: %v", err)
}
if err := ioutil.WriteFile(path, buf, 0644); err != nil {
return fmt.Errorf("failed to write CNI config file to disk: %v", err)
}
return nil
}
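
The net effect of updateCNIConfig is that the host-local IPAM section of the conflist always carries the node's Pod CIDR. A rough way to observe this on a node, assuming jq is available and using a made-up subnet in the expected output:
```shell
# Inspect the IPAM ranges Kilo maintains in the CNI config (path is mesh.DefaultCNIPath).
jq '.plugins[] | select(.ipam != null) | .ipam.ranges' /etc/cni/net.d/10-kilo.conflist
# Example output once the node's Pod CIDR has been written:
# [
#   [
#     {
#       "subnet": "10.2.1.0/24"
#     }
#   ]
# ]
```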


@ -29,7 +29,6 @@ import (
"github.com/vishvananda/netlink"
"github.com/squat/kilo/pkg/iproute"
"github.com/squat/kilo/pkg/ipset"
"github.com/squat/kilo/pkg/iptables"
"github.com/squat/kilo/pkg/route"
"github.com/squat/kilo/pkg/wireguard"
@ -46,6 +45,8 @@ const (
ConfPath = KiloPath + "/conf"
// DefaultKiloPort is the default UDP port Kilo uses.
DefaultKiloPort = 51820
// DefaultCNIPath is the default path to the CNI config file.
DefaultCNIPath = "/etc/cni/net.d/10-kilo.conflist"
)
// Granularity represents the abstraction level at which the network
@ -171,12 +172,13 @@ type PeerBackend interface {
// Mesh is able to create Kilo network meshes.
type Mesh struct {
Backend
cni bool
cniPath string
encapsulate Encapsulate
externalIP *net.IPNet
granularity Granularity
hostname string
internalIP *net.IPNet
ipset *ipset.Set
ipTables *iptables.Controller
kiloIface int
key []byte
@ -205,7 +207,7 @@ type Mesh struct {
}
// New returns a new Mesh instance.
func New(backend Backend, encapsulate Encapsulate, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local bool, logger log.Logger) (*Mesh, error) {
func New(backend Backend, encapsulate Encapsulate, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
if err := os.MkdirAll(KiloPath, 0700); err != nil {
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
}
@ -259,28 +261,27 @@ func New(backend Backend, encapsulate Encapsulate, granularity Granularity, host
}
return &Mesh{
Backend: backend,
cni: cni,
cniPath: cniPath,
encapsulate: encapsulate,
externalIP: publicIP,
granularity: granularity,
hostname: hostname,
internalIP: privateIP,
// This is a patch until Calico supports
// other hosts adding IPIP iptables rules.
ipset: ipset.New("cali40all-hosts-net"),
ipTables: ipTables,
kiloIface: kiloIface,
nodes: make(map[string]*Node),
peers: make(map[string]*Peer),
port: port,
priv: private,
privIface: privIface,
pub: public,
pubIface: pubIface,
local: local,
stop: make(chan struct{}),
subnet: subnet,
table: route.NewTable(),
tunlIface: tunlIface,
ipTables: ipTables,
kiloIface: kiloIface,
nodes: make(map[string]*Node),
peers: make(map[string]*Peer),
port: port,
priv: private,
privIface: privIface,
pub: public,
pubIface: pubIface,
local: local,
stop: make(chan struct{}),
subnet: subnet,
table: route.NewTable(),
tunlIface: tunlIface,
errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "kilo_errors_total",
Help: "Number of errors that occurred while administering the mesh.",
@ -309,10 +310,6 @@ func (m *Mesh) Run() error {
if err := m.Peers().Init(m.stop); err != nil {
return fmt.Errorf("failed to initialize peer backend: %v", err)
}
ipsetErrors, err := m.ipset.Run(m.stop)
if err != nil {
return fmt.Errorf("failed to watch for ipset updates: %v", err)
}
ipTablesErrors, err := m.ipTables.Run(m.stop)
if err != nil {
return fmt.Errorf("failed to watch for IP tables updates: %v", err)
@ -325,7 +322,6 @@ func (m *Mesh) Run() error {
for {
var err error
select {
case err = <-ipsetErrors:
case err = <-ipTablesErrors:
case err = <-routeErrors:
case <-m.stop:
@ -351,6 +347,9 @@ func (m *Mesh) Run() error {
m.syncPeers(pe)
case <-t.C:
m.checkIn()
if m.cni {
m.updateCNIConfig()
}
m.syncEndpoints()
m.applyTopology()
t.Reset(resyncPeriod)
@ -585,45 +584,40 @@ func (m *Mesh) applyTopology() {
m.errorCounter.WithLabelValues("apply").Inc()
return
}
var private *net.IPNet
// If we are not encapsulating packets to the local private network,
// then pass the private IP to add an exception to the NAT rule.
if m.encapsulate != AlwaysEncapsulate {
private = t.privateIP
}
rules := iptables.MasqueradeRules(private, m.nodes[m.hostname].Subnet, t.RemoteSubnets())
rules = append(rules, iptables.ForwardRules(m.subnet)...)
rules := iptables.ForwardRules(m.subnet)
var peerCIDRs []*net.IPNet
for _, p := range m.peers {
rules = append(rules, iptables.ForwardRules(p.AllowedIPs...)...)
peerCIDRs = append(peerCIDRs, p.AllowedIPs...)
}
rules = append(rules, iptables.MasqueradeRules(m.subnet, oneAddressCIDR(t.privateIP.IP), m.nodes[m.hostname].Subnet, t.RemoteSubnets(), peerCIDRs)...)
// If we are handling local routes, ensure the local
// tunnel has an IP address and IPIP traffic is allowed.
if m.encapsulate != NeverEncapsulate && m.local {
var cidrs []*net.IPNet
for _, s := range t.segments {
if s.location == m.nodes[m.hostname].Location {
for i := range s.privateIPs {
cidrs = append(cidrs, oneAddressCIDR(s.privateIPs[i]))
}
break
}
}
rules = append(rules, iptables.EncapsulateRules(cidrs)...)
// If we are handling local routes, ensure the local
// tunnel has an IP address.
if err := iproute.SetAddress(m.tunlIface, oneAddressCIDR(newAllocator(*m.nodes[m.hostname].Subnet).next().IP)); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
}
if err := m.ipTables.Set(rules); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
if m.encapsulate != NeverEncapsulate {
var peers []net.IP
for _, s := range t.segments {
if s.location == m.nodes[m.hostname].Location {
peers = s.privateIPs
break
}
}
if err := m.ipset.Set(peers); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
// If we are handling local routes, ensure the local
// tunnel has an IP address.
if m.local {
if err := iproute.SetAddress(m.tunlIface, oneAddressCIDR(newAllocator(*m.nodes[m.hostname].Subnet).next().IP)); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
}
}
if t.leader {
if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
level.Error(m.logger).Log("error", err)
@ -720,10 +714,6 @@ func (m *Mesh) cleanUp() {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := m.ipset.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up ipset: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
func isSelf(hostname string, node *Node) bool {
@ -781,6 +771,12 @@ func ipNetsEqual(a, b *net.IPNet) bool {
}
func subnetsEqual(a, b *net.IPNet) bool {
if a == nil && b == nil {
return true
}
if (a != nil) != (b != nil) {
return false
}
if a.Mask.String() != b.Mask.String() {
return false
}


@ -51,7 +51,6 @@ type Topology struct {
}
type segment struct {
// Some fields need to be exported so that the template can read them.
allowedIPs []*net.IPNet
endpoint net.IP
key []byte