manifests,pkg/encapsulation: Flannel compatibility

This commit adds basic support for running in compatibility mode with
Flannel. This allows clusters that use Flannel as their principal
networking solution to leverage some advanced Kilo features. In certain
Flannel setups, the clusters can even leverage multi-cloud. For this, the
cluster needs to either run in a full mesh, or Flannel needs to use the
API server's external IP address.
Lucas Servén Marín 2019-05-14 01:01:53 +02:00
parent cd6eeeb1e7
commit 81d6077fc2
12 changed files with 582 additions and 57 deletions

View File

@@ -20,7 +20,7 @@ Kilo uses [WireGuard](https://www.wireguard.com/), a performant and secure VPN,
The Kilo agent, `kg`, runs on every node in the cluster, setting up the public and private keys for the VPN as well as the necessary rules to route packets between locations.
Kilo can operate both as a complete, independent networking provider and as an add-on complementing the cluster-networking solution currently installed on a cluster.
This means that if a cluster uses, for example, Calico for networking, Kilo can be installed on top to enable pools of nodes in different locations to join; Kilo will take care of the network between locations, while Calico will take care of the network within locations.
This means that if a cluster uses, for example, Flannel for networking, Kilo can be installed on top to enable pools of nodes in different locations to join; Kilo will take care of the network between locations, while Flannel will take care of the network within locations.
## Installing on Kubernetes
@@ -88,6 +88,20 @@ To run Kilo on k3s:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/master/manifests/kilo-k3s.yaml
```
## Add-on Mode
Administrators of existing clusters who do not want to swap out the existing networking solution can run Kilo in add-on mode.
In this mode, Kilo will add advanced features to the cluster, such as VPN and multi-cluster services, while delegating CNI management and local networking to the cluster's current networking provider.
Currently, Kilo supports running on top of Flannel.
For example, to run Kilo on a Typhoon cluster running Flannel:
```shell
kubectl apply -f https://raw.githubusercontent.com/squat/kilo/master/manifests/kilo-typhoon-flannel.yaml
```
[See the manifests directory for more examples](./manifests).
## VPN
Kilo enables peers outside of a Kubernetes cluster to also connect to the VPN, allowing cluster applications to securely access external services and permitting developers and support to securely debug cluster resources.

View File

@@ -54,6 +54,9 @@ var (
	availableBackends = strings.Join([]string{
		k8s.Backend,
	}, ", ")
	availableCompatibilities = strings.Join([]string{
		"flannel",
	}, ", ")
	availableEncapsulations = strings.Join([]string{
		string(encapsulation.Never),
		string(encapsulation.CrossSubnet),
@@ -78,6 +81,7 @@ func Main() error {
	backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
	cni := flag.Bool("cni", true, "Should Kilo manage the node's CNI configuration.")
	cniPath := flag.String("cni-path", mesh.DefaultCNIPath, "Path to CNI config.")
	compatibility := flag.String("compatibility", "", fmt.Sprintf("Should Kilo run in compatibility mode? Possible values: %s", availableCompatibilities))
	encapsulate := flag.String("encapsulate", string(encapsulation.Always), fmt.Sprintf("When should Kilo encapsulate packets within a location. Possible values: %s", availableEncapsulations))
	granularity := flag.String("mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
	kubeconfig := flag.String("kubeconfig", "", "Path to kubeconfig.")
@@ -130,7 +134,6 @@ func Main() error {
	logger = log.With(logger, "ts", log.DefaultTimestampUTC)
	logger = log.With(logger, "caller", log.DefaultCaller)
	var enc encapsulation.Interface
	e := encapsulation.Strategy(*encapsulate)
	switch e {
	case encapsulation.Never:
@@ -139,7 +142,14 @@
	default:
		return fmt.Errorf("encapsulation %v unknown; possible values are: %s", *encapsulate, availableEncapsulations)
	}
	enc = encapsulation.NewIPIP(e)
	var enc encapsulation.Encapsulator
	switch *compatibility {
	case "flannel":
		enc = encapsulation.NewFlannel(e)
	default:
		enc = encapsulation.NewIPIP(e)
	}
	gr := mesh.Granularity(*granularity)
	switch gr {

View File

@@ -0,0 +1,90 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kilo
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kilo
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - patch
  - watch
- apiGroups:
  - kilo.squat.ai
  resources:
  - peers
  verbs:
  - list
  - update
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kilo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kilo
subjects:
- kind: ServiceAccount
  name: kilo
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo
    spec:
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --cni=false
        - --compatibility=flannel
        - --local=false
        securityContext:
          privileged: true
        volumeMounts:
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes/kubeconfig
          readOnly: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        hostPath:
          path: /etc/kubernetes/kubeconfig

View File

@@ -0,0 +1,90 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kilo
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kilo
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - patch
  - watch
- apiGroups:
  - kilo.squat.ai
  resources:
  - peers
  verbs:
  - list
  - update
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kilo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kilo
subjects:
- kind: ServiceAccount
  name: kilo
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo
    spec:
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --cni=false
        - --compatibility=flannel
        - --local=false
        securityContext:
          privileged: true
        volumeMounts:
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes/kubeconfig
          readOnly: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        hostPath:
          path: /etc/rancher/k3s/k3s.yaml

View File

@@ -0,0 +1,93 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kilo
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kilo
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - patch
  - watch
- apiGroups:
  - kilo.squat.ai
  resources:
  - peers
  verbs:
  - list
  - update
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kilo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kilo
subjects:
- kind: ServiceAccount
  name: kilo
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo
    spec:
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --cni=false
        - --compatibility=flannel
        - --local=false
        securityContext:
          privileged: true
        volumeMounts:
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes
          readOnly: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        configMap:
          name: kube-proxy
          items:
          - key: kubeconfig.conf
            path: kubeconfig

View File

@@ -0,0 +1,90 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kilo
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: kilo
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - patch
  - watch
- apiGroups:
  - kilo.squat.ai
  resources:
  - peers
  verbs:
  - list
  - update
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kilo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kilo
subjects:
- kind: ServiceAccount
  name: kilo
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo
    spec:
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --cni=false
        - --compatibility=flannel
        - --local=false
        securityContext:
          privileged: true
        volumeMounts:
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes
          readOnly: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        configMap:
          name: kubeconfig-in-cluster

View File

@@ -0,0 +1,55 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encapsulation

import (
	"net"

	"github.com/squat/kilo/pkg/iptables"
)

// Strategy identifies which packets within a location should
// be encapsulated.
type Strategy string

const (
	// Never indicates that no packets within a location
	// should be encapsulated.
	Never Strategy = "never"
	// CrossSubnet indicates that only packets that
	// traverse subnets within a location should be encapsulated.
	CrossSubnet Strategy = "crosssubnet"
	// Always indicates that all packets within a location
	// should be encapsulated.
	Always Strategy = "always"
)

// Encapsulator can:
//  * configure the encapsulation interface;
//  * determine the gateway IP corresponding to a node;
//  * get the encapsulation interface index;
//  * set the interface IP address;
//  * return the required IPTables rules;
//  * return the encapsulation strategy; and
//  * clean up any changes applied to the backend.
type Encapsulator interface {
	CleanUp() error
	Gw(net.IP, net.IP, *net.IPNet) net.IP
	Index() int
	Init(int) error
	Rules([]*net.IPNet) []iptables.Rule
	Set(*net.IPNet) error
	Strategy() Strategy
}
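
For orientation, here is a minimal sketch, not part of this commit, of what a future compatibility mode would have to provide: a hypothetical no-op Encapsulator that never encapsulates and, like the IPIP implementation, routes via nodes' private IPs.

```go
package encapsulation

import (
	"net"

	"github.com/squat/kilo/pkg/iptables"
)

// noop is a hypothetical Encapsulator that performs no
// encapsulation at all; every method is the identity or a no-op.
type noop struct{}

// Compile-time check that the sketch satisfies the interface.
var _ Encapsulator = &noop{}

func (n *noop) CleanUp() error { return nil }

// Gw routes via the node's private IP, like the IPIP encapsulator.
func (n *noop) Gw(_, internal net.IP, _ *net.IPNet) net.IP { return internal }

func (n *noop) Index() int                           { return 0 }
func (n *noop) Init(_ int) error                     { return nil }
func (n *noop) Rules(_ []*net.IPNet) []iptables.Rule { return nil }
func (n *noop) Set(_ *net.IPNet) error               { return nil }
func (n *noop) Strategy() Strategy                   { return Never }
```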

View File

@@ -0,0 +1,108 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encapsulation

import (
	"fmt"
	"net"
	"sync"

	"github.com/squat/kilo/pkg/iptables"
	"github.com/vishvananda/netlink"
)

const flannelDeviceName = "flannel.1"

type flannel struct {
	iface    int
	strategy Strategy
	ch       chan netlink.LinkUpdate
	done     chan struct{}
	// mu guards access to the iface field.
	mu sync.Mutex
}

// NewFlannel returns an encapsulator that uses Flannel.
func NewFlannel(strategy Strategy) Encapsulator {
	return &flannel{
		ch:       make(chan netlink.LinkUpdate),
		done:     make(chan struct{}),
		strategy: strategy,
	}
}

// CleanUp closes the done channel to stop the watch
// for updates to the Flannel interface.
func (f *flannel) CleanUp() error {
	close(f.done)
	return nil
}

// Gw returns the correct gateway IP associated with the given node.
func (f *flannel) Gw(_, _ net.IP, subnet *net.IPNet) net.IP {
	return subnet.IP
}

// Index returns the index of the Flannel interface.
func (f *flannel) Index() int {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.iface
}

// Init finds the Flannel interface index.
func (f *flannel) Init(_ int) error {
	if err := netlink.LinkSubscribe(f.ch, f.done); err != nil {
		return fmt.Errorf("failed to subscribe to updates to %s: %v", flannelDeviceName, err)
	}
	go func() {
		var lu netlink.LinkUpdate
		for {
			select {
			case lu = <-f.ch:
				if lu.Attrs().Name == flannelDeviceName {
					f.mu.Lock()
					f.iface = lu.Attrs().Index
					f.mu.Unlock()
				}
			case <-f.done:
				return
			}
		}
	}()
	i, err := netlink.LinkByName(flannelDeviceName)
	if _, ok := err.(netlink.LinkNotFoundError); ok {
		return nil
	}
	if err != nil {
		return fmt.Errorf("failed to query for Flannel interface: %v", err)
	}
	f.mu.Lock()
	f.iface = i.Attrs().Index
	f.mu.Unlock()
	return nil
}

// Rules is a no-op.
func (f *flannel) Rules(_ []*net.IPNet) []iptables.Rule {
	return nil
}

// Set is a no-op.
func (f *flannel) Set(_ *net.IPNet) error {
	return nil
}

// Strategy returns the configured strategy for encapsulation.
func (f *flannel) Strategy() Strategy {
	return f.strategy
}
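
A rough usage sketch follows, with assumed wiring rather than code from this commit: Init starts the watch for the flannel.1 link, and Index reports its index, which stays 0 until Flannel creates the device.

```go
package main

import (
	"fmt"
	"log"

	"github.com/squat/kilo/pkg/encapsulation"
)

func main() {
	enc := encapsulation.NewFlannel(encapsulation.Always)
	// The private-interface index argument is ignored by the
	// Flannel encapsulator, so any value works here.
	if err := enc.Init(0); err != nil {
		log.Fatalf("failed to initialize Flannel encapsulator: %v", err)
	}
	defer enc.CleanUp() // stop the netlink subscription
	// Index is 0 until the flannel.1 device exists.
	fmt.Println("flannel.1 interface index:", enc.Index())
}
```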

View File

@@ -22,45 +22,13 @@ import (
	"github.com/squat/kilo/pkg/iptables"
)

// Strategy identifies which packets within a location should
// be encapsulated.
type Strategy string

const (
	// Never indicates that no packets within a location
	// should be encapsulated.
	Never Strategy = "never"
	// CrossSubnet indicates that only packets that
	// traverse subnets within a location should be encapsulated.
	CrossSubnet Strategy = "crosssubnet"
	// Always indicates that all packets within a location
	// should be encapsulated.
	Always Strategy = "always"
)

// Interface can configure
// the encapsulation interface, init itself,
// get the encapsulation interface index,
// set the interface IP address,
// return the required IPTables rules,
// return the encapsulation strategy,
// and clean up any changes applied to the backend.
type Interface interface {
	CleanUp() error
	Index() int
	Init(int) error
	Rules([]*net.IPNet) []iptables.Rule
	Set(*net.IPNet) error
	Strategy() Strategy
}

type ipip struct {
	iface    int
	strategy Strategy
}

// NewIPIP returns an encapsulation that uses IPIP.
func NewIPIP(strategy Strategy) Interface {
// NewIPIP returns an encapsulator that uses IPIP.
func NewIPIP(strategy Strategy) Encapsulator {
	return &ipip{strategy: strategy}
}

@@ -72,6 +40,11 @@ func (i *ipip) CleanUp() error {
	return iproute.RemoveInterface(i.iface)
}

// Gw returns the correct gateway IP associated with the given node.
func (i *ipip) Gw(_, internal net.IP, _ *net.IPNet) net.IP {
	return internal
}

// Index returns the index of the IPIP interface.
func (i *ipip) Index() int {
	return i.iface

View File

@@ -169,7 +169,7 @@ type Mesh struct {
	Backend
	cni         bool
	cniPath     string
	enc         encapsulation.Interface
	enc         encapsulation.Encapsulator
	externalIP  *net.IPNet
	granularity Granularity
	hostname    string
@@ -202,7 +202,7 @@ type Mesh struct {
}

// New returns a new Mesh instance.
func New(backend Backend, enc encapsulation.Interface, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
	if err := os.MkdirAll(KiloPath, 0700); err != nil {
		return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
	}
@@ -245,7 +245,7 @@ func New(backend Backend, enc encapsulation.Interface, granularity Granularity,
	}
	if enc.Strategy() != encapsulation.Never {
		if err := enc.Init(privIface); err != nil {
			return nil, fmt.Errorf("failed to initialize encapsulation: %v", err)
			return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
		}
	}
	level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
@@ -674,7 +674,7 @@ func (m *Mesh) applyTopology() {
	}
	// We need to add routes last since they may depend
	// on the WireGuard interface.
	routes := t.Routes(m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc.Strategy())
	routes := t.Routes(m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
	if err := m.table.Set(routes); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
@@ -723,7 +723,7 @@ func (m *Mesh) cleanUp() {
		m.errorCounter.WithLabelValues("cleanUp").Inc()
	}
	if err := m.enc.CleanUp(); err != nil {
		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulation: %v", err))
		level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
		m.errorCounter.WithLabelValues("cleanUp").Inc()
	}
}

View File

@@ -172,14 +172,16 @@ func (t *Topology) RemoteSubnets() []*net.IPNet {
}

// Routes generates a slice of routes for a given Topology.
func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encapsulate encapsulation.Strategy) []*netlink.Route {
func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, enc encapsulation.Encapsulator) []*netlink.Route {
	var routes []*netlink.Route
	if !t.leader {
		// Find the leader for this segment.
		var leader net.IP
		// Find the GW for this segment.
		// This will be an IP of the leader.
		// In an IPIP-encapsulated mesh it is the leader's private IP.
		var gw net.IP
		for _, segment := range t.segments {
			if segment.location == t.location {
				leader = segment.privateIPs[segment.leader]
				gw = enc.Gw(segment.endpoint, segment.privateIPs[segment.leader], segment.cidrs[segment.leader])
				break
			}
		}
@@ -188,10 +190,10 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
			routes = append(routes, encapsulateRoute(&netlink.Route{
				Dst:       oneAddressCIDR(segment.wireGuardIP),
				Flags:     int(netlink.FLAG_ONLINK),
				Gw:        leader,
				Gw:        gw,
				LinkIndex: privIface,
				Protocol:  unix.RTPROT_STATIC,
			}, encapsulate, t.privateIP, tunlIface))
			}, enc.Strategy(), t.privateIP, tunlIface))
			// Add routes for the current segment if local is true.
			if segment.location == t.location {
				if local {
@@ -206,7 +208,7 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
							Gw:        segment.privateIPs[i],
							LinkIndex: privIface,
							Protocol:  unix.RTPROT_STATIC,
						}, encapsulate, t.privateIP, tunlIface))
						}, enc.Strategy(), t.privateIP, tunlIface))
					}
				}
				continue
@@ -216,20 +218,20 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
				routes = append(routes, encapsulateRoute(&netlink.Route{
					Dst:       segment.cidrs[i],
					Flags:     int(netlink.FLAG_ONLINK),
					Gw:        leader,
					Gw:        gw,
					LinkIndex: privIface,
					Protocol:  unix.RTPROT_STATIC,
				}, encapsulate, t.privateIP, tunlIface))
				}, enc.Strategy(), t.privateIP, tunlIface))
				// Add routes to the private IPs of nodes in other segments.
				// Number of CIDRs and private IPs always match so
				// we can reuse the loop.
				routes = append(routes, encapsulateRoute(&netlink.Route{
					Dst:       oneAddressCIDR(segment.privateIPs[i]),
					Flags:     int(netlink.FLAG_ONLINK),
					Gw:        leader,
					Gw:        gw,
					LinkIndex: privIface,
					Protocol:  unix.RTPROT_STATIC,
				}, encapsulate, t.privateIP, tunlIface))
				}, enc.Strategy(), t.privateIP, tunlIface))
			}
		}
		// Add routes for the allowed IPs of peers.
@@ -238,10 +240,10 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
				routes = append(routes, encapsulateRoute(&netlink.Route{
					Dst:       peer.AllowedIPs[i],
					Flags:     int(netlink.FLAG_ONLINK),
					Gw:        leader,
					Gw:        gw,
					LinkIndex: privIface,
					Protocol:  unix.RTPROT_STATIC,
				}, encapsulate, t.privateIP, tunlIface))
				}, enc.Strategy(), t.privateIP, tunlIface))
			}
		}
		return routes
@@ -261,7 +263,7 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
						Gw:        segment.privateIPs[i],
						LinkIndex: privIface,
						Protocol:  unix.RTPROT_STATIC,
					}, encapsulate, t.privateIP, tunlIface))
					}, enc.Strategy(), t.privateIP, tunlIface))
				}
			}
			continue
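
To make the gateway change concrete, here is a hypothetical comparison, with invented example IPs, of the gateways the IPIP and Flannel encapsulators derive for the same segment leader.

```go
package main

import (
	"fmt"
	"net"

	"github.com/squat/kilo/pkg/encapsulation"
)

func main() {
	private := net.ParseIP("10.1.0.7")             // leader's private IP
	_, subnet, _ := net.ParseCIDR("10.244.2.0/24") // leader's Pod CIDR

	// IPIP meshes route toward the leader's private IP.
	ipip := encapsulation.NewIPIP(encapsulation.Never)
	fmt.Println(ipip.Gw(nil, private, subnet)) // 10.1.0.7

	// Flannel meshes route toward the first IP of the leader's Pod
	// subnet, which Flannel's VXLAN backend assigns to flannel.1.
	flannel := encapsulation.NewFlannel(encapsulation.Never)
	fmt.Println(flannel.Gw(nil, private, subnet)) // 10.244.2.0
}
```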

View File

@@ -979,7 +979,7 @@ func TestRoutes(t *testing.T) {
			},
		},
	} {
		routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, encapsulation.Never)
		routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, encapsulation.NewIPIP(encapsulation.Never))
		if diff := pretty.Compare(routes, tc.result); diff != "" {
			t.Errorf("test case %q: got diff: %v", tc.name, diff)
		}