2019-01-18 01:50:10 +00:00
|
|
|
// Copyright 2019 the Kilo authors
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package k8s
|
|
|
|
|
|
|
|
import (
|
2021-05-15 10:08:31 +00:00
|
|
|
"context"
|
2019-01-18 01:50:10 +00:00
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"net"
|
|
|
|
"path"
|
2019-04-02 16:25:08 +00:00
|
|
|
"strconv"
|
2019-01-18 01:50:10 +00:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2022-01-30 16:38:45 +00:00
|
|
|
"github.com/go-kit/kit/log"
|
|
|
|
"github.com/go-kit/kit/log/level"
|
|
|
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
2019-04-02 16:25:08 +00:00
|
|
|
v1 "k8s.io/api/core/v1"
|
2019-05-03 10:53:40 +00:00
|
|
|
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
2021-05-15 10:08:31 +00:00
|
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
2019-01-18 01:50:10 +00:00
|
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
|
|
"k8s.io/apimachinery/pkg/types"
|
|
|
|
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
|
|
|
v1informers "k8s.io/client-go/informers/core/v1"
|
|
|
|
"k8s.io/client-go/kubernetes"
|
|
|
|
v1listers "k8s.io/client-go/listers/core/v1"
|
|
|
|
"k8s.io/client-go/tools/cache"
|
|
|
|
|
2019-05-03 10:53:40 +00:00
|
|
|
"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
|
|
|
|
kiloclient "github.com/squat/kilo/pkg/k8s/clientset/versioned"
|
|
|
|
v1alpha1informers "github.com/squat/kilo/pkg/k8s/informers/kilo/v1alpha1"
|
|
|
|
v1alpha1listers "github.com/squat/kilo/pkg/k8s/listers/kilo/v1alpha1"
|
2019-01-18 01:50:10 +00:00
|
|
|
"github.com/squat/kilo/pkg/mesh"
|
2019-05-03 10:53:40 +00:00
|
|
|
"github.com/squat/kilo/pkg/wireguard"
|
2019-01-18 01:50:10 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// Backend is the name of this mesh backend.
	Backend = "kubernetes"

	// Annotation keys used on Kubernetes Node objects to exchange
	// WireGuard mesh state between Kilo agents.
	endpointAnnotationKey        = "kilo.squat.ai/endpoint"
	forceEndpointAnnotationKey   = "kilo.squat.ai/force-endpoint"
	forceInternalIPAnnotationKey = "kilo.squat.ai/force-internal-ip"
	internalIPAnnotationKey      = "kilo.squat.ai/internal-ip"
	keyAnnotationKey             = "kilo.squat.ai/key"
	lastSeenAnnotationKey        = "kilo.squat.ai/last-seen"
	leaderAnnotationKey          = "kilo.squat.ai/leader"
	locationAnnotationKey        = "kilo.squat.ai/location"
	persistentKeepaliveKey       = "kilo.squat.ai/persistent-keepalive"
	wireGuardIPAnnotationKey     = "kilo.squat.ai/wireguard-ip"
	discoveredEndpointsKey       = "kilo.squat.ai/discovered-endpoints"
	allowedLocationIPsKey        = "kilo.squat.ai/allowed-location-ips"
	granularityKey               = "kilo.squat.ai/granularity"

	// RegionLabelKey is the key for the well-known Kubernetes topology region label.
	RegionLabelKey = "topology.kubernetes.io/region"

	// jsonPatchSlash is the JSON-patch escape sequence for "/" inside a path segment.
	jsonPatchSlash = "~1"
	// jsonRemovePatch is a format string producing a JSON-patch remove operation.
	jsonRemovePatch = `{"op": "remove", "path": "%s"}`
)
|
|
|
|
|
// logger is the package-level logger. It is a no-op until New replaces it
// with the logger supplied by the caller.
var logger = log.NewNopLogger()
2019-01-18 01:50:10 +00:00
|
|
|
// backend implements the mesh.Backend interface by composing a backend
// for Kubernetes Nodes with a backend for Kilo Peer custom resources.
type backend struct {
	nodes *nodeBackend
	peers *peerBackend
}
|
|
|
|
|
|
|
|
// Nodes implements the mesh.Backend interface.
// It returns the backend responsible for Kubernetes Nodes.
func (b *backend) Nodes() mesh.NodeBackend {
	return b.nodes
}
|
|
|
|
|
|
|
|
// Peers implements the mesh.Backend interface.
// It returns the backend responsible for Kilo Peer custom resources.
func (b *backend) Peers() mesh.PeerBackend {
	return b.peers
}
|
|
|
|
|
|
|
|
// nodeBackend watches Kubernetes Nodes and translates them into mesh.Nodes.
type nodeBackend struct {
	// client is used to patch Node objects in the API server.
	client kubernetes.Interface
	// events delivers node events produced by the informer handlers.
	events chan *mesh.NodeEvent
	// informer caches and watches Node objects.
	informer cache.SharedIndexInformer
	// lister reads Nodes from the informer's cache.
	lister v1listers.NodeLister
	// topologyLabel is the node label used to derive a node's location.
	topologyLabel string
}
|
|
|
|
|
2019-05-03 10:53:40 +00:00
|
|
|
// peerBackend watches Kilo Peer custom resources and translates them
// into mesh.Peers.
type peerBackend struct {
	// client is used to update Peer objects in the API server.
	client kiloclient.Interface
	// extensionsClient is used to verify the Peer CRD is registered.
	extensionsClient apiextensions.Interface
	// events delivers peer events produced by the informer handlers.
	events chan *mesh.PeerEvent
	// informer caches and watches Peer objects.
	informer cache.SharedIndexInformer
	// lister reads Peers from the informer's cache.
	lister v1alpha1listers.PeerLister
}
|
|
|
|
|
2019-01-18 01:50:10 +00:00
|
|
|
// New creates a new instance of a mesh.Backend.
|
2022-01-30 16:38:45 +00:00
|
|
|
func New(c kubernetes.Interface, kc kiloclient.Interface, ec apiextensions.Interface, topologyLabel string, l log.Logger) mesh.Backend {
|
2019-05-03 10:53:40 +00:00
|
|
|
ni := v1informers.NewNodeInformer(c, 5*time.Minute, nil)
|
|
|
|
pi := v1alpha1informers.NewPeerInformer(kc, 5*time.Minute, nil)
|
2019-01-18 01:50:10 +00:00
|
|
|
|
2022-01-30 16:38:45 +00:00
|
|
|
logger = l
|
|
|
|
|
2019-05-03 10:53:40 +00:00
|
|
|
return &backend{
|
|
|
|
&nodeBackend{
|
2020-12-11 14:44:20 +00:00
|
|
|
client: c,
|
|
|
|
events: make(chan *mesh.NodeEvent),
|
|
|
|
informer: ni,
|
|
|
|
lister: v1listers.NewNodeLister(ni.GetIndexer()),
|
|
|
|
topologyLabel: topologyLabel,
|
2019-05-03 10:53:40 +00:00
|
|
|
},
|
|
|
|
&peerBackend{
|
|
|
|
client: kc,
|
|
|
|
extensionsClient: ec,
|
|
|
|
events: make(chan *mesh.PeerEvent),
|
|
|
|
informer: pi,
|
|
|
|
lister: v1alpha1listers.NewPeerLister(pi.GetIndexer()),
|
|
|
|
},
|
2019-01-18 01:50:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CleanUp removes configuration applied to the backend.
|
2019-05-03 10:53:40 +00:00
|
|
|
func (nb *nodeBackend) CleanUp(name string) error {
|
2019-01-18 01:50:10 +00:00
|
|
|
patch := []byte("[" + strings.Join([]string{
|
2020-02-22 16:17:13 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(endpointAnnotationKey, "/", jsonPatchSlash, 1))),
|
2019-01-18 01:50:10 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(internalIPAnnotationKey, "/", jsonPatchSlash, 1))),
|
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(keyAnnotationKey, "/", jsonPatchSlash, 1))),
|
2019-04-02 16:25:08 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(lastSeenAnnotationKey, "/", jsonPatchSlash, 1))),
|
2019-05-10 00:05:57 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(wireGuardIPAnnotationKey, "/", jsonPatchSlash, 1))),
|
2021-04-21 17:47:29 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(discoveredEndpointsKey, "/", jsonPatchSlash, 1))),
|
2021-06-18 10:10:23 +00:00
|
|
|
fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(granularityKey, "/", jsonPatchSlash, 1))),
|
2019-01-18 01:50:10 +00:00
|
|
|
}, ",") + "]")
|
2021-05-15 10:08:31 +00:00
|
|
|
if _, err := nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil {
|
2019-01-18 01:50:10 +00:00
|
|
|
return fmt.Errorf("failed to patch node: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get gets a single Node by name.
|
2019-05-03 10:53:40 +00:00
|
|
|
func (nb *nodeBackend) Get(name string) (*mesh.Node, error) {
|
|
|
|
n, err := nb.lister.Get(name)
|
2019-01-18 01:50:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-12-11 14:44:20 +00:00
|
|
|
return translateNode(n, nb.topologyLabel), nil
|
2019-01-18 01:50:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Init initializes the backend; for this backend that means
|
|
|
|
// syncing the informer cache.
|
2019-05-03 10:53:40 +00:00
|
|
|
func (nb *nodeBackend) Init(stop <-chan struct{}) error {
|
|
|
|
go nb.informer.Run(stop)
|
2019-01-18 01:50:10 +00:00
|
|
|
if ok := cache.WaitForCacheSync(stop, func() bool {
|
2019-05-03 10:53:40 +00:00
|
|
|
return nb.informer.HasSynced()
|
2019-01-18 01:50:10 +00:00
|
|
|
}); !ok {
|
2019-05-06 23:49:55 +00:00
|
|
|
return errors.New("failed to sync node cache")
|
2019-01-18 01:50:10 +00:00
|
|
|
}
|
2019-05-03 10:53:40 +00:00
|
|
|
nb.informer.AddEventHandler(
|
2019-01-18 01:50:10 +00:00
|
|
|
cache.ResourceEventHandlerFuncs{
|
|
|
|
AddFunc: func(obj interface{}) {
|
|
|
|
n, ok := obj.(*v1.Node)
|
|
|
|
if !ok {
|
|
|
|
// Failed to decode Node; ignoring...
|
|
|
|
return
|
|
|
|
}
|
2020-12-11 14:44:20 +00:00
|
|
|
nb.events <- &mesh.NodeEvent{Type: mesh.AddEvent, Node: translateNode(n, nb.topologyLabel)}
|
2019-01-18 01:50:10 +00:00
|
|
|
},
|
2019-05-08 15:10:33 +00:00
|
|
|
UpdateFunc: func(old, obj interface{}) {
|
2019-01-18 01:50:10 +00:00
|
|
|
n, ok := obj.(*v1.Node)
|
|
|
|
if !ok {
|
|
|
|
// Failed to decode Node; ignoring...
|
|
|
|
return
|
|
|
|
}
|
2019-05-08 15:10:33 +00:00
|
|
|
o, ok := old.(*v1.Node)
|
|
|
|
if !ok {
|
|
|
|
// Failed to decode Node; ignoring...
|
|
|
|
return
|
|
|
|
}
|
2020-12-11 14:44:20 +00:00
|
|
|
nb.events <- &mesh.NodeEvent{Type: mesh.UpdateEvent, Node: translateNode(n, nb.topologyLabel), Old: translateNode(o, nb.topologyLabel)}
|
2019-01-18 01:50:10 +00:00
|
|
|
},
|
|
|
|
DeleteFunc: func(obj interface{}) {
|
|
|
|
n, ok := obj.(*v1.Node)
|
|
|
|
if !ok {
|
|
|
|
// Failed to decode Node; ignoring...
|
|
|
|
return
|
|
|
|
}
|
2020-12-11 14:44:20 +00:00
|
|
|
nb.events <- &mesh.NodeEvent{Type: mesh.DeleteEvent, Node: translateNode(n, nb.topologyLabel)}
|
2019-01-18 01:50:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// List gets all the Nodes in the cluster.
|
2019-05-03 10:53:40 +00:00
|
|
|
func (nb *nodeBackend) List() ([]*mesh.Node, error) {
|
|
|
|
ns, err := nb.lister.List(labels.Everything())
|
2019-01-18 01:50:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
nodes := make([]*mesh.Node, len(ns))
|
|
|
|
for i := range ns {
|
2020-12-11 14:44:20 +00:00
|
|
|
nodes[i] = translateNode(ns[i], nb.topologyLabel)
|
2019-01-18 01:50:10 +00:00
|
|
|
}
|
|
|
|
return nodes, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set sets the fields of a node.
|
2019-05-03 10:53:40 +00:00
|
|
|
func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
|
|
|
|
old, err := nb.lister.Get(name)
|
2019-01-18 01:50:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to find node: %v", err)
|
|
|
|
}
|
|
|
|
n := old.DeepCopy()
|
2020-02-22 16:17:13 +00:00
|
|
|
n.ObjectMeta.Annotations[endpointAnnotationKey] = node.Endpoint.String()
|
2021-01-24 13:19:01 +00:00
|
|
|
if node.InternalIP == nil {
|
|
|
|
n.ObjectMeta.Annotations[internalIPAnnotationKey] = ""
|
|
|
|
} else {
|
|
|
|
n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String()
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
n.ObjectMeta.Annotations[keyAnnotationKey] = node.Key.String()
|
2019-04-02 16:25:08 +00:00
|
|
|
n.ObjectMeta.Annotations[lastSeenAnnotationKey] = strconv.FormatInt(node.LastSeen, 10)
|
2019-05-10 00:05:57 +00:00
|
|
|
if node.WireGuardIP == nil {
|
|
|
|
n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = ""
|
|
|
|
} else {
|
|
|
|
n.ObjectMeta.Annotations[wireGuardIPAnnotationKey] = node.WireGuardIP.String()
|
|
|
|
}
|
2021-04-21 17:47:29 +00:00
|
|
|
if node.DiscoveredEndpoints == nil {
|
|
|
|
n.ObjectMeta.Annotations[discoveredEndpointsKey] = ""
|
|
|
|
} else {
|
|
|
|
discoveredEndpoints, err := json.Marshal(node.DiscoveredEndpoints)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
n.ObjectMeta.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
|
|
|
|
}
|
2021-06-18 10:10:23 +00:00
|
|
|
n.ObjectMeta.Annotations[granularityKey] = string(node.Granularity)
|
2019-01-18 01:50:10 +00:00
|
|
|
oldData, err := json.Marshal(old)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
newData, err := json.Marshal(n)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to create patch for node %q: %v", n.Name, err)
|
|
|
|
}
|
2021-05-15 10:08:31 +00:00
|
|
|
if _, err = nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
|
2019-01-18 01:50:10 +00:00
|
|
|
return fmt.Errorf("failed to patch node: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Watch returns a chan of node events.
// Events are produced by the informer handlers registered in Init;
// the channel is unbuffered, so the caller must drain it.
func (nb *nodeBackend) Watch() <-chan *mesh.NodeEvent {
	return nb.events
}
|
|
|
|
|
|
|
|
// translateNode translates a Kubernetes Node to a mesh.Node.
// All mesh state is read from Kilo's annotations on the Node; for most
// fields a missing or unparsable annotation yields the field's zero
// value so the mesh can wait for the node to be updated.
// Returns nil if node is nil.
func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
	if node == nil {
		return nil
	}
	_, subnet, err := net.ParseCIDR(node.Spec.PodCIDR)
	// The subnet should only ever fail to parse if the pod CIDR has not been set,
	// so in this case set the subnet to nil and let the node be updated.
	if err != nil {
		subnet = nil
	}
	// Leadership is signaled by the mere presence of the annotation.
	_, leader := node.ObjectMeta.Annotations[leaderAnnotationKey]
	// Allow the region to be overridden by an explicit location.
	location, ok := node.ObjectMeta.Annotations[locationAnnotationKey]
	if !ok {
		location = node.ObjectMeta.Labels[topologyLabel]
	}
	// Allow the endpoint to be overridden.
	endpoint := wireguard.ParseEndpoint(node.ObjectMeta.Annotations[forceEndpointAnnotationKey])
	if endpoint == nil {
		endpoint = wireguard.ParseEndpoint(node.ObjectMeta.Annotations[endpointAnnotationKey])
	}
	// Allow the internal IP to be overridden.
	internalIP := normalizeIP(node.ObjectMeta.Annotations[forceInternalIPAnnotationKey])
	if internalIP == nil {
		internalIP = normalizeIP(node.ObjectMeta.Annotations[internalIPAnnotationKey])
	}
	// Set the ForceInternalIP flag, if force-internal-ip annotation was set to "".
	// An empty or "-" value explicitly requests that the node have no internal IP.
	noInternalIP := false
	if s, ok := node.ObjectMeta.Annotations[forceInternalIPAnnotationKey]; ok && (s == "" || s == "-") {
		noInternalIP = true
		internalIP = nil
	}
	// Set Wireguard PersistentKeepalive setting for the node.
	// The annotation value is interpreted as seconds.
	var persistentKeepalive = time.Duration(0)
	if keepAlive, ok := node.ObjectMeta.Annotations[persistentKeepaliveKey]; ok {
		// We can ignore the error, because p will be set to 0 if an error occurs.
		p, _ := strconv.ParseInt(keepAlive, 10, 64)
		persistentKeepalive = time.Duration(p) * time.Second
	}
	// A missing or malformed last-seen annotation is treated as never seen.
	var lastSeen int64
	if ls, ok := node.ObjectMeta.Annotations[lastSeenAnnotationKey]; !ok {
		lastSeen = 0
	} else {
		if lastSeen, err = strconv.ParseInt(ls, 10, 64); err != nil {
			lastSeen = 0
		}
	}
	// Discovered endpoints are stored as a JSON map; invalid JSON is dropped.
	var discoveredEndpoints map[string]*net.UDPAddr
	if de, ok := node.ObjectMeta.Annotations[discoveredEndpointsKey]; ok {
		err := json.Unmarshal([]byte(de), &discoveredEndpoints)
		if err != nil {
			discoveredEndpoints = nil
		}
	}
	// Set allowed IPs for a location.
	// The annotation is a comma-separated list of CIDRs; invalid entries are skipped.
	var allowedLocationIPs []net.IPNet
	if str, ok := node.ObjectMeta.Annotations[allowedLocationIPsKey]; ok {
		for _, ip := range strings.Split(str, ",") {
			if ipnet := normalizeIP(ip); ipnet != nil {
				allowedLocationIPs = append(allowedLocationIPs, *ipnet)
			}
		}
	}
	// Only the two known granularities are accepted; anything else is
	// normalized to the empty granularity.
	var meshGranularity mesh.Granularity
	if gr, ok := node.ObjectMeta.Annotations[granularityKey]; ok {
		meshGranularity = mesh.Granularity(gr)
		switch meshGranularity {
		case mesh.LogicalGranularity:
		case mesh.FullGranularity:
		default:
			meshGranularity = ""
		}
	}

	// TODO log some error or warning.
	// A parse failure leaves key as the zero value wgtypes.Key.
	key, _ := wgtypes.ParseKey(node.ObjectMeta.Annotations[keyAnnotationKey])

	return &mesh.Node{
		// Endpoint and InternalIP should only ever fail to parse if the
		// remote node's agent has not yet set its IP address;
		// in this case the IP will be nil and
		// the mesh can wait for the node to be updated.
		// It is valid for the InternalIP to be nil,
		// if the given node only has public IP addresses.
		Endpoint:            endpoint,
		NoInternalIP:        noInternalIP,
		InternalIP:          internalIP,
		Key:                 key,
		LastSeen:            lastSeen,
		Leader:              leader,
		Location:            location,
		Name:                node.Name,
		PersistentKeepalive: persistentKeepalive,
		Subnet:              subnet,
		// WireGuardIP can fail to parse if the node is not a leader or if
		// the node's agent has not yet reconciled. In either case, the IP
		// will parse as nil.
		WireGuardIP:         normalizeIP(node.ObjectMeta.Annotations[wireGuardIPAnnotationKey]),
		DiscoveredEndpoints: discoveredEndpoints,
		AllowedLocationIPs:  allowedLocationIPs,
		Granularity:         meshGranularity,
	}
}
|
|
|
|
|
2019-05-03 10:53:40 +00:00
|
|
|
// translatePeer translates a Peer CRD to a mesh.Peer.
|
|
|
|
func translatePeer(peer *v1alpha1.Peer) *mesh.Peer {
|
|
|
|
if peer == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
var aips []net.IPNet
|
2019-05-03 10:53:40 +00:00
|
|
|
for _, aip := range peer.Spec.AllowedIPs {
|
|
|
|
aip := normalizeIP(aip)
|
|
|
|
// Skip any invalid IPs.
|
|
|
|
if aip == nil {
|
|
|
|
continue
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
aips = append(aips, *aip)
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
var endpoint *wireguard.Endpoint
|
|
|
|
if peer.Spec.Endpoint != nil {
|
|
|
|
ip := net.ParseIP(peer.Spec.Endpoint.IP)
|
|
|
|
if ip4 := ip.To4(); ip4 != nil {
|
|
|
|
ip = ip4
|
|
|
|
} else {
|
|
|
|
ip = ip.To16()
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
if peer.Spec.Endpoint.Port > 0 {
|
|
|
|
if ip != nil {
|
|
|
|
endpoint = wireguard.NewEndpoint(ip, int(peer.Spec.Endpoint.Port))
|
|
|
|
}
|
|
|
|
if peer.Spec.Endpoint.DNS != "" {
|
|
|
|
endpoint = wireguard.ParseEndpoint(fmt.Sprintf("%s:%d", peer.Spec.Endpoint.DNS, peer.Spec.Endpoint.Port))
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
|
|
|
|
key, err := wgtypes.ParseKey(peer.Spec.PublicKey)
|
|
|
|
if err != nil {
|
|
|
|
level.Error(logger).Log("msg", "failed to parse public key", "peer", peer.Name, "err", err.Error())
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
var psk *wgtypes.Key
|
|
|
|
if k, err := wgtypes.ParseKey(peer.Spec.PresharedKey); err != nil {
|
|
|
|
// Set key to nil to avoid setting a key to the zero value wgtypes.Key{}
|
|
|
|
psk = nil
|
|
|
|
} else {
|
|
|
|
psk = &k
|
2020-05-05 09:36:39 +00:00
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
var pka time.Duration
|
2019-05-03 10:53:40 +00:00
|
|
|
if peer.Spec.PersistentKeepalive > 0 {
|
2022-01-30 16:38:45 +00:00
|
|
|
pka = time.Duration(peer.Spec.PersistentKeepalive)
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
return &mesh.Peer{
|
|
|
|
Name: peer.Name,
|
|
|
|
Peer: wireguard.Peer{
|
2022-01-30 16:38:45 +00:00
|
|
|
PeerConfig: wgtypes.PeerConfig{
|
|
|
|
AllowedIPs: aips,
|
|
|
|
PersistentKeepaliveInterval: &pka,
|
|
|
|
PresharedKey: psk,
|
|
|
|
PublicKey: key,
|
|
|
|
},
|
|
|
|
Endpoint: endpoint,
|
2019-05-03 10:53:40 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CleanUp removes configuration applied to the backend.
// Peers carry no backend state that needs removal, so this is a no-op.
func (pb *peerBackend) CleanUp(name string) error {
	return nil
}
|
|
|
|
|
|
|
|
// Get gets a single Peer by name.
|
|
|
|
func (pb *peerBackend) Get(name string) (*mesh.Peer, error) {
|
|
|
|
p, err := pb.lister.Get(name)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return translatePeer(p), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Init initializes the backend; for this backend that means
|
|
|
|
// syncing the informer cache.
|
|
|
|
func (pb *peerBackend) Init(stop <-chan struct{}) error {
|
2021-06-14 07:08:46 +00:00
|
|
|
// Check the presents of the CRD peers.kilo.squat.ai.
|
|
|
|
if _, err := pb.extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), strings.Join([]string{v1alpha1.PeerPlural, v1alpha1.GroupName}, "."), metav1.GetOptions{}); err != nil {
|
|
|
|
return fmt.Errorf("CRD is not present: %v", err)
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
go pb.informer.Run(stop)
|
|
|
|
if ok := cache.WaitForCacheSync(stop, func() bool {
|
|
|
|
return pb.informer.HasSynced()
|
|
|
|
}); !ok {
|
2019-05-06 23:49:55 +00:00
|
|
|
return errors.New("failed to sync peer cache")
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
pb.informer.AddEventHandler(
|
|
|
|
cache.ResourceEventHandlerFuncs{
|
|
|
|
AddFunc: func(obj interface{}) {
|
|
|
|
p, ok := obj.(*v1alpha1.Peer)
|
|
|
|
if !ok || p.Validate() != nil {
|
|
|
|
// Failed to decode Peer; ignoring...
|
|
|
|
return
|
|
|
|
}
|
|
|
|
pb.events <- &mesh.PeerEvent{Type: mesh.AddEvent, Peer: translatePeer(p)}
|
|
|
|
},
|
2019-05-08 15:10:33 +00:00
|
|
|
UpdateFunc: func(old, obj interface{}) {
|
2019-05-03 10:53:40 +00:00
|
|
|
p, ok := obj.(*v1alpha1.Peer)
|
|
|
|
if !ok || p.Validate() != nil {
|
|
|
|
// Failed to decode Peer; ignoring...
|
|
|
|
return
|
|
|
|
}
|
2019-05-08 15:10:33 +00:00
|
|
|
o, ok := old.(*v1alpha1.Peer)
|
|
|
|
if !ok || o.Validate() != nil {
|
|
|
|
// Failed to decode Peer; ignoring...
|
|
|
|
return
|
|
|
|
}
|
|
|
|
pb.events <- &mesh.PeerEvent{Type: mesh.UpdateEvent, Peer: translatePeer(p), Old: translatePeer(o)}
|
2019-05-03 10:53:40 +00:00
|
|
|
},
|
|
|
|
DeleteFunc: func(obj interface{}) {
|
|
|
|
p, ok := obj.(*v1alpha1.Peer)
|
|
|
|
if !ok || p.Validate() != nil {
|
|
|
|
// Failed to decode Peer; ignoring...
|
|
|
|
return
|
|
|
|
}
|
|
|
|
pb.events <- &mesh.PeerEvent{Type: mesh.DeleteEvent, Peer: translatePeer(p)}
|
|
|
|
},
|
|
|
|
},
|
|
|
|
)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// List gets all the Peers in the cluster.
|
|
|
|
func (pb *peerBackend) List() ([]*mesh.Peer, error) {
|
|
|
|
ps, err := pb.lister.List(labels.Everything())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
peers := make([]*mesh.Peer, len(ps))
|
|
|
|
for i := range ps {
|
|
|
|
// Skip invalid peers.
|
|
|
|
if ps[i].Validate() != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
peers[i] = translatePeer(ps[i])
|
|
|
|
}
|
|
|
|
return peers, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set sets the fields of a peer.
|
|
|
|
func (pb *peerBackend) Set(name string, peer *mesh.Peer) error {
|
|
|
|
old, err := pb.lister.Get(name)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to find peer: %v", err)
|
|
|
|
}
|
|
|
|
p := old.DeepCopy()
|
|
|
|
p.Spec.AllowedIPs = make([]string, len(peer.AllowedIPs))
|
|
|
|
for i := range peer.AllowedIPs {
|
|
|
|
p.Spec.AllowedIPs[i] = peer.AllowedIPs[i].String()
|
|
|
|
}
|
|
|
|
if peer.Endpoint != nil {
|
|
|
|
p.Spec.Endpoint = &v1alpha1.PeerEndpoint{
|
2020-02-28 14:48:32 +00:00
|
|
|
DNSOrIP: v1alpha1.DNSOrIP{
|
2022-01-30 16:38:45 +00:00
|
|
|
IP: peer.Endpoint.IP().String(),
|
|
|
|
DNS: peer.Endpoint.DNS(),
|
2020-02-28 14:48:32 +00:00
|
|
|
},
|
2022-01-30 16:38:45 +00:00
|
|
|
Port: uint32(peer.Endpoint.Port()),
|
2019-05-03 10:53:40 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-30 16:38:45 +00:00
|
|
|
if peer.PersistentKeepaliveInterval == nil {
|
|
|
|
p.Spec.PersistentKeepalive = 0
|
|
|
|
} else {
|
|
|
|
p.Spec.PersistentKeepalive = int(*peer.PersistentKeepaliveInterval)
|
|
|
|
}
|
|
|
|
if peer.PresharedKey == nil {
|
|
|
|
p.Spec.PresharedKey = ""
|
|
|
|
} else {
|
|
|
|
p.Spec.PresharedKey = peer.PresharedKey.String()
|
|
|
|
}
|
|
|
|
p.Spec.PublicKey = peer.PublicKey.String()
|
2021-05-15 10:08:31 +00:00
|
|
|
if _, err = pb.client.KiloV1alpha1().Peers().Update(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
|
2019-05-03 10:53:40 +00:00
|
|
|
return fmt.Errorf("failed to update peer: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Watch returns a chan of peer events.
// Events are produced by the informer handlers registered in Init;
// the channel is unbuffered, so the caller must drain it.
func (pb *peerBackend) Watch() <-chan *mesh.PeerEvent {
	return pb.events
}
|
|
|
|
|
2019-01-18 01:50:10 +00:00
|
|
|
// normalizeIP parses a CIDR string and returns the network with its IP
// replaced by the host address in canonical form: 4-byte for IPv4,
// 16-byte for IPv6. It returns nil when the string is not a valid CIDR.
func normalizeIP(ip string) *net.IPNet {
	addr, network, err := net.ParseCIDR(ip)
	if err != nil || network == nil {
		return nil
	}
	// Prefer the compact 4-byte representation for IPv4 addresses.
	if v4 := addr.To4(); v4 != nil {
		network.IP = v4
	} else {
		network.IP = addr.To16()
	}
	return network
}
|