Merge pull request #197 from squat/autodetect_granularity
pkg/ cmd/: kgctl autodetect mesh granularity
commit 9f23e39fca
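kgctl now defaults --mesh-granularity to auto and resolves the effective granularity from the kilo.squat.ai/granularity annotation that the agent writes to each node, so the flag no longer has to be kept in sync with the cluster by hand. Below is a minimal sketch of that resolution step; the resolveGranularity name is hypothetical and only mirrors the optainGranularity helper added in the diff.

package main

import (
    "errors"
    "fmt"

    "github.com/squat/kilo/pkg/mesh"
)

// resolveGranularity is a hypothetical stand-in for the optainGranularity helper
// added below: when "auto" is requested, use the granularity recorded in the
// first node's kilo.squat.ai/granularity annotation instead.
func resolveGranularity(requested mesh.Granularity, nodes []*mesh.Node) (mesh.Granularity, error) {
    if requested != mesh.AutoGranularity {
        return requested, nil
    }
    if len(nodes) == 0 {
        return requested, errors.New("could not get any nodes")
    }
    switch g := nodes[0].Granularity; g {
    case mesh.LogicalGranularity, mesh.FullGranularity:
        return g, nil
    default:
        return g, fmt.Errorf("mesh granularity %v is not supported", g)
    }
}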
@@ -38,6 +38,11 @@ func runGraph(_ *cobra.Command, _ []string) error {
     if err != nil {
         return fmt.Errorf("failed to list peers: %v", err)
     }
+    // Obtain the Granularity by looking at the annotation of the first node.
+    if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
+        return fmt.Errorf("failed to obtain granularity: %w", err)
+    }
+
     var hostname string
     subnet := mesh.DefaultKiloSubnet
     nodes := make(map[string]*mesh.Node)
@@ -15,6 +15,7 @@
 package main

 import (
+    "errors"
     "fmt"
     "os"
     "path/filepath"
@@ -47,6 +48,7 @@ var (
     availableGranularities = strings.Join([]string{
         string(mesh.LogicalGranularity),
         string(mesh.FullGranularity),
+        string(mesh.AutoGranularity),
     }, ", ")
     availableLogLevels = strings.Join([]string{
         logLevelAll,
@@ -72,6 +74,7 @@ func runRoot(_ *cobra.Command, _ []string) error {
     switch opts.granularity {
     case mesh.LogicalGranularity:
     case mesh.FullGranularity:
+    case mesh.AutoGranularity:
     default:
         return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", granularity, availableGranularities)
     }
@@ -109,7 +112,7 @@ func main() {
         Version: version.Version,
     }
     cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends))
-    cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.LogicalGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
+    cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.AutoGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities))
     defaultKubeconfig := os.Getenv("KUBECONFIG")
     if _, err := os.Stat(defaultKubeconfig); os.IsNotExist(err) {
         defaultKubeconfig = filepath.Join(os.Getenv("HOME"), ".kube/config")
@@ -130,3 +133,20 @@ func main() {
         os.Exit(1)
     }
 }
+
+func optainGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
+    if gr == mesh.AutoGranularity {
+        if len(ns) == 0 {
+            return gr, errors.New("could not get any nodes")
+        }
+        ret := mesh.Granularity(ns[0].Granularity)
+        switch ret {
+        case mesh.LogicalGranularity:
+        case mesh.FullGranularity:
+        default:
+            return ret, fmt.Errorf("mesh granularity %v is not supported", opts.granularity)
+        }
+        return ret, nil
+    }
+    return gr, nil
+}
@@ -121,6 +121,10 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
     if err != nil {
         return fmt.Errorf("failed to list peers: %v", err)
     }
+    // Obtain the Granularity by looking at the annotation of the first node.
+    if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
+        return fmt.Errorf("failed to obtain granularity: %w", err)
+    }
     hostname := args[0]
     subnet := mesh.DefaultKiloSubnet
     nodes := make(map[string]*mesh.Node)
@@ -208,6 +212,10 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
     if err != nil {
         return fmt.Errorf("failed to list peers: %v", err)
     }
+    // Obtain the Granularity by looking at the annotation of the first node.
+    if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
+        return fmt.Errorf("failed to obtain granularity: %w", err)
+    }
     var hostname string
     subnet := mesh.DefaultKiloSubnet
     nodes := make(map[string]*mesh.Node)
@@ -42,3 +42,7 @@ test_reject_peer_empty_allowed_ips() {
 test_reject_peer_empty_public_key() {
     assert_fail "create_peer e2e 10.5.0.1/32 0 ''" "should not be able to create Peer with empty public key"
 }
+
+test_mesh_granularity_auto_detect() {
+    assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity full)"
+}
@@ -159,6 +159,7 @@ check_peer() {
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
     assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
+    assert_equals "$($KGCTL_BINARY showconf peer "$PEER")" "$($KGCTL_BINARY showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
     rm "$INTERFACE" "$PEER".ini
     delete_peer "$PEER"
     delete_interface "$INTERFACE"
@@ -22,6 +22,10 @@ test_location_mesh_peer() {
     check_peer wg1 e2e 10.5.0.1/32 location
 }

+test_mesh_granularity_auto_detect() {
+    assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity location)"
+}
+
 teardown_suite () {
     delete_cluster
 }
@@ -60,6 +60,7 @@ const (
     wireGuardIPAnnotationKey = "kilo.squat.ai/wireguard-ip"
     discoveredEndpointsKey = "kilo.squat.ai/discovered-endpoints"
     allowedLocationIPsKey = "kilo.squat.ai/allowed-location-ips"
+    granularityKey = "kilo.squat.ai/granularity"
     // RegionLabelKey is the key for the well-known Kubernetes topology region label.
     RegionLabelKey = "topology.kubernetes.io/region"
     jsonPatchSlash = "~1"
@@ -129,6 +130,7 @@ func (nb *nodeBackend) CleanUp(name string) error {
         fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(lastSeenAnnotationKey, "/", jsonPatchSlash, 1))),
         fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(wireGuardIPAnnotationKey, "/", jsonPatchSlash, 1))),
         fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(discoveredEndpointsKey, "/", jsonPatchSlash, 1))),
+        fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(granularityKey, "/", jsonPatchSlash, 1))),
     }, ",") + "]")
     if _, err := nb.client.CoreV1().Nodes().Patch(context.TODO(), name, types.JSONPatchType, patch, metav1.PatchOptions{}); err != nil {
         return fmt.Errorf("failed to patch node: %v", err)
@@ -232,6 +234,7 @@ func (nb *nodeBackend) Set(name string, node *mesh.Node) error {
         }
         n.ObjectMeta.Annotations[discoveredEndpointsKey] = string(discoveredEndpoints)
     }
+    n.ObjectMeta.Annotations[granularityKey] = string(node.Granularity)
     oldData, err := json.Marshal(old)
     if err != nil {
         return err
@@ -321,6 +324,16 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
             }
         }
     }
+    var meshGranularity mesh.Granularity
+    if gr, ok := node.ObjectMeta.Annotations[granularityKey]; ok {
+        meshGranularity = mesh.Granularity(gr)
+        switch meshGranularity {
+        case mesh.LogicalGranularity:
+        case mesh.FullGranularity:
+        default:
+            meshGranularity = ""
+        }
+    }

     return &mesh.Node{
         // Endpoint and InternalIP should only ever fail to parse if the
@@ -345,6 +358,7 @@ func translateNode(node *v1.Node, topologyLabel string) *mesh.Node {
         WireGuardIP: normalizeIP(node.ObjectMeta.Annotations[wireGuardIPAnnotationKey]),
         DiscoveredEndpoints: discoveredEndpoints,
         AllowedLocationIPs: allowedLocationIPs,
+        Granularity: meshGranularity,
     }
 }

@@ -47,6 +47,9 @@ const (
     // FullGranularity indicates that the network should create
     // a mesh between every node.
     FullGranularity Granularity = "full"
+    // AutoGranularity can be used with kgctl to obtain
+    // the granularity automatically.
+    AutoGranularity Granularity = "auto"
 )

 // Node represents a node in the network.
@@ -68,6 +71,7 @@ type Node struct {
     WireGuardIP *net.IPNet
     DiscoveredEndpoints map[string]*wireguard.Endpoint
     AllowedLocationIPs []*net.IPNet
+    Granularity Granularity
 }

 // Ready indicates whether or not the node is ready.
@@ -381,6 +381,7 @@ func (m *Mesh) handleLocal(n *Node) {
         WireGuardIP: m.wireGuardIP,
         DiscoveredEndpoints: n.DiscoveredEndpoints,
         AllowedLocationIPs: n.AllowedLocationIPs,
+        Granularity: m.granularity,
     }
     if !nodesAreEqual(n, local) {
         level.Debug(m.logger).Log("msg", "local node differs from backend")
@@ -420,6 +421,7 @@ func (m *Mesh) applyTopology() {
     nodes := make(map[string]*Node)
     var readyNodes float64
     for k := range m.nodes {
+        m.nodes[k].Granularity = m.granularity
         if !m.nodes[k].Ready() {
             continue
         }
@@ -675,7 +677,7 @@ func nodesAreEqual(a, b *Node) bool {
     // Ignore LastSeen when comparing equality we want to check if the nodes are
     // equivalent. However, we do want to check if LastSeen has transitioned
     // between valid and invalid.
-    return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive && discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) && ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs)
+    return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive && discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) && ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) && a.Granularity == b.Granularity
 }

 func peersAreEqual(a, b *Peer) bool {