cmd/kgctl: finish connect command

This commit fixes some bugs and finishes the implementation of the
`kgctl connect` command: the peer name is now a required positional argument,
an existing private key can be supplied with --private-key, the Peer resource
is created through the shared Kilo client and deleted again on shutdown when
--clean-up is set, the WireGuard device is configured directly via wgctrl
instead of writing a configuration file to /tmp/wg.ini, additional cluster
CIDRs (e.g. the service CIDR) can be routed with --allowed-ips, and the
interface, routes, and device configuration are reconciled every
--resync-period.
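
For example, registering a laptop as the peer `my-laptop` might look roughly
like the following (an illustrative invocation, not taken from the docs; the
peer name and CIDRs are placeholders, and the command needs enough privileges
to create interfaces and routes):

	kgctl connect my-laptop \
		--kubeconfig "$KUBECONFIG" \
		--allowed-ip 10.10.10.10/32 \
		--allowed-ips 10.43.0.0/16 \
		--interface kilo0 \
		--resync-period 30s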

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Lucas Servén Marín 2022-04-01 14:03:29 +02:00
parent 27d59816f5
commit 58bd349f69
6 changed files with 306 additions and 291 deletions
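
The new runConnect is built around an oklog/run group, as seen in the first
file below: one actor turns SIGINT/SIGTERM into cancellation, one watches the
route table, and one re-runs a sync function every resync period. A minimal,
self-contained sketch of that pattern (not Kilo's actual code; the 30-second
period and the syncOnce stand-in are assumptions for illustration):

	// reconcile.go — sketch of the resync-loop pattern used by `kgctl connect`.
	package main

	import (
		"context"
		"errors"
		"log"
		"syscall"
		"time"

		"github.com/oklog/run"
	)

	// syncOnce stands in for the real sync step: list nodes and peers, rebuild
	// the WireGuard configuration, and update the kernel routing table.
	func syncOnce() error { return nil }

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		var g run.Group
		// Actor 1: turn SIGINT/SIGTERM into an error that stops the whole group.
		g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))
		// Actor 2: re-run the sync step until the shared context is canceled.
		g.Add(func() error {
			for {
				if err := syncOnce(); err != nil {
					log.Printf("failed to sync: %v", err)
				}
				select {
				case <-time.After(30 * time.Second): // --resync-period
				case <-ctx.Done():
					return nil
				}
			}
		}, func(error) {
			cancel()
		})

		// A delivered signal is a normal shutdown, not a failure.
		if err := g.Run(); err != nil {
			var serr run.SignalError
			if !errors.As(err, &serr) {
				log.Fatal(err)
			}
		}
	}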


@@ -21,10 +21,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
-	logg "log"
 	"net"
 	"os"
+	"sort"
 	"strings"
 	"syscall"
 	"time"
@@ -37,36 +36,54 @@ import (
 	"golang.org/x/sys/unix"
 	"golang.zx2c4.com/wireguard/wgctrl"
 	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/tools/clientcmd"

 	"github.com/squat/kilo/pkg/iproute"
-	"github.com/squat/kilo/pkg/k8s"
 	"github.com/squat/kilo/pkg/k8s/apis/kilo/v1alpha1"
-	kiloclient "github.com/squat/kilo/pkg/k8s/clientset/versioned"
 	"github.com/squat/kilo/pkg/mesh"
 	"github.com/squat/kilo/pkg/route"
 	"github.com/squat/kilo/pkg/wireguard"
 )

-func takeIPNet(_ net.IP, i *net.IPNet, _ error) *net.IPNet {
+var (
+	logLevel    string
+	connectOpts struct {
+		allowedIP           net.IPNet
+		allowedIPs          []net.IPNet
+		privateKey          string
+		cleanUp             bool
+		mtu                 uint
+		resyncPeriod        time.Duration
+		interfaceName       string
+		persistentKeepalive int
+	}
+)
+
+func takeIPNet(_ net.IP, i *net.IPNet, err error) *net.IPNet {
+	if err != nil {
+		panic(err)
+	}
 	return i
 }

 func connect() *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "connect",
-		Args: cobra.MaximumNArgs(1),
-		RunE: connectAsPeer,
+		Args: cobra.ExactArgs(1),
+		RunE: runConnect,
 		Short: "connect to a Kilo cluster as a peer over WireGuard",
+		SilenceUsage: true,
 	}
-	cmd.Flags().IPNetP("allowed-ip", "a", *takeIPNet(net.ParseCIDR("10.10.10.10/32")), "Allowed IP of the peer")
-	cmd.Flags().IPNetP("service-cidr", "c", *takeIPNet(net.ParseCIDR("10.43.0.0/16")), "service CIDR of the cluster")
-	cmd.Flags().String("log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
-	cmd.Flags().String("config-path", "/tmp/wg.ini", "path to WireGuard configuation file")
-	cmd.Flags().Bool("clean-up", true, "clean up routes and interface")
-	cmd.Flags().Uint("mtu", uint(1420), "clean up routes and interface")
-	cmd.Flags().Duration("resync-period", 30*time.Second, "How often should Kilo reconcile?")
+	cmd.Flags().IPNetVarP(&connectOpts.allowedIP, "allowed-ip", "a", *takeIPNet(net.ParseCIDR("10.10.10.10/32")), "Allowed IP of the peer.")
+	cmd.Flags().StringSliceVar(&allowedIPs, "allowed-ips", []string{}, "Additional allowed IPs of the cluster, e.g. the service CIDR.")
+	cmd.Flags().StringVar(&logLevel, "log-level", logLevelInfo, fmt.Sprintf("Log level to use. Possible values: %s", availableLogLevels))
+	cmd.Flags().StringVar(&connectOpts.privateKey, "private-key", "", "Path to an existing WireGuard private key file.")
+	cmd.Flags().BoolVar(&connectOpts.cleanUp, "clean-up", true, "Should Kilo clean up the routes and interface when it shuts down?")
+	cmd.Flags().UintVar(&connectOpts.mtu, "mtu", uint(1420), "The MTU for the WireGuard interface.")
+	cmd.Flags().DurationVar(&connectOpts.resyncPeriod, "resync-period", 30*time.Second, "How often should Kilo reconcile?")
+	cmd.Flags().StringVarP(&connectOpts.interfaceName, "interface", "i", mesh.DefaultKiloInterface, "Name of the Kilo interface to use; if it does not exist, it will be created.")
+	cmd.Flags().IntVar(&connectOpts.persistentKeepalive, "persistent-keepalive", 10, "How often should WireGuard send keepalives? Setting to 0 will disable sending keepalives.")

 	availableLogLevels = strings.Join([]string{
 		logLevelAll,
@@ -80,35 +97,11 @@ func connect() *cobra.Command {
 	return cmd
 }

-func connectAsPeer(cmd *cobra.Command, args []string) error {
+func runConnect(cmd *cobra.Command, args []string) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	resyncPersiod, err := cmd.Flags().GetDuration("resync-period")
-	if err != nil {
-		return err
-	}
-	mtu, err := cmd.Flags().GetUint("mtu")
-	if err != nil {
-		return err
-	}
-	configPath, err := cmd.Flags().GetString("config-path")
-	if err != nil {
-		return err
-	}
-	serviceCIDR, err := cmd.Flags().GetIPNet("service-cidr")
-	if err != nil {
-		return err
-	}
-	allowedIP, err := cmd.Flags().GetIPNet("allowed-ip")
-	if err != nil {
-		return err
-	}
 	logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
-	logLevel, err := cmd.Flags().GetString("log-level")
-	if err != nil {
-		return err
-	}
 	switch logLevel {
 	case logLevelAll:
 		logger = level.NewFilter(logger, level.AllowAll())
@@ -127,61 +120,82 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 	}
 	logger = log.With(logger, "ts", log.DefaultTimestampUTC)
 	logger = log.With(logger, "caller", log.DefaultCaller)
-	peername := "random"
-	if len(args) == 1 {
-		peername = args[0]
+	peerName := args[0]
+
+	for i := range allowedIPs {
+		_, aip, err := net.ParseCIDR(allowedIPs[i])
+		if err != nil {
+			return err
+		}
+		connectOpts.allowedIPs = append(connectOpts.allowedIPs, *aip)
 	}

-	var kiloClient *kiloclient.Clientset
-	switch backend {
-	case k8s.Backend:
-		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
-		if err != nil {
-			return fmt.Errorf("failed to create Kubernetes config: %v", err)
-		}
-		kiloClient = kiloclient.NewForConfigOrDie(config)
-	default:
-		return fmt.Errorf("backend %v unknown; posible values are: %s", backend, availableBackends)
-	}
-	privateKey, err := wgtypes.GeneratePrivateKey()
-	if err != nil {
-		return fmt.Errorf("failed to generate private key: %w", err)
-	}
+	var privateKey wgtypes.Key
+	var err error
+	if connectOpts.privateKey == "" {
+		privateKey, err = wgtypes.GeneratePrivateKey()
+		if err != nil {
+			return fmt.Errorf("failed to generate private key: %w", err)
+		}
+	} else {
+		raw, err := os.ReadFile(connectOpts.privateKey)
+		if err != nil {
+			return fmt.Errorf("failed to read private key: %w", err)
+		}
+		privateKey, err = wgtypes.ParseKey(string(raw))
+		if err != nil {
+			return fmt.Errorf("failed to parse private key: %w", err)
+		}
+	}
 	publicKey := privateKey.PublicKey()
 	level.Info(logger).Log("msg", "generated public key", "key", publicKey)

+	if _, err := opts.kc.KiloV1alpha1().Peers().Get(ctx, peerName, metav1.GetOptions{}); apierrors.IsNotFound(err) {
 		peer := &v1alpha1.Peer{
 			ObjectMeta: metav1.ObjectMeta{
-				Name: peername,
+				Name: peerName,
 			},
 			Spec: v1alpha1.PeerSpec{
-				AllowedIPs: []string{allowedIP.String()},
-				PersistentKeepalive: 10,
+				AllowedIPs: []string{connectOpts.allowedIP.String()},
+				PersistentKeepalive: connectOpts.persistentKeepalive,
 				PublicKey: publicKey.String(),
 			},
 		}
-	if p, err := kiloClient.KiloV1alpha1().Peers().Get(ctx, peername, metav1.GetOptions{}); err != nil || p == nil {
-		peer, err = kiloClient.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{})
-		if err != nil {
+		if _, err := opts.kc.KiloV1alpha1().Peers().Create(ctx, peer, metav1.CreateOptions{}); err != nil {
 			return fmt.Errorf("failed to create peer: %w", err)
 		}
+		level.Info(logger).Log("msg", "created peer", "peer", peerName)
+		if connectOpts.cleanUp {
+			defer func() {
+				ctxWithTimeout, cancelWithTimeout := context.WithTimeout(context.Background(), 10*time.Second)
+				defer cancelWithTimeout()
+				if err := opts.kc.KiloV1alpha1().Peers().Delete(ctxWithTimeout, peerName, metav1.DeleteOptions{}); err != nil {
+					level.Error(logger).Log("err", fmt.Sprintf("failed to delete peer: %v", err))
+				} else {
+					level.Info(logger).Log("msg", "deleted peer", "peer", peerName)
+				}
+			}()
 		}
-	kiloIfaceName := "kilo0"
+	} else if err != nil {
+		return fmt.Errorf("failed to get peer: %w", err)
+	}

-	iface, _, err := wireguard.New(kiloIfaceName, mtu)
+	iface, _, err := wireguard.New(connectOpts.interfaceName, connectOpts.mtu)
 	if err != nil {
 		return fmt.Errorf("failed to create wg interface: %w", err)
 	}
-	level.Info(logger).Log("msg", "successfully created wg interface", "name", kiloIfaceName, "no", iface)
-	if err := iproute.Set(iface, false); err != nil {
-		return err
+	level.Info(logger).Log("msg", "created WireGuard interface", "name", connectOpts.interfaceName, "index", iface)
+
+	table := route.NewTable()
+	if connectOpts.cleanUp {
+		defer cleanUp(iface, table, logger)
 	}
-	if err := iproute.SetAddress(iface, &allowedIP); err != nil {
+
+	if err := iproute.SetAddress(iface, &connectOpts.allowedIP); err != nil {
 		return err
 	}
-	level.Info(logger).Log("mag", "successfully set IP address of wg interface", "IP", allowedIP.String())
+	level.Info(logger).Log("msg", "set IP address of WireGuard interface", "IP", connectOpts.allowedIP.String())
+
 	if err := iproute.Set(iface, true); err != nil {
 		return err
@@ -190,13 +204,13 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 	var g run.Group
 	g.Add(run.SignalHandler(ctx, syscall.SIGINT, syscall.SIGTERM))

-	table := route.NewTable()
-	stop := make(chan struct{}, 1)
-	errCh := make(<-chan error, 1)
 	{
-		ch := make(chan struct{}, 1)
 		g.Add(
 			func() error {
+				errCh, err := table.Run(ctx.Done())
+				if err != nil {
+					return fmt.Errorf("failed to watch for route table updates: %w", err)
+				}
 				for {
 					select {
 					case err, ok := <-errCh:
@@ -205,28 +219,73 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 						} else {
 							return nil
 						}
-					case <-ch:
+					case <-ctx.Done():
 						return nil
 					}
 				}
 			},
 			func(err error) {
-				ch <- struct{}{}
-				close(ch)
-				stop <- struct{}{}
-				close(stop)
-				level.Error(logger).Log("msg", "stopped ip routes table", "err", err.Error())
+				cancel()
+				var serr run.SignalError
+				if ok := errors.As(err, &serr); ok {
+					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+				} else {
+					level.Error(logger).Log("msg", "received error", "err", err.Error())
+				}
 			},
 		)
 	}
 	{
-		ch := make(chan struct{}, 1)
 		g.Add(
 			func() error {
+				level.Info(logger).Log("msg", "starting syncer")
 				for {
+					if err := sync(table, peerName, privateKey, iface, logger); err != nil {
+						level.Error(logger).Log("msg", "failed to sync", "err", err.Error())
+					}
+					select {
+					case <-time.After(connectOpts.resyncPeriod):
+					case <-ctx.Done():
+						return nil
+					}
+				}
+			}, func(err error) {
+				cancel()
+				var serr run.SignalError
+				if ok := errors.As(err, &serr); ok {
+					level.Debug(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
+				} else {
+					level.Error(logger).Log("msg", "received error", "err", err.Error())
+				}
+			})
+	}
+
+	err = g.Run()
+	var serr run.SignalError
+	if ok := errors.As(err, &serr); ok {
+		return nil
+	}
+	return err
+}
+
+func cleanUp(iface int, t *route.Table, logger log.Logger) {
+	if err := iproute.Set(iface, false); err != nil {
+		level.Error(logger).Log("err", fmt.Sprintf("failed to set WireGuard interface down: %v", err))
+	}
+	if err := iproute.RemoveInterface(iface); err != nil {
+		level.Error(logger).Log("err", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
+	}
+	if err := t.CleanUp(); err != nil {
+		level.Error(logger).Log("failed to clean up routes: %v", err)
+	}
+	return
+}
+
+func sync(table *route.Table, peerName string, privateKey wgtypes.Key, iface int, logger log.Logger) error {
 	ns, err := opts.backend.Nodes().List()
 	if err != nil {
-		return fmt.Errorf("failed to list nodes: %v", err)
+		return fmt.Errorf("failed to list nodes: %w", err)
 	}
 	for _, n := range ns {
 		_, err := n.Endpoint.UDPAddr(true)
@@ -236,69 +295,77 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 	}
 	ps, err := opts.backend.Peers().List()
 	if err != nil {
-		return fmt.Errorf("failed to list peers: %v", err)
+		return fmt.Errorf("failed to list peers: %w", err)
 	}
 	// Obtain the Granularity by looking at the annotation of the first node.
-	if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
-		return fmt.Errorf("failed to obtain granularity: %w", err)
+	if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
+		return fmt.Errorf("failed to determine granularity: %w", err)
 	}
 	var hostname string
-	subnet := mesh.DefaultKiloSubnet
+	var subnet *net.IPNet
 	nodes := make(map[string]*mesh.Node)
+	var nodeNames []string
 	for _, n := range ns {
 		if n.Ready() {
 			nodes[n.Name] = n
 			hostname = n.Name
+			nodeNames = append(nodeNames, n.Name)
 		}
-		if n.WireGuardIP != nil {
+		if n.WireGuardIP != nil && subnet == nil {
 			subnet = n.WireGuardIP
 		}
 	}
-	subnet.IP = subnet.IP.Mask(subnet.Mask)
 	if len(nodes) == 0 {
 		return errors.New("did not find any valid Kilo nodes in the cluster")
 	}
+	if subnet == nil {
+		return errors.New("did not find a valid Kilo subnet on any node")
+	}
+	subnet.IP = subnet.IP.Mask(subnet.Mask)
+	sort.Strings(nodeNames)
+	nodes[nodeNames[0]].AllowedLocationIPs = append(nodes[nodeNames[0]].AllowedLocationIPs, connectOpts.allowedIPs...)
 	peers := make(map[string]*mesh.Peer)
 	for _, p := range ps {
 		if p.Ready() {
 			peers[p.Name] = p
 		}
 	}
-	if _, ok := peers[peername]; !ok {
-		return fmt.Errorf("did not find any peer named %q in the cluster", peername)
+	if _, ok := peers[peerName]; !ok {
+		return fmt.Errorf("did not find any peer named %q in the cluster", peerName)
 	}
-	t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, wgtypes.Key{}, subnet, *peers[peername].PersistentKeepaliveInterval, logger)
+	t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, wgtypes.Key{}, subnet, *peers[peerName].PersistentKeepaliveInterval, logger)
 	if err != nil {
-		return fmt.Errorf("failed to create topology: %v", err)
+		return fmt.Errorf("failed to create topology: %w", err)
 	}
-	conf := t.PeerConf(peername)
+	conf := t.PeerConf(peerName)
 	conf.PrivateKey = &privateKey
-	port, err := cmd.Flags().GetInt("port")
-	if err != nil {
-		logg.Fatal(err)
-	}
-	conf.ListenPort = &port
-	buf, err := conf.Bytes()
-	if err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile("/tmp/wg.ini", buf, 0o600); err != nil {
-		return err
-	}
+	conf.ListenPort = &opts.port

 	wgClient, err := wgctrl.New()
 	if err != nil {
-		return fmt.Errorf("failed to initialize wg Client: %w", err)
-	}
-	defer wgClient.Close()
-	if err := wgClient.ConfigureDevice(kiloIfaceName, conf.WGConfig()); err != nil {
 		return err
 	}
-	wgConf := wgtypes.Config{
-		PrivateKey: &privateKey,
-	}
-	if err := wgClient.ConfigureDevice(kiloIfaceName, wgConf); err != nil {
-		return fmt.Errorf("failed to configure wg interface: %w", err)
-	}
+	defer wgClient.Close()
+
+	current, err := wgClient.Device(connectOpts.interfaceName)
+	if err != nil {
+		return err
+	}
+	var equal bool
+	var diff string
+	equal, diff = conf.Equal(current)
+	if !equal {
+		// If the key is empty, then it's the first time we are running
+		// so don't bother printing a diff.
+		if current.PrivateKey != [wgtypes.KeyLen]byte{} {
+			level.Info(logger).Log("msg", "WireGuard configurations are different", "diff", diff)
+		}
+		level.Debug(logger).Log("msg", "setting WireGuard config", "config", conf.WGConfig())
+		if err := wgClient.ConfigureDevice(connectOpts.interfaceName, conf.WGConfig()); err != nil {
+			return err
+		}
+	}

 	var routes []*netlink.Route
@@ -341,6 +408,10 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 	}
 	// Add routes for the allowed IPs of peers.
 	for _, peer := range t.Peers() {
+		// Don't add routes to ourselves.
+		if peer.Name == peerName {
+			continue
+		}
 		for i := range peer.AllowedIPs {
 			routes = append(routes, &netlink.Route{
 				Dst: &peer.AllowedIPs[i],
@@ -349,76 +420,20 @@ func connectAsPeer(cmd *cobra.Command, args []string) error {
 			})
 		}
 	}
+	for i := range connectOpts.allowedIPs {
 		routes = append(routes, &netlink.Route{
-			Dst: &serviceCIDR,
+			Dst: &connectOpts.allowedIPs[i],
 			Flags: int(netlink.FLAG_ONLINK),
 			Gw: t.Segments[0].WireGuardIP(),
 			LinkIndex: iface,
 			Protocol: unix.RTPROT_STATIC,
 		})
+	}
+
 	level.Debug(logger).Log("routes", routes)
 	if err := table.Set(routes, []*netlink.Rule{}); err != nil {
-		return fmt.Errorf("failed to set ip routes table: %w", err)
+		return fmt.Errorf("failed to update route table: %w", err)
 	}
-	errCh, err = table.Run(stop)
-	if err != nil {
-		return fmt.Errorf("failed to start ip routes tables: %w", err)
-	}
-	select {
-	case <-time.After(resyncPersiod):
-	case <-ch:
-		return nil
-	}
-	}
-	}, func(err error) {
-		// Cancel the root context in the very end.
-		defer cancel()
-		ch <- struct{}{}
-		var serr run.SignalError
-		if ok := errors.As(err, &serr); ok {
-			level.Info(logger).Log("msg", "received signal", "signal", serr.Signal.String(), "err", err.Error())
-		} else {
-			level.Error(logger).Log("msg", "received error", "err", err.Error())
-		}
-		level.Debug(logger).Log("msg", "stoped ip routes table")
-		ctxWithTimeOut, cancelWithTimeOut := context.WithTimeout(ctx, 10*time.Second)
-		defer func() {
-			cancelWithTimeOut()
-			level.Debug(logger).Log("msg", "canceled timed context")
-		}()
-		if err := kiloClient.KiloV1alpha1().Peers().Delete(ctxWithTimeOut, peername, metav1.DeleteOptions{}); err != nil {
-			level.Error(logger).Log("failed to delete peer: %w", err)
-		} else {
-			level.Info(logger).Log("msg", "deleted peer", "peer", peername)
-		}
-		if ok, err := cmd.Flags().GetBool("clean-up"); err != nil {
-			level.Error(logger).Log("err", err.Error(), "msg", "failed to get value from clean-up flag")
-		} else if ok {
-			cleanUp(iface, table, configPath, logger)
-		}
-	})
-	}
-	err = g.Run()
-	var serr run.SignalError
-	if ok := errors.As(err, &serr); ok {
-		return nil
-	}
-	return err
-}
-
-func cleanUp(iface int, t *route.Table, configPath string, logger log.Logger) {
-	if err := iproute.Set(iface, false); err != nil {
-		level.Error(logger).Log("err", err.Error(), "msg", "failed to set down wg interface")
-	}
-	if err := os.Remove(configPath); err != nil {
-		level.Error(logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
-	}
-	if err := iproute.RemoveInterface(iface); err != nil {
-		level.Error(logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
-	}
-	if err := t.CleanUp(); err != nil {
-		level.Error(logger).Log("failed to clean up routes: %w", err)
-	}
-	return
-}
+	return nil
 }


@@ -26,9 +26,9 @@ import (
 func connect() *cobra.Command {
 	cmd := &cobra.Command{
 		Use: "connect",
-		Short: "not supporred on you OS",
+		Short: "not supporred on this OS",
 		RunE: func(_ *cobra.Command, _ []string) error {
-			return errors.New("this command is not supported on your OS")
+			return errors.New("this command is not supported on this OS")
 		},
 	}
 	return cmd


@@ -34,15 +34,15 @@ func graph() *cobra.Command {
 func runGraph(_ *cobra.Command, _ []string) error {
 	ns, err := opts.backend.Nodes().List()
 	if err != nil {
-		return fmt.Errorf("failed to list nodes: %v", err)
+		return fmt.Errorf("failed to list nodes: %w", err)
 	}
 	ps, err := opts.backend.Peers().List()
 	if err != nil {
-		return fmt.Errorf("failed to list peers: %v", err)
+		return fmt.Errorf("failed to list peers: %w", err)
 	}
 	// Obtain the Granularity by looking at the annotation of the first node.
-	if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
-		return fmt.Errorf("failed to obtain granularity: %w", err)
+	if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
+		return fmt.Errorf("failed to determine granularity: %w", err)
 	}
 	var hostname string
@@ -69,11 +69,11 @@ func runGraph(_ *cobra.Command, _ []string) error {
 	}
 	t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil)
 	if err != nil {
-		return fmt.Errorf("failed to create topology: %v", err)
+		return fmt.Errorf("failed to create topology: %w", err)
 	}
 	g, err := t.Dot()
 	if err != nil {
-		return fmt.Errorf("failed to generate graph: %v", err)
+		return fmt.Errorf("failed to generate graph: %w", err)
 	}
 	fmt.Println(g)
 	return nil


@@ -1 +0,0 @@
-hello


@@ -62,6 +62,7 @@ var (
 	opts struct {
 		backend mesh.Backend
 		granularity mesh.Granularity
+		kc kiloclient.Interface
 		port int
 	}
 	backend string
@@ -81,29 +82,29 @@ func runRoot(_ *cobra.Command, _ []string) error {
 	case mesh.FullGranularity:
 	case mesh.AutoGranularity:
 	default:
-		return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", granularity, availableGranularities)
+		return fmt.Errorf("mesh granularity %s unknown; posible values are: %s", granularity, availableGranularities)
 	}
 	switch backend {
 	case k8s.Backend:
 		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
 		if err != nil {
-			return fmt.Errorf("failed to create Kubernetes config: %v", err)
+			return fmt.Errorf("failed to create Kubernetes config: %w", err)
 		}
 		c := kubernetes.NewForConfigOrDie(config)
-		kc := kiloclient.NewForConfigOrDie(config)
+		opts.kc = kiloclient.NewForConfigOrDie(config)
 		ec := apiextensions.NewForConfigOrDie(config)
-		opts.backend = k8s.New(c, kc, ec, topologyLabel, log.NewNopLogger())
+		opts.backend = k8s.New(c, opts.kc, ec, topologyLabel, log.NewNopLogger())
 	default:
-		return fmt.Errorf("backend %v unknown; posible values are: %s", backend, availableBackends)
+		return fmt.Errorf("backend %s unknown; posible values are: %s", backend, availableBackends)
 	}
 	if err := opts.backend.Nodes().Init(make(chan struct{})); err != nil {
-		return fmt.Errorf("failed to initialize node backend: %v", err)
+		return fmt.Errorf("failed to initialize node backend: %w", err)
 	}
 	if err := opts.backend.Peers().Init(make(chan struct{})); err != nil {
-		return fmt.Errorf("failed to initialize peer backend: %v", err)
+		return fmt.Errorf("failed to initialize peer backend: %w", err)
 	}
 	return nil
 }
@@ -141,7 +142,7 @@ func main() {
 	}
 }

-func optainGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
+func determineGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity, error) {
 	if gr == mesh.AutoGranularity {
 		if len(ns) == 0 {
 			return gr, errors.New("could not get any nodes")
@@ -151,7 +152,7 @@ func optainGranularity(gr mesh.Granularity, ns []*mesh.Node) (mesh.Granularity,
 	case mesh.LogicalGranularity:
 	case mesh.FullGranularity:
 	default:
-		return ret, fmt.Errorf("mesh granularity %v is not supported", opts.granularity)
+		return ret, fmt.Errorf("mesh granularity %s is not supported", opts.granularity)
 	}
 	return ret, nil
 }


@@ -83,7 +83,7 @@ func runShowConf(c *cobra.Command, args []string) error {
 	case outputFormatYAML:
 		showConfOpts.serializer = json.NewYAMLSerializer(json.DefaultMetaFactory, peerCreatorTyper{}, peerCreatorTyper{})
 	default:
-		return fmt.Errorf("output format %v unknown; posible values are: %s", showConfOpts.output, availableOutputFormats)
+		return fmt.Errorf("output format %s unknown; posible values are: %s", showConfOpts.output, availableOutputFormats)
 	}
 	for i := range allowedIPs {
 		_, aip, err := net.ParseCIDR(allowedIPs[i])
@@ -116,15 +116,15 @@ func showConfPeer() *cobra.Command {
 func runShowConfNode(_ *cobra.Command, args []string) error {
 	ns, err := opts.backend.Nodes().List()
 	if err != nil {
-		return fmt.Errorf("failed to list nodes: %v", err)
+		return fmt.Errorf("failed to list nodes: %w", err)
 	}
 	ps, err := opts.backend.Peers().List()
 	if err != nil {
-		return fmt.Errorf("failed to list peers: %v", err)
+		return fmt.Errorf("failed to list peers: %w", err)
 	}
 	// Obtain the Granularity by looking at the annotation of the first node.
-	if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
-		return fmt.Errorf("failed to obtain granularity: %w", err)
+	if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
+		return fmt.Errorf("failed to determine granularity: %w", err)
 	}
 	hostname := args[0]
 	subnet := mesh.DefaultKiloSubnet
@@ -154,7 +154,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 	t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, int(opts.port), wgtypes.Key{}, subnet, nodes[hostname].PersistentKeepalive, nil)
 	if err != nil {
-		return fmt.Errorf("failed to create topology: %v", err)
+		return fmt.Errorf("failed to create topology: %w", err)
 	}
 	var found bool
@@ -172,7 +172,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 	if !showConfOpts.asPeer {
 		c, err := t.Conf().Bytes()
 		if err != nil {
-			return fmt.Errorf("failed to generate configuration: %v", err)
+			return fmt.Errorf("failed to generate configuration: %w", err)
 		}
 		_, err = os.Stdout.Write(c)
 		return err
@@ -202,7 +202,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 		Peers: []wireguard.Peer{*p},
 	}).Bytes()
 	if err != nil {
-		return fmt.Errorf("failed to generate configuration: %v", err)
+		return fmt.Errorf("failed to generate configuration: %w", err)
 	}
 	_, err = os.Stdout.Write(c)
 	return err
@@ -213,15 +213,15 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
 func runShowConfPeer(_ *cobra.Command, args []string) error {
 	ns, err := opts.backend.Nodes().List()
 	if err != nil {
-		return fmt.Errorf("failed to list nodes: %v", err)
+		return fmt.Errorf("failed to list nodes: %w", err)
 	}
 	ps, err := opts.backend.Peers().List()
 	if err != nil {
-		return fmt.Errorf("failed to list peers: %v", err)
+		return fmt.Errorf("failed to list peers: %w", err)
 	}
 	// Obtain the Granularity by looking at the annotation of the first node.
-	if opts.granularity, err = optainGranularity(opts.granularity, ns); err != nil {
-		return fmt.Errorf("failed to obtain granularity: %w", err)
+	if opts.granularity, err = determineGranularity(opts.granularity, ns); err != nil {
+		return fmt.Errorf("failed to determine granularity: %w", err)
 	}
 	var hostname string
 	subnet := mesh.DefaultKiloSubnet
@@ -257,12 +257,12 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
 	}
 	t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, wgtypes.Key{}, subnet, pka, nil)
 	if err != nil {
-		return fmt.Errorf("failed to create topology: %v", err)
+		return fmt.Errorf("failed to create topology: %w", err)
 	}
 	if !showConfOpts.asPeer {
 		c, err := t.PeerConf(peer).Bytes()
 		if err != nil {
-			return fmt.Errorf("failed to generate configuration: %v", err)
+			return fmt.Errorf("failed to generate configuration: %w", err)
 		}
 		_, err = os.Stdout.Write(c)
 		return err
@@ -286,7 +286,7 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
 		Peers: []wireguard.Peer{*p},
 	}).Bytes()
 	if err != nil {
-		return fmt.Errorf("failed to generate configuration: %v", err)
+		return fmt.Errorf("failed to generate configuration: %w", err)
 	}
 	_, err = os.Stdout.Write(c)
 	return err