Lucas Serven
2019-01-18 02:50:10 +01:00
commit e989f0a25f
1789 changed files with 680059 additions and 0 deletions

pkg/mesh/graph.go Normal file

@@ -0,0 +1,101 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"fmt"
"net"
"github.com/awalterschulze/gographviz"
)
// Dot generates a Graphviz graph of the Topology in DOT format.
func (t *Topology) Dot() (string, error) {
g := gographviz.NewGraph()
g.Name = "kilo"
if err := g.AddAttr("kilo", string(gographviz.Label), graphEscape(t.subnet.String())); err != nil {
return "", fmt.Errorf("failed to add label to graph")
}
if err := g.AddAttr("kilo", string(gographviz.LabelLOC), "t"); err != nil {
return "", fmt.Errorf("failed to add label location to graph")
}
if err := g.AddAttr("kilo", string(gographviz.Overlap), "false"); err != nil {
return "", fmt.Errorf("failed to disable graph overlap")
}
if err := g.SetDir(true); err != nil {
return "", fmt.Errorf("failed to set direction")
}
leaders := make([]string, len(t.Segments))
nodeAttrs := map[string]string{
string(gographviz.Shape): "ellipse",
}
for i, s := range t.Segments {
if err := g.AddSubGraph("kilo", subGraphName(s.Location), nil); err != nil {
return "", fmt.Errorf("failed to add subgraph")
}
if err := g.AddAttr(subGraphName(s.Location), string(gographviz.Label), graphEscape(s.Location)); err != nil {
return "", fmt.Errorf("failed to add label to subgraph")
}
if err := g.AddAttr(subGraphName(s.Location), string(gographviz.Style), `"dashed,rounded"`); err != nil {
return "", fmt.Errorf("failed to add style to subgraph")
}
for j := range s.cidrs {
if err := g.AddNode(subGraphName(s.Location), graphEscape(s.hostnames[j]), nodeAttrs); err != nil {
return "", fmt.Errorf("failed to add node to subgraph")
}
var wg net.IP
if j == s.leader {
wg = s.wireGuardIP
if err := g.Nodes.Lookup[graphEscape(s.hostnames[j])].Attrs.Add(string(gographviz.Rank), "1"); err != nil {
return "", fmt.Errorf("failed to add rank to node")
}
}
if err := g.Nodes.Lookup[graphEscape(s.hostnames[j])].Attrs.Add(string(gographviz.Label), nodeLabel(s.Location, s.hostnames[j], s.cidrs[j], s.privateIPs[j], wg)); err != nil {
return "", fmt.Errorf("failed to add label to node")
}
}
meshSubGraph(g, g.Relations.SortedChildren(subGraphName(s.Location)), s.leader)
leaders[i] = graphEscape(s.hostnames[s.leader])
}
meshSubGraph(g, leaders, 0)
return g.String(), nil
}
func meshSubGraph(g *gographviz.Graph, nodes []string, leader int) {
for i := range nodes {
if i == leader {
continue
}
a := make(gographviz.Attrs)
a[gographviz.Dir] = "both"
g.Edges.Add(&gographviz.Edge{Src: nodes[leader], Dst: nodes[i], Dir: true, Attrs: a})
}
}
func graphEscape(s string) string {
return fmt.Sprintf("\"%s\"", s)
}
func subGraphName(name string) string {
return graphEscape(fmt.Sprintf("cluster_%s", name))
}
func nodeLabel(location, name string, cidr *net.IPNet, priv, wgIP net.IP) string {
var wg string
if wgIP != nil {
wg = wgIP.String()
}
return graphEscape(fmt.Sprintf("%s\n%s\n%s\n%s\n%s", location, name, cidr.String(), priv.String(), wg))
}
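
A minimal sketch of how the DOT output above might be consumed (not part of this commit): the helper name writeDot and the output path are hypothetical, an existing *Topology value and the "io/ioutil" import are assumed, and the resulting file can then be rendered with Graphviz, for example with "dot -Tsvg kilo.dot -o kilo.svg".

// writeDot persists the DOT representation of the topology so it can be
// rendered with Graphviz.
func writeDot(t *Topology, path string) error {
	d, err := t.Dot()
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, []byte(d), 0644)
}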

pkg/mesh/ip.go Normal file

@@ -0,0 +1,348 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"errors"
"fmt"
"net"
"sort"
"github.com/vishvananda/netlink"
)
// getIP returns a private and public IP address for the local node.
// It selects the private IP address in the following order:
// - private IP to which hostname resolves
// - private IP assigned to interface of default route
// - private IP assigned to local interface
// - public IP to which hostname resolves
// - public IP assigned to interface of default route
// - public IP assigned to local interface
// It selects the public IP address in the following order:
// - public IP to which hostname resolves
// - public IP assigned to interface of default route
// - public IP assigned to local interface
// - private IP to which hostname resolves
// - private IP assigned to interface of default route
// - private IP assigned to local interface
// - if no IP was found, return nil and an error.
func getIP(hostname string) (*net.IPNet, *net.IPNet, error) {
var hostPriv, hostPub []*net.IPNet
{
// Check IPs to which hostname resolves first.
ips, err := ipsForHostname(hostname)
if err != nil {
return nil, nil, err
}
for _, ip := range ips {
ok, mask, err := assignedToInterface(ip)
if err != nil {
return nil, nil, fmt.Errorf("failed to search locally assigned addresses: %v", err)
}
if !ok {
continue
}
ip.Mask = mask
if isPublic(ip) {
hostPub = append(hostPub, ip)
continue
}
hostPriv = append(hostPriv, ip)
}
sortIPs(hostPriv)
sortIPs(hostPub)
}
var defaultPriv, defaultPub []*net.IPNet
{
// Check IPs on interface for default route next.
iface, err := defaultInterface()
if err != nil {
return nil, nil, err
}
ips, err := ipsForInterface(iface)
if err != nil {
return nil, nil, err
}
for _, ip := range ips {
if isLocal(ip.IP) {
continue
}
if isPublic(ip) {
defaultPub = append(defaultPub, ip)
continue
}
defaultPriv = append(defaultPriv, ip)
}
sortIPs(defaultPriv)
sortIPs(defaultPub)
}
var interfacePriv, interfacePub []*net.IPNet
{
// Finally look for IPs on all interfaces.
ips, err := ipsForAllInterfaces()
if err != nil {
return nil, nil, err
}
for _, ip := range ips {
if isLocal(ip.IP) {
continue
}
if isPublic(ip) {
interfacePub = append(interfacePub, ip)
continue
}
interfacePriv = append(interfacePriv, ip)
}
sortIPs(interfacePriv)
sortIPs(interfacePub)
}
var priv, pub []*net.IPNet
priv = append(priv, hostPriv...)
priv = append(priv, defaultPriv...)
priv = append(priv, interfacePriv...)
pub = append(pub, hostPub...)
pub = append(pub, defaultPub...)
pub = append(pub, interfacePub...)
if len(priv) == 0 && len(pub) == 0 {
return nil, nil, errors.New("no valid IP was found")
}
if len(priv) == 0 {
priv = pub
}
if len(pub) == 0 {
pub = priv
}
return priv[0], pub[0], nil
}
// sortIPs sorts IPs so the result is stable.
// It will first sort IPs by type, to prefer selecting
// IPs of the same type, and then by value.
func sortIPs(ips []*net.IPNet) {
sort.Slice(ips, func(i, j int) bool {
i4, j4 := ips[i].IP.To4(), ips[j].IP.To4()
if i4 != nil && j4 == nil {
return true
}
if j4 != nil && i4 == nil {
return false
}
return ips[i].String() < ips[j].String()
})
}
func assignedToInterface(ip *net.IPNet) (bool, net.IPMask, error) {
links, err := netlink.LinkList()
if err != nil {
return false, nil, fmt.Errorf("failed to list interfaces: %v", err)
}
// Sort the links for stability.
sort.Slice(links, func(i, j int) bool {
return links[i].Attrs().Name < links[j].Attrs().Name
})
for _, link := range links {
addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL)
if err != nil {
return false, nil, fmt.Errorf("failed to list addresses for %s: %v", link.Attrs().Name, err)
}
// Sort the IPs for stability.
sort.Slice(addrs, func(i, j int) bool {
return addrs[i].String() < addrs[j].String()
})
for i := range addrs {
if ip.IP.Equal(addrs[i].IP) {
return true, addrs[i].Mask, nil
}
}
}
return false, nil, nil
}
func isLocal(ip net.IP) bool {
return ip.IsLoopback() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast()
}
func isPublic(ip *net.IPNet) bool {
// Check RFC 1918 addresses.
if ip4 := ip.IP.To4(); ip4 != nil {
switch true {
// Check for 10.0.0.0/8.
case ip4[0] == 10:
return false
// Check for 172.16.0.0/12.
case ip4[0] == 172 && ip4[1]&0xf0 == 16:
return false
// Check for 192.168.0.0/16.
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
// Check RFC 4193 addresses.
if len(ip.IP) == net.IPv6len {
switch true {
// Check for fd00::/8.
case ip.IP[0] == 0xfd:
return false
default:
return true
}
}
return false
}
// ipsForHostname returns a slice of IPs to which the
// given hostname resolves.
func ipsForHostname(hostname string) ([]*net.IPNet, error) {
if ip := net.ParseIP(hostname); ip != nil {
return []*net.IPNet{oneAddressCIDR(ip)}, nil
}
ips, err := net.LookupIP(hostname)
if err != nil {
return nil, fmt.Errorf("failed to lookip IPs of hostname: %v", err)
}
nets := make([]*net.IPNet, len(ips))
for i := range ips {
nets[i] = oneAddressCIDR(ips[i])
}
return nets, nil
}
// ipsForAllInterfaces returns a slice of IPs assigned to all the
// interfaces on the host.
func ipsForAllInterfaces() ([]*net.IPNet, error) {
ifaces, err := net.Interfaces()
if err != nil {
return nil, fmt.Errorf("failed to list interfaces: %v", err)
}
var nets []*net.IPNet
for _, iface := range ifaces {
ips, err := ipsForInterface(&iface)
if err != nil {
return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err)
}
nets = append(nets, ips...)
}
return nets, nil
}
// ipsForInterface returns a slice of IPs assigned to the given interface.
func ipsForInterface(iface *net.Interface) ([]*net.IPNet, error) {
link, err := netlink.LinkByIndex(iface.Index)
if err != nil {
return nil, fmt.Errorf("failed to get link: %s", err)
}
addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL)
if err != nil {
return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err)
}
var ips []*net.IPNet
for _, a := range addrs {
if a.IPNet != nil {
ips = append(ips, a.IPNet)
}
}
return ips, nil
}
// interfacesForIP returns a slice of interfaces with the given IP.
func interfacesForIP(ip *net.IPNet) ([]net.Interface, error) {
ifaces, err := net.Interfaces()
if err != nil {
return nil, fmt.Errorf("failed to list interfaces: %v", err)
}
var interfaces []net.Interface
for _, iface := range ifaces {
ips, err := ipsForInterface(&iface)
if err != nil {
return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err)
}
for i := range ips {
if ip.IP.Equal(ips[i].IP) {
interfaces = append(interfaces, iface)
break
}
}
}
if len(interfaces) == 0 {
return nil, fmt.Errorf("no interface has %s assigned", ip.String())
}
return interfaces, nil
}
// defaultInterface returns the interface for the default route of the host.
func defaultInterface() (*net.Interface, error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return nil, err
}
for _, route := range routes {
if route.Dst == nil || route.Dst.String() == "0.0.0.0/0" || route.Dst.String() == "::/0" {
if route.LinkIndex <= 0 {
return nil, errors.New("failed to determine interface of route")
}
return net.InterfaceByIndex(route.LinkIndex)
}
}
return nil, errors.New("failed to find default route")
}
type allocator struct {
bits int
cidr *net.IPNet
current net.IP
}
func newAllocator(cidr net.IPNet) *allocator {
_, bits := cidr.Mask.Size()
current := make(net.IP, len(cidr.IP))
copy(current, cidr.IP)
if ip4 := current.To4(); ip4 != nil {
current = ip4
}
return &allocator{
bits: bits,
cidr: &cidr,
current: current,
}
}
func (a *allocator) next() *net.IPNet {
if a.current == nil {
return nil
}
for i := len(a.current) - 1; i >= 0; i-- {
a.current[i]++
// if we haven't overflowed, then we can exit.
if a.current[i] != 0 {
break
}
}
if !a.cidr.Contains(a.current) {
a.current = nil
}
ip := make(net.IP, len(a.current))
copy(ip, a.current)
return &net.IPNet{IP: ip, Mask: net.CIDRMask(a.bits, a.bits)}
}
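
A minimal sketch exercising the RFC 1918 and RFC 4193 checks above (not part of this commit): isPublic and oneAddressCIDR are unexported, so this would live in package mesh, for example next to the tests; the fmt and net imports and the helper name printAddressClasses are assumptions.

// printAddressClasses prints whether each sample address is considered public.
func printAddressClasses() {
	for _, s := range []string{"10.1.2.3", "172.20.0.1", "192.168.1.1", "8.8.8.8", "fd68::1"} {
		fmt.Println(s, isPublic(oneAddressCIDR(net.ParseIP(s))))
	}
	// Only 8.8.8.8 is public; the others are RFC 1918 private or RFC 4193 ULA addresses.
}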

pkg/mesh/ip_test.go Normal file

@@ -0,0 +1,75 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"net"
"testing"
)
func TestSortIPs(t *testing.T) {
ip1 := oneAddressCIDR(net.ParseIP("10.0.0.1"))
ip2 := oneAddressCIDR(net.ParseIP("10.0.0.2"))
ip3 := oneAddressCIDR(net.ParseIP("192.168.0.1"))
ip4 := oneAddressCIDR(net.ParseIP("2001::7"))
ip5 := oneAddressCIDR(net.ParseIP("fd68:da49:09da:b27f::"))
for _, tc := range []struct {
name string
ips []*net.IPNet
out []*net.IPNet
}{
{
name: "single",
ips: []*net.IPNet{ip1},
out: []*net.IPNet{ip1},
},
{
name: "IPv4s",
ips: []*net.IPNet{ip2, ip3, ip1},
out: []*net.IPNet{ip1, ip2, ip3},
},
{
name: "IPv4 and IPv6",
ips: []*net.IPNet{ip4, ip1},
out: []*net.IPNet{ip1, ip4},
},
{
name: "IPv6s",
ips: []*net.IPNet{ip5, ip4},
out: []*net.IPNet{ip4, ip5},
},
{
name: "all",
ips: []*net.IPNet{ip3, ip4, ip2, ip5, ip1},
out: []*net.IPNet{ip1, ip2, ip3, ip4, ip5},
},
} {
sortIPs(tc.ips)
equal := true
if len(tc.ips) != len(tc.out) {
equal = false
} else {
for i := range tc.ips {
if !ipNetsEqual(tc.ips[i], tc.out[i]) {
equal = false
break
}
}
}
if !equal {
t.Errorf("test case %q: expected %s, got %s", tc.name, tc.out, tc.ips)
}
}
}

pkg/mesh/mesh.go Normal file

@@ -0,0 +1,581 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"fmt"
"io/ioutil"
"net"
"os"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/vishvananda/netlink"
"github.com/squat/kilo/pkg/iproute"
"github.com/squat/kilo/pkg/ipset"
"github.com/squat/kilo/pkg/iptables"
"github.com/squat/kilo/pkg/route"
"github.com/squat/kilo/pkg/wireguard"
)
const resyncPeriod = 30 * time.Second
const (
// KiloPath is the directory where Kilo stores its configuration.
KiloPath = "/var/lib/kilo"
// PrivateKeyPath is the filepath where the WireGuard private key is stored.
PrivateKeyPath = KiloPath + "/key"
// ConfPath is the filepath where the WireGuard configuration is stored.
ConfPath = KiloPath + "/conf"
)
// Granularity represents the abstraction level at which the network
// should be meshed.
type Granularity string
// Encapsulate identifies what packets within a location should
// be encapsulated.
type Encapsulate string
const (
// DataCenterGranularity indicates that the network should create
// a mesh between data-centers but not between nodes within a
// single data-center.
DataCenterGranularity Granularity = "data-center"
// NodeGranularity indicates that the network should create
// a mesh between every node.
NodeGranularity Granularity = "node"
// NeverEncapsulate indicates that no packets within a location
// should be encapsulated.
NeverEncapsulate Encapsulate = "never"
// CrossSubnetEncapsulate indicates that only packets that
// traverse subnets within a location should be encapsulated.
CrossSubnetEncapsulate Encapsulate = "crosssubnet"
// AlwaysEncapsulate indicates that all packets within a location
// should be encapsulated.
AlwaysEncapsulate Encapsulate = "always"
)
// Node represents a node in the network.
type Node struct {
ExternalIP *net.IPNet
Key []byte
InternalIP *net.IPNet
// Leader is a suggestion to Kilo that
// the node wants to lead its segment.
Leader bool
Location string
Name string
Subnet *net.IPNet
}
// Ready indicates whether or not the node is ready.
func (n *Node) Ready() bool {
return n != nil && n.ExternalIP != nil && n.Key != nil && n.InternalIP != nil && n.Subnet != nil
}
// EventType describes what kind of an action an event represents.
type EventType string
const (
// AddEvent represents an action where an item was added.
AddEvent EventType = "add"
// DeleteEvent represents an action where an item was removed.
DeleteEvent EventType = "delete"
// UpdateEvent represents an action where an item was updated.
UpdateEvent EventType = "update"
)
// Event represents an update event concerning a node in the cluster.
type Event struct {
Type EventType
Node *Node
}
// Backend can get nodes by name, init itself,
// list the nodes that should be meshed,
// set Kilo properties for a node,
// clean up any changes applied to the backend,
// and watch for changes to nodes.
type Backend interface {
CleanUp(string) error
Get(string) (*Node, error)
Init(<-chan struct{}) error
List() ([]*Node, error)
Set(string, *Node) error
Watch() <-chan *Event
}
// Mesh is able to create Kilo network meshes.
type Mesh struct {
Backend
encapsulate Encapsulate
externalIP *net.IPNet
granularity Granularity
hostname string
internalIP *net.IPNet
ipset *ipset.Set
ipTables *iptables.Controller
kiloIface int
key []byte
local bool
port int
priv []byte
privIface int
pub []byte
pubIface int
stop chan struct{}
subnet *net.IPNet
table *route.Table
tunlIface int
// nodes is a mutable field in the struct
// and needs to be guarded.
nodes map[string]*Node
mu sync.Mutex
errorCounter *prometheus.CounterVec
nodesGauge prometheus.Gauge
logger log.Logger
}
// New returns a new Mesh instance.
func New(backend Backend, encapsulate Encapsulate, granularity Granularity, hostname string, port int, subnet *net.IPNet, local bool, logger log.Logger) (*Mesh, error) {
if err := os.MkdirAll(KiloPath, 0700); err != nil {
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
}
private, err := ioutil.ReadFile(PrivateKeyPath)
if err != nil {
level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
if private, err = wireguard.GenKey(); err != nil {
return nil, err
}
}
public, err := wireguard.PubKey(private)
if err != nil {
return nil, err
}
if err := ioutil.WriteFile(PrivateKeyPath, private, 0600); err != nil {
return nil, fmt.Errorf("failed to write private key to disk: %v", err)
}
privateIP, publicIP, err := getIP(hostname)
if err != nil {
return nil, fmt.Errorf("failed to find public IP: %v", err)
}
ifaces, err := interfacesForIP(privateIP)
if err != nil {
return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
}
privIface := ifaces[0].Index
ifaces, err = interfacesForIP(publicIP)
if err != nil {
return nil, fmt.Errorf("failed to find interface for public IP: %v", err)
}
pubIface := ifaces[0].Index
kiloIface, err := wireguard.New("kilo")
if err != nil {
return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
}
var tunlIface int
if encapsulate != NeverEncapsulate {
if tunlIface, err = iproute.NewIPIP(privIface); err != nil {
return nil, fmt.Errorf("failed to create tunnel interface: %v", err)
}
if err := iproute.Set(tunlIface, true); err != nil {
return nil, fmt.Errorf("failed to set tunnel interface up: %v", err)
}
}
level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
ipTables, err := iptables.New(len(subnet.IP))
if err != nil {
return nil, fmt.Errorf("failed to IP tables controller: %v", err)
}
return &Mesh{
Backend: backend,
encapsulate: encapsulate,
externalIP: publicIP,
granularity: granularity,
hostname: hostname,
internalIP: privateIP,
// This is a patch until Calico supports
// other hosts adding IPIP iptables rules.
ipset: ipset.New("cali40all-hosts-net"),
ipTables: ipTables,
kiloIface: kiloIface,
nodes: make(map[string]*Node),
port: port,
priv: private,
privIface: privIface,
pub: public,
pubIface: pubIface,
local: local,
stop: make(chan struct{}),
subnet: subnet,
table: route.NewTable(),
tunlIface: tunlIface,
errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "kilo_errors_total",
Help: "Number of errors that occurred while administering the mesh.",
}, []string{"event"}),
nodesGauge: prometheus.NewGauge(prometheus.GaugeOpts{
Name: "kilo_nodes",
Help: "Number of in the mesh.",
}),
logger: logger,
}, nil
}
// Run starts the mesh.
func (m *Mesh) Run() error {
if err := m.Init(m.stop); err != nil {
return fmt.Errorf("failed to initialize backend: %v", err)
}
ipsetErrors, err := m.ipset.Run(m.stop)
if err != nil {
return fmt.Errorf("failed to watch for ipset updates: %v", err)
}
ipTablesErrors, err := m.ipTables.Run(m.stop)
if err != nil {
return fmt.Errorf("failed to watch for IP tables updates: %v", err)
}
routeErrors, err := m.table.Run(m.stop)
if err != nil {
return fmt.Errorf("failed to watch for route table updates: %v", err)
}
go func() {
for {
var err error
select {
case err = <-ipsetErrors:
case err = <-ipTablesErrors:
case err = <-routeErrors:
case <-m.stop:
return
}
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("run").Inc()
}
}
}()
defer m.cleanUp()
t := time.NewTimer(resyncPeriod)
w := m.Watch()
for {
var e *Event
select {
case e = <-w:
m.sync(e)
case <-t.C:
m.applyTopology()
t.Reset(resyncPeriod)
case <-m.stop:
return nil
}
}
}
func (m *Mesh) sync(e *Event) {
logger := log.With(m.logger, "event", e.Type)
level.Debug(logger).Log("msg", "syncing", "event", e.Type)
if isSelf(m.hostname, e.Node) {
level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
m.handleLocal(e.Node)
return
}
var diff bool
m.mu.Lock()
if !e.Node.Ready() {
level.Debug(logger).Log("msg", "received incomplete node", "node", e.Node)
// An existing node is no longer valid
// so remove it from the mesh.
if _, ok := m.nodes[e.Node.Name]; ok {
level.Info(logger).Log("msg", "node is no longer in the mesh", "node", e.Node)
delete(m.nodes, e.Node.Name)
diff = true
}
} else {
switch e.Type {
case AddEvent:
fallthrough
case UpdateEvent:
if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) {
m.nodes[e.Node.Name] = e.Node
diff = true
}
case DeleteEvent:
delete(m.nodes, e.Node.Name)
diff = true
}
}
m.mu.Unlock()
if diff {
level.Info(logger).Log("node", e.Node)
m.applyTopology()
}
}
func (m *Mesh) handleLocal(n *Node) {
// Allow the external IP to be overridden.
if n.ExternalIP == nil {
n.ExternalIP = m.externalIP
}
// Compare the given node to the calculated local node.
// Take leader, location, and subnet from the argument, as these
// are not determined by kilo.
local := &Node{ExternalIP: n.ExternalIP, Key: m.pub, InternalIP: m.internalIP, Leader: n.Leader, Location: n.Location, Name: m.hostname, Subnet: n.Subnet}
if !nodesAreEqual(n, local) {
level.Debug(m.logger).Log("msg", "local node differs from backend")
if err := m.Set(m.hostname, local); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
m.errorCounter.WithLabelValues("local").Inc()
return
}
level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
}
m.mu.Lock()
n = m.nodes[m.hostname]
if n == nil {
n = &Node{}
}
m.mu.Unlock()
if !nodesAreEqual(n, local) {
m.mu.Lock()
m.nodes[local.Name] = local
m.mu.Unlock()
m.applyTopology()
}
}
func (m *Mesh) applyTopology() {
m.mu.Lock()
defer m.mu.Unlock()
// Ensure all unready nodes are removed.
var ready float64
for n := range m.nodes {
if !m.nodes[n].Ready() {
delete(m.nodes, n)
continue
}
ready++
}
m.nodesGauge.Set(ready)
// We cannot do anything with the topology until the local node is available.
if m.nodes[m.hostname] == nil {
return
}
t, err := NewTopology(m.nodes, m.granularity, m.hostname, m.port, m.priv, m.subnet)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
conf, err := t.Conf()
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
}
if err := ioutil.WriteFile(ConfPath, conf, 0600); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
var private *net.IPNet
// If we are not encapsulating packets to the local private network,
// then pass the private IP to add an exception to the NAT rule.
if m.encapsulate != AlwaysEncapsulate {
private = t.privateIP
}
rules := iptables.MasqueradeRules(private, m.nodes[m.hostname].Subnet, t.RemoteSubnets())
rules = append(rules, iptables.ForwardRules(m.subnet)...)
if err := m.ipTables.Set(rules); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
if m.encapsulate != NeverEncapsulate {
var peers []net.IP
for _, s := range t.Segments {
if s.Location == m.nodes[m.hostname].Location {
peers = s.privateIPs
break
}
}
if err := m.ipset.Set(peers); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
if m.local {
if err := iproute.SetAddress(m.tunlIface, oneAddressCIDR(newAllocator(*m.nodes[m.hostname].Subnet).next().IP)); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
}
}
if t.leader {
if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
link, err := linkByIndex(m.kiloIface)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
oldConf, err := wireguard.ShowConf(link.Attrs().Name)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
// Setting the WireGuard configuration interrupts existing connections
// so only set the configuration if it has changed.
equal, err := wireguard.CompareConf(conf, oldConf)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
// Don't return here, simply overwrite the old configuration.
equal = false
}
if !equal {
if err := wireguard.SetConf(link.Attrs().Name, ConfPath); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
}
if err := iproute.Set(m.kiloIface, true); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
} else {
level.Debug(m.logger).Log("msg", "local node is not the leader")
if err := iproute.Set(m.kiloIface, false); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
return
}
}
// We need to add routes last since they may depend
// on the WireGuard interface.
routes := t.Routes(m.kiloIface, m.privIface, m.tunlIface, m.local, m.encapsulate)
if err := m.table.Set(routes); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
}
}
// RegisterMetrics registers Prometheus metrics on the given Prometheus
// registerer.
func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
r.MustRegister(
m.errorCounter,
m.nodesGauge,
)
}
// Stop stops the mesh.
func (m *Mesh) Stop() {
close(m.stop)
}
func (m *Mesh) cleanUp() {
if err := m.ipTables.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := m.table.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := os.Remove(PrivateKeyPath); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete private key: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := os.Remove(ConfPath); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := iproute.RemoveInterface(m.kiloIface); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove wireguard interface: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := m.CleanUp(m.hostname); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up backend: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := m.ipset.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up ipset: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
func isSelf(hostname string, node *Node) bool {
return node != nil && node.Name == hostname
}
func nodesAreEqual(a, b *Node) bool {
if (a != nil) != (b != nil) {
return false
}
if a == b {
return true
}
return ipNetsEqual(a.ExternalIP, b.ExternalIP) && string(a.Key) == string(b.Key) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet)
}
func ipNetsEqual(a, b *net.IPNet) bool {
if a == nil && b == nil {
return true
}
if (a != nil) != (b != nil) {
return false
}
if a.Mask.String() != b.Mask.String() {
return false
}
return a.IP.Equal(b.IP)
}
func subnetsEqual(a, b *net.IPNet) bool {
if a.Mask.String() != b.Mask.String() {
return false
}
if !a.Contains(b.IP) {
return false
}
if !b.Contains(a.IP) {
return false
}
return true
}
func linkByIndex(index int) (netlink.Link, error) {
link, err := netlink.LinkByIndex(index)
if err != nil {
return nil, fmt.Errorf("failed to get interface: %v", err)
}
return link, nil
}
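
A minimal sketch of wiring the mesh together using the exported API in this file (not part of this commit): the backend, hostname, and logger values are assumed to exist, the CIDR and port are only illustrative, the helper name runMesh is hypothetical, and the net, go-kit log, Prometheus, and kilo mesh imports are assumed.

// runMesh constructs a Mesh and runs it until Stop is called on it elsewhere.
func runMesh(backend mesh.Backend, hostname string, logger log.Logger) error {
	_, subnet, err := net.ParseCIDR("10.4.0.0/16")
	if err != nil {
		return err
	}
	m, err := mesh.New(backend, mesh.NeverEncapsulate, mesh.DataCenterGranularity, hostname, 51820, subnet, false, logger)
	if err != nil {
		return err
	}
	m.RegisterMetrics(prometheus.DefaultRegisterer)
	// Run blocks, syncing node events and re-applying the topology every resync period.
	return m.Run()
}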

pkg/mesh/mesh_test.go Normal file

@@ -0,0 +1,146 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"net"
"testing"
)
func TestNewAllocator(t *testing.T) {
_, c1, err := net.ParseCIDR("10.1.0.0/16")
if err != nil {
t.Fatalf("failed to parse CIDR: %v", err)
}
a1 := newAllocator(*c1)
_, c2, err := net.ParseCIDR("10.1.0.0/32")
if err != nil {
t.Fatalf("failed to parse CIDR: %v", err)
}
a2 := newAllocator(*c2)
_, c3, err := net.ParseCIDR("10.1.0.0/31")
if err != nil {
t.Fatalf("failed to parse CIDR: %v", err)
}
a3 := newAllocator(*c3)
for _, tc := range []struct {
name string
a *allocator
next string
}{
{
name: "10.1.0.0/16 first",
a: a1,
next: "10.1.0.1/32",
},
{
name: "10.1.0.0/16 second",
a: a1,
next: "10.1.0.2/32",
},
{
name: "10.1.0.0/32",
a: a2,
next: "<nil>",
},
{
name: "10.1.0.0/31 first",
a: a3,
next: "10.1.0.1/32",
},
{
name: "10.1.0.0/31 second",
a: a3,
next: "<nil>",
},
} {
next := tc.a.next()
if next.String() != tc.next {
t.Errorf("test case %q: expected %s, got %s", tc.name, tc.next, next.String())
}
}
}
func TestReady(t *testing.T) {
internalIP := oneAddressCIDR(net.ParseIP("1.1.1.1"))
externalIP := oneAddressCIDR(net.ParseIP("2.2.2.2"))
for _, tc := range []struct {
name string
node *Node
ready bool
}{
{
name: "nil",
node: nil,
ready: false,
},
{
name: "empty fields",
node: &Node{},
ready: false,
},
{
name: "empty external IP",
node: &Node{
InternalIP: internalIP,
Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
},
{
name: "empty internal IP",
node: &Node{
ExternalIP: externalIP,
Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
},
{
name: "empty key",
node: &Node{
ExternalIP: externalIP,
InternalIP: internalIP,
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: false,
},
{
name: "empty subnet",
node: &Node{
ExternalIP: externalIP,
InternalIP: internalIP,
Key: []byte{},
},
ready: false,
},
{
name: "valid",
node: &Node{
ExternalIP: externalIP,
InternalIP: internalIP,
Key: []byte{},
Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)},
},
ready: true,
},
} {
ready := tc.node.Ready()
if ready != tc.ready {
t.Errorf("test case %q: expected %t, got %t", tc.name, tc.ready, ready)
}
}
}

pkg/mesh/topology.go Normal file

@@ -0,0 +1,334 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"bytes"
"errors"
"fmt"
"net"
"sort"
"strings"
"text/template"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
var (
confTemplate = template.Must(template.New("").Parse(`[Interface]
PrivateKey = {{.Key}}
ListenPort = {{.Port}}
{{range .Segments -}}
{{if ne .Location $.Location}}
[Peer]
PublicKey = {{.Key}}
Endpoint = {{.Endpoint}}:{{$.Port}}
AllowedIPs = {{.AllowedIPs}}
{{end}}
{{- end -}}
`))
)
// Topology represents the logical structure of the overlay network.
type Topology struct {
// Some fields need to be exported so that the template can read them.
Key string
Port int
// Location is the logical location of the local host.
Location string
Segments []*segment
// hostname is the hostname of the local host.
hostname string
// leader represents whether or not the local host
// is the segment leader.
leader bool
// subnet is the entire subnet from which IPs
// for the WireGuard interfaces will be allocated.
subnet *net.IPNet
// privateIP is the private IP address of the local node.
privateIP *net.IPNet
// wireGuardCIDR is the allocated CIDR of the WireGuard
// interface of the local node. If the local node is not
// the leader, then it is nil.
wireGuardCIDR *net.IPNet
}
type segment struct {
// Some fields need to be exported so that the template can read them.
AllowedIPs string
Endpoint string
Key string
// Location is the logical location of this segment.
Location string
// cidrs is a slice of subnets of all peers in the segment.
cidrs []*net.IPNet
// hostnames is a slice of the hostnames of the peers in the segment.
hostnames []string
// leader is the index of the leader of the segment.
leader int
// privateIPs is a slice of private IPs of all peers in the segment.
privateIPs []net.IP
// wireGuardIP is the allocated IP address of the WireGuard
// interface on the leader of the segment.
wireGuardIP net.IP
}
// NewTopology creates a new Topology struct from a given set of nodes.
func NewTopology(nodes map[string]*Node, granularity Granularity, hostname string, port int, key []byte, subnet *net.IPNet) (*Topology, error) {
topoMap := make(map[string][]*Node)
for _, node := range nodes {
var location string
switch granularity {
case DataCenterGranularity:
location = node.Location
case NodeGranularity:
location = node.Name
}
topoMap[location] = append(topoMap[location], node)
}
var localLocation string
switch granularity {
case DataCenterGranularity:
localLocation = nodes[hostname].Location
case NodeGranularity:
localLocation = hostname
}
t := Topology{Key: strings.TrimSpace(string(key)), Port: port, hostname: hostname, Location: localLocation, subnet: subnet, privateIP: nodes[hostname].InternalIP}
for location := range topoMap {
// Sort the location so the result is stable.
sort.Slice(topoMap[location], func(i, j int) bool {
return topoMap[location][i].Name < topoMap[location][j].Name
})
leader := findLeader(topoMap[location])
if location == localLocation && topoMap[location][leader].Name == hostname {
t.leader = true
}
var allowedIPs []string
var cidrs []*net.IPNet
var hostnames []string
var privateIPs []net.IP
for _, node := range topoMap[location] {
// Allowed IPs should include:
// - the node's allocated subnet
// - the node's WireGuard IP
// - the node's internal IP
allowedIPs = append(allowedIPs, node.Subnet.String(), oneAddressCIDR(node.InternalIP.IP).String())
cidrs = append(cidrs, node.Subnet)
hostnames = append(hostnames, node.Name)
privateIPs = append(privateIPs, node.InternalIP.IP)
}
t.Segments = append(t.Segments, &segment{
AllowedIPs: strings.Join(allowedIPs, ", "),
Endpoint: topoMap[location][leader].ExternalIP.IP.String(),
Key: strings.TrimSpace(string(topoMap[location][leader].Key)),
Location: location,
cidrs: cidrs,
hostnames: hostnames,
leader: leader,
privateIPs: privateIPs,
})
}
// Sort the Topology so the result is stable.
sort.Slice(t.Segments, func(i, j int) bool {
return t.Segments[i].Location < t.Segments[j].Location
})
// Allocate IPs to the segment leaders in a stable, coordination-free manner.
a := newAllocator(*subnet)
for _, segment := range t.Segments {
ipNet := a.next()
if ipNet == nil {
return nil, errors.New("failed to allocate an IP address; ran out of IP addresses")
}
segment.wireGuardIP = ipNet.IP
segment.AllowedIPs = fmt.Sprintf("%s, %s", segment.AllowedIPs, ipNet.String())
if t.leader && segment.Location == t.Location {
t.wireGuardCIDR = &net.IPNet{IP: ipNet.IP, Mask: t.subnet.Mask}
}
}
return &t, nil
}
// RemoteSubnets identifies the subnets of the hosts in segments other than the host's.
func (t *Topology) RemoteSubnets() []*net.IPNet {
var remote []*net.IPNet
for _, s := range t.Segments {
if s == nil || s.Location == t.Location {
continue
}
remote = append(remote, s.cidrs...)
}
return remote
}
// Routes generates a slice of routes for a given Topology.
func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encapsulate Encapsulate) []*netlink.Route {
var routes []*netlink.Route
if !t.leader {
// Find the leader for this segment.
var leader net.IP
for _, segment := range t.Segments {
if segment.Location == t.Location {
leader = segment.privateIPs[segment.leader]
break
}
}
for _, segment := range t.Segments {
// First, add a route to the WireGuard IP of the segment.
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: oneAddressCIDR(segment.wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
Gw: leader,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
}, encapsulate, t.privateIP, tunlIface))
// Add routes for the current segment if local is true.
if segment.Location == t.Location {
if local {
for i := range segment.cidrs {
// Don't add routes for the local node.
if segment.privateIPs[i].Equal(t.privateIP.IP) {
continue
}
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.privateIPs[i],
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
}, encapsulate, t.privateIP, tunlIface))
}
}
continue
}
for i := range segment.cidrs {
// Add routes to the Pod CIDRs of nodes in other segments.
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: leader,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
}, encapsulate, t.privateIP, tunlIface))
// Add routes to the private IPs of nodes in other segments.
// Number of CIDRs and private IPs always match so
// we can reuse the loop.
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: oneAddressCIDR(segment.privateIPs[i]),
Flags: int(netlink.FLAG_ONLINK),
Gw: leader,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
}, encapsulate, t.privateIP, tunlIface))
}
}
return routes
}
for _, segment := range t.Segments {
// Add routes for the current segment if local is true.
if segment.Location == t.Location {
if local {
for i := range segment.cidrs {
// Don't add routes for the local node.
if segment.privateIPs[i].Equal(t.privateIP.IP) {
continue
}
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.privateIPs[i],
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
}, encapsulate, t.privateIP, tunlIface))
}
}
continue
}
for i := range segment.cidrs {
// Add routes to the Pod CIDRs of nodes in other segments.
routes = append(routes, &netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
// Add routes to the private IPs of nodes in other segments.
// Number of CIDRs and private IPs always match so
// we can reuse the loop.
routes = append(routes, &netlink.Route{
Dst: oneAddressCIDR(segment.privateIPs[i]),
Flags: int(netlink.FLAG_ONLINK),
Gw: segment.wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
})
}
}
return routes
}
func encapsulateRoute(route *netlink.Route, encapsulate Encapsulate, subnet *net.IPNet, tunlIface int) *netlink.Route {
if encapsulate == AlwaysEncapsulate || (encapsulate == CrossSubnetEncapsulate && !subnet.Contains(route.Gw)) {
route.LinkIndex = tunlIface
}
return route
}
// Conf generates a WireGuard configuration file for a given Topology.
func (t *Topology) Conf() ([]byte, error) {
conf := new(bytes.Buffer)
if err := confTemplate.Execute(conf, t); err != nil {
return nil, err
}
return conf.Bytes(), nil
}
// oneAddressCIDR takes an IP address and returns a CIDR
// that contains only that address.
func oneAddressCIDR(ip net.IP) *net.IPNet {
return &net.IPNet{IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8)}
}
// findLeader selects a leader for the nodes in a segment;
// it will select the first node that says it should lead
// or the first node in the segment if none have volunteered,
// always preferring those with a public external IP address.
func findLeader(nodes []*Node) int {
var leaders, public []int
for i := range nodes {
if nodes[i].Leader {
if isPublic(nodes[i].ExternalIP) {
return i
}
leaders = append(leaders, i)
}
if isPublic(nodes[i].ExternalIP) {
public = append(public, i)
}
}
if len(leaders) != 0 {
return leaders[0]
}
if len(public) != 0 {
return public[0]
}
return 0
}
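
For illustration, a sketch of what Conf produces (not part of this commit): printConf is a hypothetical helper in package mesh, fmt is assumed to be imported, and the sample output below is derived from the three-node fixture in topology_test.go viewed from node "a" with data-center granularity; exact blank lines depend on the template's trim markers.

// printConf prints the rendered WireGuard configuration for a topology.
func printConf(topo *Topology) {
	conf, err := topo.Conf()
	if err != nil {
		return
	}
	fmt.Printf("%s", conf)
}

// For node "a" of the fixture, the only peer section is the remote segment led
// by node "b", and the output is roughly:
//
// [Interface]
// PrivateKey = private
// ListenPort = 51820
//
// [Peer]
// PublicKey = key2
// Endpoint = 10.1.0.2:51820
// AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32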

pkg/mesh/topology_test.go Normal file

@@ -0,0 +1,982 @@
// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"net"
"strings"
"testing"
"github.com/kylelemons/godebug/pretty"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
func allowedIPs(ips ...string) string {
return strings.Join(ips, ", ")
}
func setup(t *testing.T) (map[string]*Node, []byte, int, *net.IPNet) {
key := []byte("private")
port := 51820
_, kiloNet, err := net.ParseCIDR("10.4.0.0/16")
if err != nil {
t.Fatalf("failed to parse Kilo subnet CIDR: %v", err)
}
ip, e1, err := net.ParseCIDR("10.1.0.1/16")
if err != nil {
t.Fatalf("failed to parse external IP CIDR: %v", err)
}
e1.IP = ip
ip, e2, err := net.ParseCIDR("10.1.0.2/16")
if err != nil {
t.Fatalf("failed to parse external IP CIDR: %v", err)
}
e2.IP = ip
ip, e3, err := net.ParseCIDR("10.1.0.3/16")
if err != nil {
t.Fatalf("failed to parse external IP CIDR: %v", err)
}
e3.IP = ip
ip, i1, err := net.ParseCIDR("192.168.0.1/24")
if err != nil {
t.Fatalf("failed to parse internal IP CIDR: %v", err)
}
i1.IP = ip
ip, i2, err := net.ParseCIDR("192.168.0.2/24")
if err != nil {
t.Fatalf("failed to parse internal IP CIDR: %v", err)
}
i2.IP = ip
nodes := map[string]*Node{
"a": {
Name: "a",
ExternalIP: e1,
InternalIP: i1,
Location: "1",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
Key: []byte("key1"),
},
"b": {
Name: "b",
ExternalIP: e2,
InternalIP: i1,
Location: "2",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.2.0"), Mask: net.CIDRMask(24, 32)},
Key: []byte("key2"),
},
"c": {
Name: "c",
ExternalIP: e3,
InternalIP: i2,
// Same location as node b.
Location: "2",
Subnet: &net.IPNet{IP: net.ParseIP("10.2.3.0"), Mask: net.CIDRMask(24, 32)},
Key: []byte("key3"),
},
}
return nodes, key, port, kiloNet
}
func TestNewTopology(t *testing.T) {
nodes, key, port, kiloNet := setup(t)
w1 := net.ParseIP("10.4.0.1").To4()
w2 := net.ParseIP("10.4.0.2").To4()
w3 := net.ParseIP("10.4.0.3").To4()
for _, tc := range []struct {
name string
granularity Granularity
hostname string
result *Topology
}{
{
name: "datacenter from a",
granularity: DataCenterGranularity,
hostname: nodes["a"].Name,
result: &Topology{
hostname: nodes["a"].Name,
leader: true,
Location: nodes["a"].Location,
subnet: kiloNet,
privateIP: nodes["a"].InternalIP,
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Location,
cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet},
hostnames: []string{"b", "c"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP},
wireGuardIP: w2,
},
},
},
},
{
name: "datacenter from b",
granularity: DataCenterGranularity,
hostname: nodes["b"].Name,
result: &Topology{
hostname: nodes["b"].Name,
leader: true,
Location: nodes["b"].Location,
subnet: kiloNet,
privateIP: nodes["b"].InternalIP,
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Location,
cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet},
hostnames: []string{"b", "c"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP},
wireGuardIP: w2,
},
},
},
},
{
name: "datacenter from c",
granularity: DataCenterGranularity,
hostname: nodes["c"].Name,
result: &Topology{
hostname: nodes["c"].Name,
leader: false,
Location: nodes["b"].Location,
subnet: kiloNet,
privateIP: nodes["c"].InternalIP,
wireGuardCIDR: nil,
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Location,
cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet},
hostnames: []string{"b", "c"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP},
wireGuardIP: w2,
},
},
},
},
{
name: "node from a",
granularity: NodeGranularity,
hostname: nodes["a"].Name,
result: &Topology{
hostname: nodes["a"].Name,
leader: true,
Location: nodes["a"].Name,
subnet: kiloNet,
privateIP: nodes["a"].InternalIP,
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Name,
cidrs: []*net.IPNet{nodes["b"].Subnet},
hostnames: []string{"b"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP},
wireGuardIP: w2,
},
{
AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"),
Endpoint: nodes["c"].ExternalIP.IP.String(),
Key: string(nodes["c"].Key),
Location: nodes["c"].Name,
cidrs: []*net.IPNet{nodes["c"].Subnet},
hostnames: []string{"c"},
privateIPs: []net.IP{nodes["c"].InternalIP.IP},
wireGuardIP: w3,
},
},
},
},
{
name: "node from b",
granularity: NodeGranularity,
hostname: nodes["b"].Name,
result: &Topology{
hostname: nodes["b"].Name,
leader: true,
Location: nodes["b"].Name,
subnet: kiloNet,
privateIP: nodes["b"].InternalIP,
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Name,
cidrs: []*net.IPNet{nodes["b"].Subnet},
hostnames: []string{"b"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP},
wireGuardIP: w2,
},
{
AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"),
Endpoint: nodes["c"].ExternalIP.IP.String(),
Key: string(nodes["c"].Key),
Location: nodes["c"].Name,
cidrs: []*net.IPNet{nodes["c"].Subnet},
hostnames: []string{"c"},
privateIPs: []net.IP{nodes["c"].InternalIP.IP},
wireGuardIP: w3,
},
},
},
},
{
name: "node from c",
granularity: NodeGranularity,
hostname: nodes["c"].Name,
result: &Topology{
hostname: nodes["c"].Name,
leader: true,
Location: nodes["c"].Name,
subnet: kiloNet,
privateIP: nodes["c"].InternalIP,
wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)},
Segments: []*segment{
{
AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"),
Endpoint: nodes["a"].ExternalIP.IP.String(),
Key: string(nodes["a"].Key),
Location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"),
Endpoint: nodes["b"].ExternalIP.IP.String(),
Key: string(nodes["b"].Key),
Location: nodes["b"].Name,
cidrs: []*net.IPNet{nodes["b"].Subnet},
hostnames: []string{"b"},
privateIPs: []net.IP{nodes["b"].InternalIP.IP},
wireGuardIP: w2,
},
{
AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"),
Endpoint: nodes["c"].ExternalIP.IP.String(),
Key: string(nodes["c"].Key),
Location: nodes["c"].Name,
cidrs: []*net.IPNet{nodes["c"].Subnet},
hostnames: []string{"c"},
privateIPs: []net.IP{nodes["c"].InternalIP.IP},
wireGuardIP: w3,
},
},
},
},
} {
tc.result.Key = string(key)
tc.result.Port = port
topo, err := NewTopology(nodes, tc.granularity, tc.hostname, port, key, kiloNet)
if err != nil {
t.Errorf("test case %q: failed to generate Topology: %v", tc.name, err)
}
if diff := pretty.Compare(topo, tc.result); diff != "" {
t.Errorf("test case %q: got diff: %v", tc.name, diff)
}
}
}
func mustTopo(t *testing.T, nodes map[string]*Node, granularity Granularity, hostname string, port int, key []byte, subnet *net.IPNet) *Topology {
topo, err := NewTopology(nodes, granularity, hostname, port, key, subnet)
if err != nil {
t.Errorf("failed to generate Topology: %v", err)
}
return topo
}
func TestRoutes(t *testing.T) {
nodes, key, port, kiloNet := setup(t)
kiloIface := 0
privIface := 1
pubIface := 2
mustTopoForGranularityAndHost := func(granularity Granularity, hostname string) *Topology {
return mustTopo(t, nodes, granularity, hostname, port, key, kiloNet)
}
for _, tc := range []struct {
name string
local bool
topology *Topology
result []*netlink.Route
}{
{
name: "datacenter from a",
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name),
result: []*netlink.Route{
{
Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].cidrs[1],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "datacenter from b",
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name),
result: []*netlink.Route{
{
Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "datacenter from c",
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name),
result: []*netlink.Route{
{
Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[1].wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from a",
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name),
result: []*netlink.Route{
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from b",
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name),
result: []*netlink.Route{
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from c",
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name),
result: []*netlink.Route{
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].cidrs[0],
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "datacenter from a local",
local: true,
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name),
result: []*netlink.Route{
{
Dst: nodes["b"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["c"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "datacenter from b local",
local: true,
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name),
result: []*netlink.Route{
{
Dst: nodes["a"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["c"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["c"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "datacenter from c local",
local: true,
topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name),
result: []*netlink.Route{
{
Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["a"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[1].wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["b"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: nodes["b"].InternalIP.IP,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from a local",
local: true,
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name),
result: []*netlink.Route{
{
Dst: nodes["b"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["c"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from b local",
local: true,
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name),
result: []*netlink.Route{
{
Dst: nodes["a"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["c"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["c"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
{
name: "node from c local",
local: true,
topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name),
result: []*netlink.Route{
{
Dst: nodes["a"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["a"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: nodes["b"].Subnet,
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
{
Dst: oneAddressCIDR(nodes["b"].InternalIP.IP),
Flags: int(netlink.FLAG_ONLINK),
Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP,
LinkIndex: kiloIface,
Protocol: unix.RTPROT_STATIC,
},
},
},
} {
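// Generate the routes for this test case and compare them against the expected set.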
routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, NeverEncapsulate)
if diff := pretty.Compare(routes, tc.result); diff != "" {
t.Errorf("test case %q: got diff: %v", tc.name, diff)
}
}
}
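
// TestConf verifies that the WireGuard configuration rendered for each node
// lists exactly the expected peers, endpoints, and allowed IPs at both
// data-center and node granularity.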
func TestConf(t *testing.T) {
nodes, key, port, kiloNet := setup(t)
for _, tc := range []struct {
name string
topology *Topology
result string
}{
{
name: "datacenter from a",
topology: mustTopo(t, nodes, DataCenterGranularity, nodes["a"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
`,
},
{
name: "datacenter from b",
topology: mustTopo(t, nodes, DataCenterGranularity, nodes["b"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
`,
},
{
name: "datacenter from c",
topology: mustTopo(t, nodes, DataCenterGranularity, nodes["c"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
`,
},
{
name: "node from a",
topology: mustTopo(t, nodes, NodeGranularity, nodes["a"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32
[Peer]
PublicKey = key3
Endpoint = 10.1.0.3:51820
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
`,
},
{
name: "node from b",
topology: mustTopo(t, nodes, NodeGranularity, nodes["b"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key3
Endpoint = 10.1.0.3:51820
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
`,
},
{
name: "node from c",
topology: mustTopo(t, nodes, NodeGranularity, nodes["c"].Name, port, key, kiloNet),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32
`,
},
} {
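// Render the configuration for this topology and compare it to the expected string.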
conf, err := tc.topology.Conf()
if err != nil {
t.Errorf("test case %q: failed to generate conf: %v", tc.name, err)
}
if string(conf) != tc.result {
t.Errorf("test case %q: expected %s got %s", tc.name, tc.result, string(conf))
}
}
}
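
// TestFindLeader verifies that findLeader prefers nodes explicitly marked as
// leaders and, among those candidates, nodes with publicly routable external IPs.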
func TestFindLeader(t *testing.T) {
ip, e1, err := net.ParseCIDR("10.0.0.1/32")
if err != nil {
t.Fatalf("failed to parse external IP CIDR: %v", err)
}
e1.IP = ip
ip, e2, err := net.ParseCIDR("8.8.8.8/32")
if err != nil {
t.Fatalf("failed to parse external IP CIDR: %v", err)
}
e2.IP = ip
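// e1 is a privately routed address and e2 is a publicly routable one;
// nodes "d" and "2" are explicitly marked as leaders.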
nodes := []*Node{
{
Name: "a",
ExternalIP: e1,
},
{
Name: "b",
ExternalIP: e2,
},
{
Name: "c",
ExternalIP: e2,
},
{
Name: "d",
ExternalIP: e1,
Leader: true,
},
{
Name: "2",
ExternalIP: e2,
Leader: true,
},
}
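// Each case selects a subset of the nodes above and records the index,
// within that subset, of the node expected to be chosen as leader.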
for _, tc := range []struct {
name string
nodes []*Node
out int
}{
{
name: "nil",
nodes: nil,
out: 0,
},
{
name: "one",
nodes: []*Node{nodes[0]},
out: 0,
},
{
name: "non-leaders",
nodes: []*Node{nodes[0], nodes[1], nodes[2]},
out: 1,
},
{
name: "leaders",
nodes: []*Node{nodes[3], nodes[4]},
out: 1,
},
{
name: "public",
nodes: []*Node{nodes[1], nodes[2], nodes[4]},
out: 2,
},
{
name: "private",
nodes: []*Node{nodes[0], nodes[3]},
out: 1,
},
{
name: "all",
nodes: nodes,
out: 4,
},
} {
l := findLeader(tc.nodes)
if l != tc.out {
t.Errorf("test case %q: expected %d got %d", tc.name, tc.out, l)
}
}
}