pkg/mesh,docs: document and fix keepalive logic

This commit documents the use of the persistent-keepalive annotation and
corrects the implementation of keepalives.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Lucas Servén Marín 2020-03-03 20:10:20 +01:00
parent 406a397566
commit 24d7c27901
8 changed files with 117 additions and 115 deletions
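In short, the corrected logic treats the persistent keepalive as a property of the node that sends the keepalives: the local node's annotated interval is stored once on the Topology and applied to every WireGuard peer it generates, rather than being copied from each remote segment leader or peer. The following is a minimal, self-contained sketch of that behavior; the type and field names are simplified stand-ins for the real mesh and wireguard types, not the actual API.

```go
package main

import "fmt"

// peerConf is a simplified stand-in for wireguard.Peer.
type peerConf struct {
	publicKey           string
	persistentKeepalive int // seconds; 0 disables keepalives
}

// topology is a simplified stand-in for mesh.Topology: after this commit,
// the keepalive interval lives on the topology (i.e. the local node) itself.
type topology struct {
	persistentKeepalive int
	peerKeys            []string
}

// conf mirrors the corrected Conf(): every generated peer receives the
// local node's interval rather than the remote peer's setting.
func (t *topology) conf() []peerConf {
	var peers []peerConf
	for _, k := range t.peerKeys {
		peers = append(peers, peerConf{publicKey: k, persistentKeepalive: t.persistentKeepalive})
	}
	return peers
}

func main() {
	t := topology{persistentKeepalive: 10, peerKeys: []string{"key1", "key2"}}
	for _, p := range t.conf() {
		fmt.Printf("[Peer]\nPublicKey = %s\nPersistentKeepalive = %d\n\n", p.publicKey, p.persistentKeepalive)
	}
}
```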


@@ -60,7 +60,7 @@ func runGraph(_ *cobra.Command, _ []string) error {
peers[p.Name] = p
}
}
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, []byte{}, subnet)
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, 0, []byte{}, subnet, nodes[hostname].PersistentKeepalive)
if err != nil {
return fmt.Errorf("failed to create topology: %v", err)
}


@@ -147,7 +147,7 @@ func runShowConfNode(_ *cobra.Command, args []string) error {
}
}
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, []byte{}, subnet)
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, opts.port, []byte{}, subnet, nodes[hostname].PersistentKeepalive)
if err != nil {
return fmt.Errorf("failed to create topology: %v", err)
}
@@ -236,7 +236,7 @@ func runShowConfPeer(_ *cobra.Command, args []string) error {
return fmt.Errorf("did not find any peer named %q in the cluster", peer)
}
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, []byte{}, subnet)
t, err := mesh.NewTopology(nodes, peers, opts.granularity, hostname, mesh.DefaultKiloPort, []byte{}, subnet, peers[peer].PersistentKeepalive)
if err != nil {
return fmt.Errorf("failed to create topology: %v", err)
}


@@ -4,10 +4,11 @@ The following annotations can be added to any Kubernetes Node object to configur
|Name|type|examples|
|----|----|-------|
|[kilo.squat.ai/force-endpoint](#force-endpoint)|host:port|`55.55.55.55:51820`, `example.com:1337|
|[kilo.squat.ai/force-endpoint](#force-endpoint)|host:port|`55.55.55.55:51820`, `example.com:1337`|
|[kilo.squat.ai/force-internal-ip](#force-internal-ip)|CIDR|`55.55.55.55/32`|
|[kilo.squat.ai/leader](#leader)|string|`""`, `true`|
|[kilo.squat.ai/location](#location)|string|`gcp-east`, `lab`|
|[kilo.squat.ai/persistent-keepalive](#persistent-keepalive)|uint|`10`|
### force-endpoint
In order to create links between locations, Kilo requires at least one node in each location to have an endpoint, ie a `host:port` combination, that is routable from the other locations.
@@ -42,3 +43,11 @@ Kilo will try to infer each node's location from the [topology.kubernetes.io/reg
If the label is not present for a node, for example if running a bare-metal cluster or on an unsupported cloud provider, then the location annotation should be specified.
_Note_: all nodes without a defined location will be considered to be in the default location `""`.
### persistent-keepalive
In certain deployments, cluster nodes may be located behind NAT or a firewall, e.g. edge nodes located behind a commodity router.
In these scenarios, the nodes behind NAT can send packets to the nodes outside of the NATed network; however, the outside nodes can only send packets into the NATed network as long as the NAT mapping remains valid.
In order for a node behind NAT to receive packets from nodes outside of the NATed network, it must maintain the NAT mapping by regularly sending packets to those nodes, ie by sending _keepalives_.
The frequency of emission of these keepalive packets can be controlled by setting the persistent-keepalive annotation on the node behind NAT.
The annotated node will use the specified value as the persistent-keepalive interval for all of its peers.
For more background, [see the WireGuard documentation on NAT and firewall traversal](https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence).
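To make the semantics concrete, here is a hypothetical sketch of how an annotation value like `10` might be interpreted; the helper below is illustrative only and is not Kilo's actual parsing code. In this sketch, a missing annotation, a value of `0`, or an unparsable value all leave keepalives disabled.

```go
package main

import (
	"fmt"
	"strconv"
)

// keepaliveKey is the annotation documented above.
const keepaliveKey = "kilo.squat.ai/persistent-keepalive"

// keepaliveSeconds interprets the annotation as a number of seconds.
// In this sketch, a missing or invalid value is treated as 0,
// i.e. keepalives disabled.
func keepaliveSeconds(annotations map[string]string) int {
	raw, ok := annotations[keepaliveKey]
	if !ok {
		return 0
	}
	s, err := strconv.ParseUint(raw, 10, 31)
	if err != nil {
		return 0
	}
	return int(s)
}

func main() {
	node := map[string]string{keepaliveKey: "10"}
	// The annotated node sends a keepalive to each of its WireGuard peers
	// every 10 seconds, keeping the NAT mapping alive.
	fmt.Println(keepaliveSeconds(node))
}
```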


@@ -299,7 +299,7 @@ func schema_k8s_apis_kilo_v1alpha1_PeerSpec(ref common.ReferenceCallback) common
},
"persistentKeepalive": {
SchemaProps: spec.SchemaProps{
Description: "PersistentKeepalive is the interval in seconds of the emission of keepalive packets to the peer. This defaults to 0, which disables the feature.",
Description: "PersistentKeepalive is the interval in seconds of the emission of keepalive packets by the peer. This defaults to 0, which disables the feature.",
Type: []string{"integer"},
Format: "int32",
},


@@ -68,7 +68,7 @@ type PeerSpec struct {
// +optional
Endpoint *PeerEndpoint `json:"endpoint,omitempty"`
// PersistentKeepalive is the interval in seconds of the emission
// of keepalive packets to the peer. This defaults to 0, which
// of keepalive packets by the peer. This defaults to 0, which
// disables the feature.
// +optional
PersistentKeepalive int `json:"persistentKeepalive,omitempty"`


@@ -548,7 +548,7 @@ func (m *Mesh) applyTopology() {
if nodes[m.hostname] == nil {
return
}
t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet)
t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive)
if err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()


@@ -42,10 +42,13 @@ type Topology struct {
// leader represents whether or not the local host
// is the segment leader.
leader bool
// persistentKeepalive is the interval in seconds of the emission
// of keepalive packets by the local node to its peers.
persistentKeepalive int
// privateIP is the private IP address of the local node.
privateIP *net.IPNet
// subnet is the Pod subnet of the local node.
subnet *net.IPNet
// privateIP is the private IP address of the local node.
privateIP *net.IPNet
// wireGuardCIDR is the allocated CIDR of the WireGuard
// interface of the local node. If the local node is not
// the leader, then it is nil.
@@ -65,9 +68,6 @@ type segment struct {
hostnames []string
// leader is the index of the leader of the segment.
leader int
// persistentKeepalive is the interval in seconds of the emission
// of keepalive packets to the peer.
persistentKeepalive int
// privateIPs is a slice of private IPs of all peers in the segment.
privateIPs []net.IP
// wireGuardIP is the allocated IP address of the WireGuard
@@ -76,7 +76,7 @@
}
// NewTopology creates a new Topology struct from a given set of nodes and peers.
func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet) (*Topology, error) {
func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int) (*Topology, error) {
topoMap := make(map[string][]*Node)
for _, node := range nodes {
var location string
@@ -96,7 +96,7 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
localLocation = hostname
}
t := Topology{key: key, port: port, hostname: hostname, location: localLocation, subnet: nodes[hostname].Subnet, privateIP: nodes[hostname].InternalIP}
t := Topology{key: key, port: port, hostname: hostname, location: localLocation, persistentKeepalive: persistentKeepalive, privateIP: nodes[hostname].InternalIP, subnet: nodes[hostname].Subnet}
for location := range topoMap {
// Sort the location so the result is stable.
sort.Slice(topoMap[location], func(i, j int) bool {
@@ -121,15 +121,14 @@ func NewTopology(nodes map[string]*Node, peers map[string]*Peer, granularity Gra
privateIPs = append(privateIPs, node.InternalIP.IP)
}
t.segments = append(t.segments, &segment{
allowedIPs: allowedIPs,
endpoint: topoMap[location][leader].Endpoint,
key: topoMap[location][leader].Key,
location: location,
cidrs: cidrs,
hostnames: hostnames,
leader: leader,
privateIPs: privateIPs,
persistentKeepalive: topoMap[location][leader].PersistentKeepalive,
allowedIPs: allowedIPs,
endpoint: topoMap[location][leader].Endpoint,
key: topoMap[location][leader].Key,
location: location,
cidrs: cidrs,
hostnames: hostnames,
leader: leader,
privateIPs: privateIPs,
})
}
// Sort the Topology segments so the result is stable.
@@ -367,14 +366,14 @@ func (t *Topology) Conf() *wireguard.Conf {
AllowedIPs: s.allowedIPs,
Endpoint: s.endpoint,
PublicKey: s.key,
PersistentKeepalive: s.persistentKeepalive,
PersistentKeepalive: t.persistentKeepalive,
}
c.Peers = append(c.Peers, peer)
}
for _, p := range t.peers {
peer := &wireguard.Peer{
AllowedIPs: p.AllowedIPs,
PersistentKeepalive: p.PersistentKeepalive,
PersistentKeepalive: t.persistentKeepalive,
PublicKey: p.PublicKey,
Endpoint: p.Endpoint,
}
@@ -391,10 +390,9 @@ func (t *Topology) AsPeer() *wireguard.Peer {
continue
}
return &wireguard.Peer{
AllowedIPs: s.allowedIPs,
Endpoint: s.endpoint,
PersistentKeepalive: s.persistentKeepalive,
PublicKey: s.key,
AllowedIPs: s.allowedIPs,
Endpoint: s.endpoint,
PublicKey: s.key,
}
}
return nil
@@ -402,25 +400,35 @@ func (t *Topology) AsPeer() *wireguard.Peer {
// PeerConf generates a WireGuard configuration file for a given peer in a Topology.
func (t *Topology) PeerConf(name string) *wireguard.Conf {
var p *Peer
for i := range t.peers {
if t.peers[i].Name == name {
p = t.peers[i]
break
}
}
if p == nil {
return nil
}
c := &wireguard.Conf{}
for _, s := range t.segments {
peer := &wireguard.Peer{
AllowedIPs: s.allowedIPs,
Endpoint: s.endpoint,
PersistentKeepalive: s.persistentKeepalive,
PersistentKeepalive: p.PersistentKeepalive,
PublicKey: s.key,
}
c.Peers = append(c.Peers, peer)
}
for _, p := range t.peers {
if p.Name == name {
for i := range t.peers {
if t.peers[i].Name == name {
continue
}
peer := &wireguard.Peer{
AllowedIPs: p.AllowedIPs,
AllowedIPs: t.peers[i].AllowedIPs,
PersistentKeepalive: p.PersistentKeepalive,
PublicKey: p.PublicKey,
Endpoint: p.Endpoint,
PublicKey: t.peers[i].PublicKey,
Endpoint: t.peers[i].Endpoint,
}
c.Peers = append(c.Peers, peer)
}


@@ -118,15 +118,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -155,15 +154,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -192,15 +190,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: nil,
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Location,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, nodes["c"].Subnet, nodes["c"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -229,15 +226,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)},
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -276,15 +272,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)},
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -323,15 +318,14 @@ func TestNewTopology(t *testing.T) {
wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)},
segments: []*segment{
{
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
persistentKeepalive: nodes["a"].PersistentKeepalive,
wireGuardIP: w1,
allowedIPs: []*net.IPNet{nodes["a"].Subnet, nodes["a"].InternalIP, {IP: w1, Mask: net.CIDRMask(32, 32)}},
endpoint: nodes["a"].Endpoint,
key: nodes["a"].Key,
location: nodes["a"].Name,
cidrs: []*net.IPNet{nodes["a"].Subnet},
hostnames: []string{"a"},
privateIPs: []net.IP{nodes["a"].InternalIP.IP},
wireGuardIP: w1,
},
{
allowedIPs: []*net.IPNet{nodes["b"].Subnet, nodes["b"].InternalIP, {IP: w2, Mask: net.CIDRMask(32, 32)}},
@@ -360,7 +354,7 @@ func TestNewTopology(t *testing.T) {
} {
tc.result.key = key
tc.result.port = port
topo, err := NewTopology(nodes, peers, tc.granularity, tc.hostname, port, key, DefaultKiloSubnet)
topo, err := NewTopology(nodes, peers, tc.granularity, tc.hostname, port, key, DefaultKiloSubnet, 0)
if err != nil {
t.Errorf("test case %q: failed to generate Topology: %v", tc.name, err)
}
@@ -370,8 +364,8 @@ func TestNewTopology(t *testing.T) {
}
}
func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet) *Topology {
topo, err := NewTopology(nodes, peers, granularity, hostname, port, key, subnet)
func mustTopo(t *testing.T, nodes map[string]*Node, peers map[string]*Peer, granularity Granularity, hostname string, port uint32, key []byte, subnet *net.IPNet, persistentKeepalive int) *Topology {
topo, err := NewTopology(nodes, peers, granularity, hostname, port, key, subnet, persistentKeepalive)
if err != nil {
t.Errorf("failed to generate Topology: %v", err)
}
@@ -384,7 +378,7 @@ func TestRoutes(t *testing.T) {
privIface := 1
tunlIface := 2
mustTopoForGranularityAndHost := func(granularity Granularity, hostname string) *Topology {
return mustTopo(t, nodes, peers, granularity, hostname, port, key, DefaultKiloSubnet)
return mustTopo(t, nodes, peers, granularity, hostname, port, key, DefaultKiloSubnet, 0)
}
for _, tc := range []struct {
@@ -1213,7 +1207,7 @@ func TestConf(t *testing.T) {
}{
{
name: "logical from a",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1222,22 +1216,23 @@ ListenPort = 51820
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
PersistentKeepalive = 25
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
PersistentKeepalive = 25
`,
},
{
name: "logical from b",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1245,24 +1240,21 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
PersistentKeepalive = 25
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "logical from c",
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, LogicalGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1270,24 +1262,21 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
PersistentKeepalive = 25
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "full from a",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["a"].Name, port, key, DefaultKiloSubnet, nodes["a"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1296,27 +1285,29 @@ AllowedIPs = 10.5.0.3/24
PublicKey = key2
Endpoint = 10.1.0.2:51820
AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32
PersistentKeepalive = 25
[Peer]
PublicKey = key3
Endpoint = 10.1.0.3:51820
AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32
PersistentKeepalive = 25
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
PersistentKeepalive = 25
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
PersistentKeepalive = 25
`,
},
{
name: "full from b",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["b"].Name, port, key, DefaultKiloSubnet, nodes["b"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1324,7 +1315,6 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
PersistentKeepalive = 25
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
@@ -1334,19 +1324,17 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
`,
},
{
name: "full from c",
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet),
topology: mustTopo(t, nodes, peers, FullGranularity, nodes["c"].Name, port, key, DefaultKiloSubnet, nodes["c"].PersistentKeepalive),
result: `[Interface]
PrivateKey = private
ListenPort = 51820
@@ -1354,7 +1342,6 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key1
Endpoint = 10.1.0.1:51820
PersistentKeepalive = 25
AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32
[Peer]
@@ -1364,13 +1351,11 @@ AllowedIPs = 10.5.0.3/24
[Peer]
PublicKey = key4
PersistentKeepalive = 0
AllowedIPs = 10.5.0.1/24, 10.5.0.2/24
[Peer]
PublicKey = key5
Endpoint = 192.168.0.1:51820
PersistentKeepalive = 0
AllowedIPs = 10.5.0.3/24
`,
},