manifests,pkg/encapsulation: Flannel compatibility

This commit adds basic support for running in compatibility mode with
Flannel. This allows clusters that run Flannel as their principal
networking solution to leverage some advanced Kilo features. In certain
Flannel setups, such clusters can even be multi-cloud. For this, the
cluster either needs to run in a full mesh, or Flannel needs to use the
API server's external IP address.
Lucas Servén Marín
2019-05-14 01:01:53 +02:00
parent cd6eeeb1e7
commit 81d6077fc2
12 changed files with 582 additions and 57 deletions

pkg/mesh/mesh.go

@@ -169,7 +169,7 @@ type Mesh struct {
Backend
cni bool
cniPath string
-enc encapsulation.Interface
+enc encapsulation.Encapsulator
externalIP *net.IPNet
granularity Granularity
hostname string
@@ -202,7 +202,7 @@ type Mesh struct {
}
// New returns a new Mesh instance.
-func New(backend Backend, enc encapsulation.Interface, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
+func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
if err := os.MkdirAll(KiloPath, 0700); err != nil {
return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
}
@@ -245,7 +245,7 @@ func New(backend Backend, enc encapsulation.Interface, granularity Granularity,
}
if enc.Strategy() != encapsulation.Never {
if err := enc.Init(privIface); err != nil {
return nil, fmt.Errorf("failed to initialize encapsulation: %v", err)
return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
}
}
level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
@@ -674,7 +674,7 @@ func (m *Mesh) applyTopology() {
}
// We need to add routes last since they may depend
// on the WireGuard interface.
-routes := t.Routes(m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc.Strategy())
+routes := t.Routes(m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
if err := m.table.Set(routes); err != nil {
level.Error(m.logger).Log("error", err)
m.errorCounter.WithLabelValues("apply").Inc()
@@ -723,7 +723,7 @@ func (m *Mesh) cleanUp() {
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
if err := m.enc.CleanUp(); err != nil {
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulation: %v", err))
level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
m.errorCounter.WithLabelValues("cleanUp").Inc()
}
}
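
Taken together, the mesh.go hunks replace the old encapsulation.Interface with an encapsulation.Encapsulator that the Mesh stores and hands down to the topology. The shape of that interface can be inferred from the call sites above: Init, Index, Strategy, CleanUp, and the new Gw used by Routes. The following is a rough sketch for orientation only; the actual definition lives in pkg/encapsulation, may contain more methods, and the Strategy type is shown as a plain string purely as an assumption:

package encapsulation

import "net"

// Strategy describes when traffic should be encapsulated;
// encapsulation.Never is the value used in New and in the tests.
type Strategy string

// Encapsulator is inferred from the call sites in this commit: the
// mesh initializes it against the private interface, asks for its
// tunnel interface index, and the topology asks it which gateway to
// use when building routes.
type Encapsulator interface {
	CleanUp() error
	Gw(endpoint, private net.IP, subnet *net.IPNet) net.IP
	Index() int
	Init(privIface int) error
	Strategy() Strategy
}

With this change, callers of New pass a concrete encapsulator rather than a bare strategy, as the updated call in the tests below (encapsulation.NewIPIP(encapsulation.Never)) shows.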

pkg/mesh/topology.go

@@ -172,14 +172,16 @@ func (t *Topology) RemoteSubnets() []*net.IPNet {
}
// Routes generates a slice of routes for a given Topology.
-func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encapsulate encapsulation.Strategy) []*netlink.Route {
+func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, enc encapsulation.Encapsulator) []*netlink.Route {
var routes []*netlink.Route
if !t.leader {
-// Find the leader for this segment.
-var leader net.IP
+// Find the GW for this segment.
+// This will be an IP of the leader.
+// In an IPIP encapsulated mesh it is the leader's private IP.
+var gw net.IP
for _, segment := range t.segments {
if segment.location == t.location {
-leader = segment.privateIPs[segment.leader]
+gw = enc.Gw(segment.endpoint, segment.privateIPs[segment.leader], segment.cidrs[segment.leader])
break
}
}
@@ -188,10 +190,10 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: oneAddressCIDR(segment.wireGuardIP),
Flags: int(netlink.FLAG_ONLINK),
-Gw: leader,
+Gw: gw,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
// Add routes for the current segment if local is true.
if segment.location == t.location {
if local {
@@ -206,7 +208,7 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
Gw: segment.privateIPs[i],
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
}
}
continue
@@ -216,20 +218,20 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: segment.cidrs[i],
Flags: int(netlink.FLAG_ONLINK),
-Gw: leader,
+Gw: gw,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
// Add routes to the private IPs of nodes in other segments.
// Number of CIDRs and private IPs always match so
// we can reuse the loop.
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: oneAddressCIDR(segment.privateIPs[i]),
Flags: int(netlink.FLAG_ONLINK),
-Gw: leader,
+Gw: gw,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
}
}
// Add routes for the allowed IPs of peers.
@@ -238,10 +240,10 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
routes = append(routes, encapsulateRoute(&netlink.Route{
Dst: peer.AllowedIPs[i],
Flags: int(netlink.FLAG_ONLINK),
-Gw: leader,
+Gw: gw,
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
}
}
return routes
@@ -261,7 +263,7 @@ func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encap
Gw: segment.privateIPs[i],
LinkIndex: privIface,
Protocol: unix.RTPROT_STATIC,
-}, encapsulate, t.privateIP, tunlIface))
+}, enc.Strategy(), t.privateIP, tunlIface))
}
}
continue
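
The route construction above no longer hard-codes the leader's private IP as the gateway; it asks the encapsulator via Gw. Below is a minimal sketch of how an IPIP encapsulator could satisfy that method, following the new comment in the diff (in an IPIP-encapsulated mesh the gateway is the leader's private IP); the real implementation in pkg/encapsulation may well differ:

// ipip is a stand-in for the IPIP encapsulator; only what is needed
// to illustrate Gw is shown here.
type ipip struct {
	strategy Strategy
}

// Gw returns the gateway that routes toward a segment should use.
// With IPIP, traffic to the segment is tunneled to its leader, so the
// leader's private IP is the next hop; the endpoint and subnet
// arguments are unused in this sketch.
func (i *ipip) Gw(_, private net.IP, _ *net.IPNet) net.IP {
	return private
}

Pushing this decision behind the interface is presumably what lets a Flannel-compatible encapsulator supply different gateway logic without touching the route-building code in Topology.Routes.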

pkg/mesh/topology_test.go

@@ -979,7 +979,7 @@ func TestRoutes(t *testing.T) {
},
},
} {
-routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, encapsulation.Never)
+routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, encapsulation.NewIPIP(encapsulation.Never))
if diff := pretty.Compare(routes, tc.result); diff != "" {
t.Errorf("test case %q: got diff: %v", tc.name, diff)
}
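
The tests now build a concrete encapsulator instead of passing a bare strategy. Continuing the sketch above, a constructor and Strategy accessor consistent with the encapsulation.NewIPIP(encapsulation.Never) call might look as follows; the return type and field layout are assumptions:

// NewIPIP returns an encapsulator that performs IPIP encapsulation
// according to the given strategy, e.g. encapsulation.Never as in the
// test above.
func NewIPIP(strategy Strategy) Encapsulator {
	return &ipip{strategy: strategy}
}

// Strategy reports when this encapsulator should encapsulate traffic.
func (i *ipip) Strategy() Strategy {
	return i.strategy
}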