e2e: test VPN in e2e tests

This commit adds tests for Kilo's VPN (peer) feature to the e2e suite.
To make the tests less flaky, it also ensures that the Kilo Pods connect
to the Kubernetes API using the "root" kubeconfig rather than the API
server's cluster IP, which can become unavailable while the cluster's
networking is being reconfigured.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Author: Lucas Servén Marín <lserven@gmail.com>
Date:   2021-06-15 20:38:21 +02:00
Commit: d10b40acb0 (parent 6542c2ee94)
4 changed files with 103 additions and 25 deletions
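
In short, the new peer tests exercise Kilo's VPN support from outside the cluster: they start a userspace WireGuard interface on the host with boringtun, register it with the cluster as a Kilo Peer, render that peer's WireGuard configuration with kgctl, and then curl the adjacency Pods through the tunnel. A rough sketch of the flow, using the helper names introduced in the diff to e2e/kind.sh below (exact flags and cleanup are in check_peer):

    create_interface wg1                          # host-side boringtun device
    create_peer e2e 10.5.0.1/32 10 "$PUBLIC_KEY"  # Peer resource; $PUBLIC_KEY derived from a freshly generated private key
    kgctl showconf peer e2e > e2e.ini             # WireGuard config for this peer
    # apply e2e.ini and the private key to wg1, assign 10.5.0.1/32,
    # route the Pod subnet 10.42.0.0/16 over the interface, then:
    check_ping --local                            # curl the adjacency Pods directly from the host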


@@ -207,8 +207,8 @@ $(BASH_UNIT):
     curl -Lo $@ https://raw.githubusercontent.com/pgrange/bash_unit/v1.7.2/bash_unit
     chmod +x $@
 
-e2e: container ${KIND_BINARY} ${KUBECTL_BINARY} $(BASH_UNIT)
-    KILO_IMAGE=${IMAGE}:${ARCH}-${VERSION} KIND_BINARY=${KIND_BINARY} $(BASH_UNIT) ./e2e/kind.sh
+e2e: container $(KIND_BINARY) $(KUBECTL_BINARY) $(BASH_UNIT) bin/$(OS)/$(ARCH)/kgctl
+    KILO_IMAGE=$(IMAGE):$(ARCH)-$(VERSION) KIND_BINARY=$(KIND_BINARY) KUBECTL_BINARY=$(KUBECTL_BINARY) KGCTL_BINARY=$(shell pwd)/bin/$(OS)/$(ARCH)/kgctl $(BASH_UNIT) ./e2e/kind.sh
 
 header: .header
     @HEADER=$$(cat .header); \
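
With kgctl added as a prerequisite and the extra variables exported to bash_unit, the whole suite, including the new peer tests, should still run from a single target; the listed prerequisites take care of fetching or building kind, kubectl, bash_unit, and kgctl as needed:

    make e2e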


@@ -108,6 +108,7 @@ spec:
         - --hostname=$(NODE_NAME)
         - --create-interface=false
         - --mesh-granularity=full
+        - --kubeconfig=/etc/kubernetes/kubeconfig
         env:
         - name: NODE_NAME
           valueFrom:
@@ -132,6 +133,9 @@ spec:
         - name: wireguard
           mountPath: /var/run/wireguard
           readOnly: false
+        - name: kubeconfig
+          mountPath: /etc/kubernetes
+          readOnly: true
       - name: boringtun
         image: leonnicolas/boringtun:alpine
         args:
@@ -195,3 +199,6 @@ spec:
       - name: wireguard
         hostPath:
          path: /var/run/wireguard
+      - name: kubeconfig
+        secret:
+          secretName: kubeconfig
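
Once the kubeconfig secret exists (it is created in setup_suite below), its contents can be sanity-checked to confirm which API endpoint Kilo will use instead of the in-cluster service IP; for example, roughly:

    kubectl -n kube-system get secret kubeconfig -o jsonpath='{.data.kubeconfig}' | base64 -d | grep server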


@@ -7,3 +7,5 @@ nodes:
 networking:
   disableDefaultCNI: true # disable kindnet
   podSubnet: 10.42.0.0/16
+  apiServerAddress: 172.18.0.1
+  apiServerPort: 6443
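
The address 172.18.0.1 is an assumption about the local Docker setup: it is typically the host's gateway address on the Docker network that kind creates (named "kind", subnet 172.18.0.0/16 by default), which keeps the API server reachable even while Kilo reconfigures in-cluster networking. It can be checked with something like:

    docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}} {{end}}'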


@@ -1,9 +1,10 @@
 #!/usr/bin/env bash
 # shellcheck disable=SC2034
-KUBECONFIG="kind.yaml"
+export KUBECONFIG="kind.yaml"
 KIND_CLUSTER="kind-cluster-kilo"
 KIND_BINARY="${KIND_BINARY:-kind}"
 KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"
+KGCTL_BINARY="${KGCTL_BINARY:-kgctl}"
 KILO_IMAGE="${KILO_IMAGE:-squat/kilo}"
 
 retry() {
@@ -14,21 +15,47 @@ retry() {
     shift 3
     for c in $(seq 1 "$COUNT"); do
         if "$@"; then
             return 0
         else
             printf "%s(attempt %d/%d)\n" "$ERROR" "$c" "$COUNT" | color "$YELLOW"
             if [ "$c" != "$COUNT" ]; then
                 printf "retrying in %d seconds...\n" "$SLEEP" | color "$YELLOW"
                 sleep "$SLEEP"
             fi
         fi
     done
     return 1
 }
 
+create_interface() {
+    docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
+}
+
+delete_interface() {
+    docker rm --force "$1"
+}
+
+create_peer() {
+    cat <<EOF | $KUBECTL_BINARY apply -f -
+apiVersion: kilo.squat.ai/v1alpha1
+kind: Peer
+metadata:
+  name: $1
+spec:
+  allowedIPs:
+  - $2
+  persistentKeepalive: $3
+  publicKey: $4
+EOF
+}
+
+delete_peer() {
+    $KUBECTL_BINARY delete peer "$1"
+}
+
 is_ready() {
     for pod in $($KUBECTL_BINARY -n "$1" get pods -o name -l "$2"); do
         if ! $KUBECTL_BINARY -n "$1" get "$pod" | tail -n 1 | grep -q Running; then
             return 1;
         fi
     done
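
For the arguments that check_peer passes later in this file (name e2e, allowed IP 10.5.0.1/32, keepalive 10), the heredoc in create_peer expands to a Peer resource along these lines:

    apiVersion: kilo.squat.ai/v1alpha1
    kind: Peer
    metadata:
      name: e2e
    spec:
      allowedIPs:
      - 10.5.0.1/32
      persistentKeepalive: 10
      publicKey: <public key derived from the freshly generated private key>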
@@ -54,6 +81,8 @@ setup_suite() {
     # Load the Kilo image into kind.
     docker tag "$KILO_IMAGE" squat/kilo:test
     $KIND_BINARY load docker-image squat/kilo:test --name $KIND_CLUSTER
+    # Create the kubeconfig secret.
+    $KUBECTL_BINARY create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
     # Apply Kilo the the cluster.
     $KUBECTL_BINARY apply -f ../manifests/crds.yaml
     $KUBECTL_BINARY apply -f kilo-kind-userspace.yaml
@@ -69,36 +98,68 @@ setup_suite() {
 }
 
 check_ping() {
-    for ip in $($KUBECTL_BINARY get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
-        ping=$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl -s http://$ip:8080/ping")
-        if [ "$ping" = "pong" ]; then
-            echo "successfully pinged $ip"
-        else
-            printf 'failed to ping %s; expected "pong" but got "%s"\n' "$ip" "$ping"
-            return 1
-        fi
-    done
-    return 0
+    local LOCAL
+    while [ $# -gt 0 ]; do
+        case $1 in
+            --local)
+                LOCAL=true
+                ;;
+        esac
+        shift
+    done
+
+    for ip in $($KUBECTL_BINARY get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
+        if [ -n "$LOCAL" ]; then
+            ping=$(curl -m 1 -s http://"$ip":8080/ping)
+        else
+            ping=$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl -m 1 -s http://$ip:8080/ping")
+        fi
+        if [ "$ping" = "pong" ]; then
+            echo "successfully pinged $ip"
+        else
+            printf 'failed to ping %s; expected "pong" but got "%s"\n' "$ip" "$ping"
+            return 1
+        fi
+    done
+    return 0
 }
 
 check_adjacent() {
-    echo
-    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=fancy'
+    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
     assert_equals "12" \
-        "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=json' | jq | grep -c true)" \
+        "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=json' | jq | grep -c true)" \
         "Adjacency returned the wrong number of successful pings"
     echo "sleep for 30s (one reconciliation period) and try again..."
     sleep 30
-    echo
-    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=fancy'
+    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
     assert_equals "12" \
-        "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=json' | jq | grep -c true)" \
+        "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=json' | jq | grep -c true)" \
         "Adjacency returned the wrong number of successful pings"
 }
 
+check_peer() {
+    local INTERFACE=$1
+    local PEER=$2
+    local ALLOWED_IP=$3
+    local GRANULARITY=$4
+    create_interface "$INTERFACE"
+    docker run --rm --entrypoint=/usr/bin/wg "$KILO_IMAGE" genkey > "$INTERFACE"
+    create_peer "$PEER" "$ALLOWED_IP" 10 "$(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" -c 'cat /key | wg pubkey')"
+    $KGCTL_BINARY showconf peer "$PEER" --mesh-granularity="$GRANULARITY" > "$PEER".ini
+    docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$PEER.ini":/peer.ini "$KILO_IMAGE" setconf "$INTERFACE" /peer.ini
+    docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" set "$INTERFACE" private-key /key
+    docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" address add "$ALLOWED_IP" dev "$INTERFACE"
+    docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
+    docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
+    retry 10 5 "" check_ping --local
+    rm "$INTERFACE" "$PEER".ini
+    delete_peer "$PEER"
+    delete_interface "$INTERFACE"
+}
+
 test_locationmesh() {
     # shellcheck disable=SC2016
-    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--mesh-granularity=location"]}]}}}}'
+    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
     sleep 5
     block_until_ready_by_name kube-system kilo-userspace
     $KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
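
While check_peer is running, the host-side interface can be inspected through the same userspace control socket the test's own wg invocations use; a rough spot-check, assuming the wg1 interface name used by the tests and the squat/kilo:test tag created in setup_suite:

    docker run --rm --network=host --cap-add=NET_ADMIN \
        -v /var/run/wireguard:/var/run/wireguard \
        --entrypoint=/usr/bin/wg squat/kilo:test show wg1

Once the tunnel is up, a recent handshake and non-zero transfer counters should appear for the e2e peer.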
@@ -107,9 +168,13 @@ test_locationmesh() {
     retry 10 5 "the adjacency matrix is not complete yet" check_adjacent
 }
 
+test_locationmesh_peer() {
+    check_peer wg1 e2e 10.5.0.1/32 location
+}
+
 test_fullmesh() {
     # shellcheck disable=SC2016
-    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--mesh-granularity=full"]}]}}}}'
+    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
     sleep 5
     block_until_ready_by_name kube-system kilo-userspace
     $KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
@@ -118,6 +183,10 @@ test_fullmesh() {
     retry 10 5 "the adjacency matrix is not complete yet" check_adjacent
 }
 
+test_fullmesh_peer() {
+    check_peer wg1 e2e 10.5.0.1/32 full
+}
+
 teardown_suite () {
     $KIND_BINARY delete clusters $KIND_CLUSTER
 }
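
To iterate on just the new VPN tests, bash_unit's test filter can be combined with the same variables the Makefile sets. This is a sketch, not the canonical invocation: it assumes a bash_unit version that supports the -p filter, an already built kgctl and Kilo image, and kind/kubectl on PATH (the image tag and binary paths below are placeholders for whatever your local build produced):

    KILO_IMAGE=squat/kilo:<arch>-<version> \
    KGCTL_BINARY="$PWD/bin/linux/amd64/kgctl" \
        ./bash_unit -p test_fullmesh_peer ./e2e/kind.sh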