e2e: add test for allowed location IPs

This commit adds a new e2e test for the recently introduced
allowed-location-ips annotation. The test annotates the control-plane
node with an allowed IP and then ensures that this IP is reachable from
the curl helper Pod, which is now guaranteed to be scheduled on a
worker node, since the helper manifest is applied before the
control-plane taint is removed.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Lucas Servén Marín 2021-06-16 12:50:52 +02:00
parent 9a75468a32
commit 6ab338cf58
2 changed files with 26 additions and 7 deletions
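
For reference, the annotation exercised by this test can be applied to or removed from any node with kubectl; a minimal sketch, using a placeholder node name and an example IP (the new test below does the equivalent against the kind control-plane node):

# Make an extra IP in this node's location routable over the Kilo mesh.
kubectl annotate node <node-name> kilo.squat.ai/allowed-location-ips=10.6.0.1/32
# Removing the annotation makes the IP unreachable from the mesh again.
kubectl annotate node <node-name> kilo.squat.ai/allowed-location-ips-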


@@ -12,7 +12,6 @@ spec:
       app.kubernetes.io/name: curl
   template:
     metadata:
-      creationTimestamp: null
       labels:
         app.kubernetes.io/name: curl
     spec:


@@ -1,5 +1,4 @@
 #!/usr/bin/env bash
-# shellcheck disable=SC2034
 export KUBECONFIG="kind.yaml"
 KIND_CLUSTER="kind-cluster-kilo"
 KIND_BINARY="${KIND_BINARY:-kind}"
@@ -27,6 +26,13 @@ retry() {
     return 1
 }
 
+_not() {
+    if "$@"; then
+        return 1
+    fi
+    return 0
+}
+
 create_interface() {
     docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
 }
@@ -88,13 +94,18 @@ setup_suite() {
     $KUBECTL_BINARY apply -f kilo-kind-userspace.yaml
     block_until_ready_by_name kube-system kilo-userspace
     $KUBECTL_BINARY wait nodes --all --for=condition=Ready
-    # wait for coredns
+    # Wait for CoreDNS.
     block_until_ready kube_system k8s-app=kube-dns
+    # Ensure the curl helper is not scheduled on a control-plane node.
+    $KUBECTL_BINARY apply -f helper-curl.yaml
+    block_until_ready_by_name default curl
     $KUBECTL_BINARY taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
     $KUBECTL_BINARY apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
-    $KUBECTL_BINARY apply -f helper-curl.yaml
     block_until_ready_by_name adjacency adjacency
-    block_until_ready_by_name default curl
 }
+
+curl_pod() {
+    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl $*"
+}
 
 check_ping() {
@@ -112,7 +123,7 @@ check_ping() {
     if [ -n "$LOCAL" ]; then
         ping=$(curl -m 1 -s http://"$ip":8080/ping)
     else
-        ping=$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl -m 1 -s http://$ip:8080/ping")
+        ping=$(curl_pod -m 1 -s http://"$ip":8080/ping)
     fi
     if [ "$ping" = "pong" ]; then
         echo "successfully pinged $ip"
@@ -126,7 +137,7 @@ check_ping() {
 check_adjacent() {
     $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
-    [ "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=json' | jq | grep -c true)" -eq "$1" ]
+    [ "$(curl_pod -m 1 -s adjacency:8080/?format=json | jq | grep -c true)" -eq "$1" ]
 }
 
 check_peer() {
@@ -189,6 +200,15 @@ test_reject_peer_empty_public_key() {
     assert_fail "create_peer e2e 10.5.0.1/32 0 ''" "should not be able to create Peer with empty public key"
 }
 
+test_fullmesh_allowed_location_ips() {
+    docker exec kind-cluster-kilo-control-plane ip address add 10.6.0.1/32 dev eth0
+    $KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips=10.6.0.1/32
+    assert_equals Unauthorized "$(retry 10 5 'IP is not yet routable' curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz)" "should be able to make HTTP request to allowed location IP"
+    $KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips-
+    assert "retry 10 5 'IP is still routable' _not curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz" "should not be able to make HTTP request to allowed location IP"
+    docker exec kind-cluster-kilo-control-plane ip address delete 10.6.0.1/32 dev eth0
+}
+
 teardown_suite () {
     $KIND_BINARY delete clusters $KIND_CLUSTER
 }