Merge pull request #202 from squat/e2e_improvements
e2e test suite improvements
commit 6b5001bf0e
@@ -4,17 +4,17 @@

 setup_suite() {
     # shellcheck disable=SC2016
-    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
+    _kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
     block_until_ready_by_name kube-system kilo-userspace
-    $KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
+    _kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }

 test_full_mesh_connectivity() {
     assert "retry 30 5 '' check_ping" "should be able to ping all Pods"
-    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 12" "adjacency should return the right number of successful pings"
+    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 3" "adjacency should return the right number of successful pings"
     echo "sleep for 30s (one reconciliation period) and try again..."
     sleep 30
-    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 12" "adjacency should return the right number of successful pings after reconciling"
+    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 3" "adjacency should return the right number of successful pings after reconciling"
 }

 test_full_mesh_peer() {
@@ -23,9 +23,9 @@ test_full_mesh_peer() {

 test_full_mesh_allowed_location_ips() {
     docker exec kind-cluster-kilo-control-plane ip address add 10.6.0.1/32 dev eth0
-    $KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips=10.6.0.1/32
+    _kubectl annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips=10.6.0.1/32
     assert_equals Unauthorized "$(retry 10 5 'IP is not yet routable' curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz)" "should be able to make HTTP request to allowed location IP"
-    $KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips-
+    _kubectl annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips-
     assert "retry 10 5 'IP is still routable' _not curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz" "should not be able to make HTTP request to allowed location IP"
     docker exec kind-cluster-kilo-control-plane ip address delete 10.6.0.1/32 dev eth0
 }
@@ -39,5 +39,5 @@ test_reject_peer_empty_public_key() {
 }

 test_mesh_granularity_auto_detect() {
-    assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity full)"
+    assert_equals "$(_kgctl graph)" "$(_kgctl graph --mesh-granularity full)"
 }
@@ -1,11 +1,10 @@
 kind: Cluster
 apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
-- role: control-plane
-- role: worker
-- role: worker
+- role: control-plane$WORKERS
 networking:
   disableDefaultCNI: true # disable kindnet
-  podSubnet: 10.42.0.0/16
   apiServerAddress: 172.18.0.1
-  apiServerPort: 6443
+  apiServerPort: $API_SERVER_PORT
+  podSubnet: $POD_SUBNET
+  serviceSubnet: $SERVICE_SUBNET
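For reference, a sketch of what this template renders to via the build_kind_config helper added in e2e/lib.sh below, assuming two workers, the helper's default values, and that envsubst is installed; illustrative only, not part of the diff:

    # Render the kind config with two extra workers and default substitutions.
    build_kind_config 2
    # Expected output (defaults substituted by envsubst):
    # kind: Cluster
    # apiVersion: kind.x-k8s.io/v1alpha4
    # nodes:
    # - role: control-plane
    # - role: worker
    # - role: worker
    # networking:
    #   disableDefaultCNI: true # disable kindnet
    #   apiServerAddress: 172.18.0.1
    #   apiServerPort: 6443
    #   podSubnet: 10.42.0.0/16
    #   serviceSubnet: 10.43.0.0/16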
e2e/lib.sh (80 lines changed)
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-export KUBECONFIG="kind.yaml"
+KUBECONFIG="kind.yaml"
 KIND_CLUSTER="kind-cluster-kilo"
 KIND_BINARY="${KIND_BINARY:-kind}"
 KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"
@@ -16,9 +16,9 @@ retry() {
         if "$@"; then
             return 0
         else
-            printf "%s(attempt %d/%d)\n" "$ERROR" "$c" "$COUNT" | color "$YELLOW"
+            printf "%s(attempt %d/%d)\n" "$ERROR" "$c" "$COUNT" | color "$YELLOW" 1>&2
             if [ "$c" != "$COUNT" ]; then
-                printf "retrying in %d seconds...\n" "$SLEEP" | color "$YELLOW"
+                printf "retrying in %d seconds...\n" "$SLEEP" | color "$YELLOW" 1>&2
                 sleep "$SLEEP"
             fi
         fi
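The 1>&2 redirects matter because callers capture retry's stdout. A minimal sketch of the pattern used elsewhere in this suite:

    # Without the redirect, the "(attempt i/N)" and "retrying in ..." messages
    # would land inside the captured value and break the comparison below.
    response="$(retry 10 5 'IP is not yet routable' curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz)"
    assert_equals Unauthorized "$response" "should be able to make HTTP request to allowed location IP"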
@@ -33,6 +33,37 @@ _not() {
     return 0
 }

+# _kubectl is a helper that calls kubectl with the --kubeconfig flag.
+_kubectl() {
+    $KUBECTL_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
+
+# _kgctl is a helper that calls kgctl with the --kubeconfig flag.
+_kgctl() {
+    $KGCTL_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
+
+# _kind is a helper that calls kind with the --kubeconfig flag.
+_kind() {
+    $KIND_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
+
+# shellcheck disable=SC2120
+build_kind_config() {
+    local WORKER_COUNT="${1:-0}"
+    export API_SERVER_PORT="${2:-6443}"
+    export POD_SUBNET="${3:-10.42.0.0/16}"
+    export SERVICE_SUBNET="${4:-10.43.0.0/16}"
+    export WORKERS=""
+    local i=0
+    while [ "$i" -lt "$WORKER_COUNT" ]; do
+        WORKERS="$(printf "%s\n- role: worker" "$WORKERS")"
+        ((i++))
+    done
+    envsubst < ./kind-config.yaml
+    unset API_SERVER_PORT POD_SUBNET SERVICE_SUBNET WORKERS
+}
+
 create_interface() {
     docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
 }
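Since KUBECONFIG is no longer exported at the top of lib.sh, each tool now receives the kubeconfig explicitly through these wrappers. A minimal sketch of the equivalence; the get pods arguments are only illustrative:

    # These two invocations do the same thing after this change:
    _kubectl get pods -n kube-system
    $KUBECTL_BINARY --kubeconfig="$KUBECONFIG" get pods -n kube-system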
@@ -42,7 +73,7 @@ delete_interface() {
 }

 create_peer() {
-    cat <<EOF | $KUBECTL_BINARY apply -f -
+    cat <<EOF | _kubectl apply -f -
 apiVersion: kilo.squat.ai/v1alpha1
 kind: Peer
 metadata:
@@ -56,12 +87,12 @@ EOF
 }

 delete_peer() {
-    $KUBECTL_BINARY delete peer "$1"
+    _kubectl delete peer "$1"
 }

 is_ready() {
-    for pod in $($KUBECTL_BINARY -n "$1" get pods -o name -l "$2"); do
-        if ! $KUBECTL_BINARY -n "$1" get "$pod" | tail -n 1 | grep -q Running; then
+    for pod in $(_kubectl -n "$1" get pods -o name -l "$2"); do
+        if ! _kubectl -n "$1" get "$pod" | tail -n 1 | grep -q Running; then
             return 1;
         fi
     done
@@ -81,35 +112,38 @@ block_until_ready() {

 # create_cluster launches a kind cluster and deploys Kilo, Adjacency, and a helper with curl.
 create_cluster() {
-    $KIND_BINARY delete clusters $KIND_CLUSTER > /dev/null
+    # shellcheck disable=SC2119
+    local CONFIG="${1:-$(build_kind_config)}"
+    _kind delete clusters $KIND_CLUSTER > /dev/null
     # Create the kind cluster.
-    $KIND_BINARY create cluster --name $KIND_CLUSTER --config ./kind-config.yaml
+    _kind create cluster --name $KIND_CLUSTER --config <(echo "$CONFIG")
     # Load the Kilo image into kind.
     docker tag "$KILO_IMAGE" squat/kilo:test
+    # This command does not accept the --kubeconfig flag, so call the command directly.
     $KIND_BINARY load docker-image squat/kilo:test --name $KIND_CLUSTER
     # Create the kubeconfig secret.
-    $KUBECTL_BINARY create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
+    _kubectl create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
     # Apply Kilo the the cluster.
-    $KUBECTL_BINARY apply -f ../manifests/crds.yaml
-    $KUBECTL_BINARY apply -f kilo-kind-userspace.yaml
+    _kubectl apply -f ../manifests/crds.yaml
+    _kubectl apply -f kilo-kind-userspace.yaml
     block_until_ready_by_name kube-system kilo-userspace
-    $KUBECTL_BINARY wait nodes --all --for=condition=Ready
+    _kubectl wait nodes --all --for=condition=Ready
     # Wait for CoreDNS.
     block_until_ready kube_system k8s-app=kube-dns
     # Ensure the curl helper is not scheduled on a control-plane node.
-    $KUBECTL_BINARY apply -f helper-curl.yaml
+    _kubectl apply -f helper-curl.yaml
     block_until_ready_by_name default curl
-    $KUBECTL_BINARY taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
-    $KUBECTL_BINARY apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
+    _kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
+    _kubectl apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
     block_until_ready_by_name adjacency adjacency
 }

 delete_cluster () {
-    $KIND_BINARY delete clusters $KIND_CLUSTER
+    _kind delete clusters $KIND_CLUSTER
 }

 curl_pod() {
-    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl $*"
+    _kubectl get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" --kubeconfig="$KUBECONFIG" exec {} -- /usr/bin/curl "$@"
 }

 check_ping() {
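Two usage notes, sketched from the hunk above and the suites that follow; illustrative, not part of the diff:

    # curl_pod now forwards its arguments verbatim ("$@") to curl inside the
    # helper pod, so flags are passed exactly as they would be to a local curl.
    curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz

    # create_cluster now optionally takes a rendered kind config; with no
    # argument it falls back to build_kind_config's defaults.
    create_cluster "$(build_kind_config 2)"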
@@ -123,7 +157,7 @@ check_ping() {
         shift
     done

-    for ip in $($KUBECTL_BINARY get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
+    for ip in $(_kubectl get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
         if [ -n "$LOCAL" ]; then
             ping=$(curl -m 1 -s http://"$ip":8080/ping)
         else
@@ -140,8 +174,8 @@ check_ping() {
 }

 check_adjacent() {
-    $KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
-    [ "$(curl_pod -m 1 -s adjacency:8080/?format=json | jq | grep -c true)" -eq "$1" ]
+    curl_pod adjacency:8080/?format=fancy
+    [ "$(curl_pod -m 1 -s adjacency:8080/?format=json | jq '.[].latencies[].ok' | grep -c true)" -eq $(($1*$1)) ]
 }

 check_peer() {
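After this change, check_adjacent's argument is the number of adjacency pods rather than a raw count of true values, and a complete matrix has n*n successful pings. A worked example matching the updated call sites:

    # With 3 adjacency pods, every pod pings every pod, so the jq filter must
    # count exactly 3*3 = 9 "ok": true entries before the check passes.
    check_adjacent 3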
@@ -152,14 +186,14 @@ check_peer() {
     create_interface "$INTERFACE"
     docker run --rm --entrypoint=/usr/bin/wg "$KILO_IMAGE" genkey > "$INTERFACE"
     assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" -c 'cat /key | wg pubkey')" "should be able to create Peer"
-    assert "$KGCTL_BINARY showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
+    assert "_kgctl showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
     assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini $KILO_IMAGE setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl"
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" set "$INTERFACE" private-key /key
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" address add "$ALLOWED_IP" dev "$INTERFACE"
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
     docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
     assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
-    assert_equals "$($KGCTL_BINARY showconf peer "$PEER")" "$($KGCTL_BINARY showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
+    assert_equals "$(_kgctl showconf peer "$PEER")" "$(_kgctl showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
     rm "$INTERFACE" "$PEER".ini
     delete_peer "$PEER"
     delete_interface "$INTERFACE"
@@ -4,17 +4,17 @@

 setup_suite() {
     # shellcheck disable=SC2016
-    $KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
+    _kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
     block_until_ready_by_name kube-system kilo-userspace
-    $KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
+    _kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }

 test_location_mesh_connectivity() {
     assert "retry 30 5 '' check_ping" "should be able to ping all Pods"
-    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 12" "adjacency should return the right number of successful pings"
+    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 3" "adjacency should return the right number of successful pings"
     echo "sleep for 30s (one reconciliation period) and try again..."
     sleep 30
-    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 12" "adjacency should return the right number of successful pings after reconciling"
+    assert "retry 10 5 'the adjacency matrix is not complete yet' check_adjacent 3" "adjacency should return the right number of successful pings after reconciling"
 }

 test_location_mesh_peer() {
@@ -22,5 +22,5 @@ test_location_mesh_peer() {
 }

 test_mesh_granularity_auto_detect() {
-    assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity location)"
+    assert_equals "$(_kgctl graph)" "$(_kgctl graph --mesh-granularity location)"
 }
@@ -3,5 +3,5 @@
 . lib.sh

 setup_suite() {
-    create_cluster
+    create_cluster "$(build_kind_config 2)"
 }