e2e: don't export KUBECONFIG

This commit modifies the e2e shell scripts so that the KUBECONFIG
variable does not need to be exported. This will become important once
we allow the e2e test suite to launch multiple clusters, e.g. to test
multi-cluster connectivity.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
Lucas Servén Marín, 2021-07-05 13:07:37 +02:00
parent 0733c83a0a
commit 1e1f8819bf
3 changed files with 44 additions and 28 deletions
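
To illustrate the motivation, here is a minimal sketch of how an unexported KUBECONFIG combined with an explicit --kubeconfig flag could later be pointed at more than one cluster; the second kubeconfig name below is hypothetical and not part of this commit:

    #!/usr/bin/env bash
    KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"

    # The helper interpolates the (unexported) variable into an explicit flag,
    # so nothing leaks into the environment of child processes.
    _kubectl() {
        $KUBECTL_BINARY --kubeconfig="$KUBECONFIG" "$@"
    }

    KUBECONFIG="kind.yaml"       # kubeconfig of the first cluster, as in lib.sh
    KUBECONFIG2="kind-2.yaml"    # hypothetical kubeconfig of a second cluster

    _kubectl get nodes                             # queries the first cluster
    KUBECONFIG="$KUBECONFIG2" _kubectl get nodes   # queries the second cluster for this call only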

File 1 of 3: the full-mesh e2e test suite

@@ -4,9 +4,9 @@
 setup_suite() {
 # shellcheck disable=SC2016
-$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
+_kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=full"]}]}}}}'
 block_until_ready_by_name kube-system kilo-userspace
-$KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
+_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }
 test_full_mesh_connectivity() {
@@ -23,9 +23,9 @@ test_full_mesh_peer() {
 test_full_mesh_allowed_location_ips() {
 docker exec kind-cluster-kilo-control-plane ip address add 10.6.0.1/32 dev eth0
-$KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips=10.6.0.1/32
+_kubectl annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips=10.6.0.1/32
 assert_equals Unauthorized "$(retry 10 5 'IP is not yet routable' curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz)" "should be able to make HTTP request to allowed location IP"
-$KUBECTL_BINARY annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips-
+_kubectl annotate node kind-cluster-kilo-control-plane kilo.squat.ai/allowed-location-ips-
 assert "retry 10 5 'IP is still routable' _not curl_pod -m 1 -s -k https://10.6.0.1:10250/healthz" "should not be able to make HTTP request to allowed location IP"
 docker exec kind-cluster-kilo-control-plane ip address delete 10.6.0.1/32 dev eth0
 }
@@ -39,5 +39,5 @@ test_reject_peer_empty_public_key() {
 }
 test_mesh_granularity_auto_detect() {
-assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity full)"
+assert_equals "$(_kgctl graph)" "$(_kgctl graph --mesh-granularity full)"
 }

File 2 of 3: the shared e2e test library

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-export KUBECONFIG="kind.yaml"
+KUBECONFIG="kind.yaml"
 KIND_CLUSTER="kind-cluster-kilo"
 KIND_BINARY="${KIND_BINARY:-kind}"
 KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"
@@ -33,6 +33,21 @@ _not() {
 return 0
 }
+# _kubectl is a helper that calls kubectl with the --kubeconfig flag.
+_kubectl() {
+$KUBECTL_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
+# _kgctl is a helper that calls kgctl with the --kubeconfig flag.
+_kgctl() {
+$KGCTL_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
+# _kind is a helper that calls kind with the --kubeconfig flag.
+_kind() {
+$KIND_BINARY --kubeconfig="$KUBECONFIG" "$@"
+}
 create_interface() {
 docker run -d --name="$1" --rm --network=host --cap-add=NET_ADMIN --device=/dev/net/tun -v /var/run/wireguard:/var/run/wireguard -e WG_LOG_LEVEL=debug leonnicolas/boringtun --foreground --disable-drop-privileges true "$1"
 }
@@ -42,7 +57,7 @@ delete_interface() {
 }
 create_peer() {
-cat <<EOF | $KUBECTL_BINARY apply -f -
+cat <<EOF | _kubectl apply -f -
 apiVersion: kilo.squat.ai/v1alpha1
 kind: Peer
 metadata:
@@ -56,12 +71,12 @@ EOF
 }
 delete_peer() {
-$KUBECTL_BINARY delete peer "$1"
+_kubectl delete peer "$1"
 }
 is_ready() {
-for pod in $($KUBECTL_BINARY -n "$1" get pods -o name -l "$2"); do
-if ! $KUBECTL_BINARY -n "$1" get "$pod" | tail -n 1 | grep -q Running; then
+for pod in $(_kubectl -n "$1" get pods -o name -l "$2"); do
+if ! _kubectl -n "$1" get "$pod" | tail -n 1 | grep -q Running; then
 return 1;
 fi
 done
@@ -81,35 +96,36 @@ block_until_ready() {
 # create_cluster launches a kind cluster and deploys Kilo, Adjacency, and a helper with curl.
 create_cluster() {
-$KIND_BINARY delete clusters $KIND_CLUSTER > /dev/null
+_kind delete clusters $KIND_CLUSTER > /dev/null
 # Create the kind cluster.
-$KIND_BINARY create cluster --name $KIND_CLUSTER --config ./kind-config.yaml
+_kind create cluster --name $KIND_CLUSTER --config ./kind-config.yaml
 # Load the Kilo image into kind.
 docker tag "$KILO_IMAGE" squat/kilo:test
+# This command does not accept the --kubeconfig flag, so call the command directly.
 $KIND_BINARY load docker-image squat/kilo:test --name $KIND_CLUSTER
 # Create the kubeconfig secret.
-$KUBECTL_BINARY create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
+_kubectl create secret generic kubeconfig --from-file=kubeconfig="$KUBECONFIG" -n kube-system
 # Apply Kilo to the cluster.
-$KUBECTL_BINARY apply -f ../manifests/crds.yaml
-$KUBECTL_BINARY apply -f kilo-kind-userspace.yaml
+_kubectl apply -f ../manifests/crds.yaml
+_kubectl apply -f kilo-kind-userspace.yaml
 block_until_ready_by_name kube-system kilo-userspace
-$KUBECTL_BINARY wait nodes --all --for=condition=Ready
+_kubectl wait nodes --all --for=condition=Ready
 # Wait for CoreDNS.
 block_until_ready kube-system k8s-app=kube-dns
 # Ensure the curl helper is not scheduled on a control-plane node.
-$KUBECTL_BINARY apply -f helper-curl.yaml
+_kubectl apply -f helper-curl.yaml
 block_until_ready_by_name default curl
-$KUBECTL_BINARY taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
-$KUBECTL_BINARY apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
+_kubectl taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
+_kubectl apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
 block_until_ready_by_name adjacency adjacency
 }
 delete_cluster () {
-$KIND_BINARY delete clusters $KIND_CLUSTER
+_kind delete clusters $KIND_CLUSTER
 }
 curl_pod() {
-$KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c "curl $*"
+_kubectl get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" --kubeconfig="$KUBECONFIG" exec {} -- /bin/sh -c "curl $*"
 }
 check_ping() {
@@ -123,7 +139,7 @@ check_ping() {
 shift
 done
-for ip in $($KUBECTL_BINARY get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
+for ip in $(_kubectl get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
 if [ -n "$LOCAL" ]; then
 ping=$(curl -m 1 -s http://"$ip":8080/ping)
 else
@@ -140,7 +156,7 @@ check_ping() {
 }
 check_adjacent() {
-$KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
+_kubectl get pods -l app.kubernetes.io/name=curl -o name | xargs -I{} "$KUBECTL_BINARY" --kubeconfig="$KUBECONFIG" exec {} -- /bin/sh -c 'curl -m 1 -s adjacency:8080/?format=fancy'
 [ "$(curl_pod -m 1 -s adjacency:8080/?format=json | jq | grep -c true)" -eq "$1" ]
 }
@@ -152,14 +168,14 @@ check_peer() {
 create_interface "$INTERFACE"
 docker run --rm --entrypoint=/usr/bin/wg "$KILO_IMAGE" genkey > "$INTERFACE"
 assert "create_peer $PEER $ALLOWED_IP 10 $(docker run --rm --entrypoint=/bin/sh -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" -c 'cat /key | wg pubkey')" "should be able to create Peer"
-assert "$KGCTL_BINARY showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
+assert "_kgctl showconf peer $PEER --mesh-granularity=$GRANULARITY > $PEER.ini" "should be able to get Peer configuration"
 assert "docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v $PWD/$PEER.ini:/peer.ini $KILO_IMAGE setconf $INTERFACE /peer.ini" "should be able to apply configuration from kgctl"
 docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/usr/bin/wg -v /var/run/wireguard:/var/run/wireguard -v "$PWD/$INTERFACE":/key "$KILO_IMAGE" set "$INTERFACE" private-key /key
 docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" address add "$ALLOWED_IP" dev "$INTERFACE"
 docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" link set "$INTERFACE" up
 docker run --rm --network=host --cap-add=NET_ADMIN --entrypoint=/sbin/ip "$KILO_IMAGE" route add 10.42/16 dev "$INTERFACE"
 assert "retry 10 5 '' check_ping --local" "should be able to ping Pods from host"
-assert_equals "$($KGCTL_BINARY showconf peer "$PEER")" "$($KGCTL_BINARY showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
+assert_equals "$(_kgctl showconf peer "$PEER")" "$(_kgctl showconf peer "$PEER" --mesh-granularity="$GRANULARITY")" "kgctl should be able to auto detect the mesh granularity"
 rm "$INTERFACE" "$PEER".ini
 delete_peer "$PEER"
 delete_interface "$INTERFACE"

File 3 of 3: the location-mesh e2e test suite

@@ -4,9 +4,9 @@
 setup_suite() {
 # shellcheck disable=SC2016
-$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
+_kubectl patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--kubeconfig=/etc/kubernetes/kubeconfig","--mesh-granularity=location"]}]}}}}'
 block_until_ready_by_name kube-system kilo-userspace
-$KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
+_kubectl wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
 }
 test_location_mesh_connectivity() {
@@ -22,5 +22,5 @@ test_location_mesh_peer() {
 }
 test_mesh_granularity_auto_detect() {
-assert_equals "$($KGCTL_BINARY graph)" "$($KGCTL_BINARY graph --mesh-granularity location)"
+assert_equals "$(_kgctl graph)" "$(_kgctl graph --mesh-granularity location)"
 }