Merge pull request #171 from squat/e2e

Makefile, e2e/*: Add end to end tests
This commit is contained in:
leonnicolas 2021-05-20 19:40:36 +02:00 committed by GitHub
commit 3422e8a40c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 388 additions and 2 deletions


@@ -66,6 +66,18 @@ jobs:
- name: Run Unit Tests
run: make unit
e2e:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.15.7
- name: Run e2e Tests
run: make e2e
lint:
runs-on: ubuntu-latest
steps:
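
The e2e job runs only on pull requests and simply calls the new Makefile target, so the same suite can be reproduced locally; a minimal sketch, assuming Docker is available for kind:

make e2e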

Makefile

@@ -1,5 +1,5 @@
export GO111MODULE=on
.PHONY: push container clean container-name container-latest push-latest fmt lint test unit vendor header generate client deepcopy informer lister openapi manifest manfest-latest manifest-annotate manifest manfest-latest manifest-annotate release gen-docs
.PHONY: push container clean container-name container-latest push-latest fmt lint test unit vendor header generate client deepcopy informer lister openapi manifest manfest-latest manifest-annotate manifest manfest-latest manifest-annotate release gen-docs e2e
OS ?= $(shell go env GOOS)
ARCH ?= $(shell go env GOARCH)
@@ -40,6 +40,9 @@ LISTER_GEN_BINARY := bin/lister-gen
OPENAPI_GEN_BINARY := bin/openapi-gen
GOLINT_BINARY := bin/golint
EMBEDMD_BINARY := bin/embedmd
KIND_BINARY := $(shell pwd)/bin/kind
KUBECTL_BINARY := $(shell pwd)/bin/kubectl
BASH_UNIT := $(shell pwd)/bin/bash_unit
BUILD_IMAGE ?= golang:1.15.7-alpine
BASE_IMAGE ?= alpine:3.12
@@ -195,7 +198,22 @@ lint: header $(GOLINT_BINARY)
unit:
go test -mod=vendor --race ./...
test: lint unit
test: lint unit e2e
$(KIND_BINARY):
curl -Lo $@ https://kind.sigs.k8s.io/dl/v0.10.0/kind-linux-$(ARCH)
chmod +x $@
$(KUBECTL_BINARY):
curl -Lo $@ https://dl.k8s.io/release/v1.21.0/bin/linux/$(ARCH)/kubectl
chmod +x $@
$(BASH_UNIT):
curl -Lo $@ https://raw.githubusercontent.com/pgrange/bash_unit/v1.6.0/bash_unit
chmod +x $@
e2e: container ${KIND_BINARY} ${KUBECTL_BINARY} $(BASH_UNIT)
KILO_IMAGE=${IMAGE}:${ARCH}-${VERSION} KIND_BINARY=${KIND_BINARY} $(BASH_UNIT) ./e2e/kind.sh
header: .header
@HEADER=$$(cat .header); \
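
The e2e target builds the Kilo container image, downloads pinned kind, kubectl, and bash_unit binaries into bin/, and then hands the test script to bash_unit with the image name and kind binary in the environment. Invoked by hand it looks roughly like this (a sketch; the image tag is illustrative, and bash_unit runs the script from its own directory, so the relative paths inside e2e/kind.sh resolve):

make container
KILO_IMAGE=squat/kilo:amd64-latest KIND_BINARY=$(pwd)/bin/kind ./bin/bash_unit ./e2e/kind.sh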

e2e/helper-curl.yaml Normal file

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
labels:
app.kubernetes.io/name: curl
name: curl
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: curl
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/name: curl
spec:
containers:
- command:
- /bin/sh
- -c
- while true; do sleep 10; done
image: curlimages/curl
name: curl
restartPolicy: Always
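
This deployment only parks a long-sleeping pod with curl in the default namespace; the suite execs into it to probe other pods and services from inside the cluster. For example (a sketch; the pod IP is illustrative):

kubectl get pods -l app.kubernetes.io/name=curl -o name \
| xargs -i kubectl exec {} -- /bin/sh -c 'curl -s http://10.42.0.10:8080/ping'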

e2e/kilo-kind-userspace.yaml Normal file

@@ -0,0 +1,197 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo
data:
cni-conf.json: |
{
"cniVersion":"0.3.1",
"name":"kilo",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"forceAddress":true,
"mtu": 1420,
"ipam":{
"type":"host-local"
}
},
{
"type":"portmap",
"snat":true,
"capabilities":{
"portMappings":true
}
}
]
}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kilo
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kilo
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- patch
- watch
- apiGroups:
- kilo.squat.ai
resources:
- peers
verbs:
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kilo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kilo
subjects:
- kind: ServiceAccount
name: kilo
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kilo
namespace: kube-system
labels:
app.kubernetes.io/name: kilo-userspace
app.kubernetes.io/part-of: kilo
spec:
selector:
matchLabels:
app.kubernetes.io/name: kilo-userspace
app.kubernetes.io/part-of: kilo
template:
metadata:
labels:
app.kubernetes.io/name: kilo-userspace
app.kubernetes.io/part-of: kilo
spec:
serviceAccountName: kilo
hostNetwork: true
containers:
- name: kilo
image: squat/kilo:test
imagePullPolicy: Never
args:
- --hostname=$(NODE_NAME)
- --create-interface=false
- --mesh-granularity=full
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- containerPort: 1107
name: metrics
securityContext:
privileged: true
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kilo-dir
mountPath: /var/lib/kilo
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
- name: boringtun
image: leonnicolas/boringtun:alpine
args:
- --disable-drop-privileges=true
- --foreground
- kilo0
securityContext:
privileged: true
volumeMounts:
- name: wireguard
mountPath: /var/run/wireguard
readOnly: false
initContainers:
- name: install-cni
image: squat/kilo:test
imagePullPolicy: Never
command:
- /bin/sh
- -c
- set -e -x;
cp /opt/cni/bin/* /host/opt/cni/bin/;
TMP_CONF="$CNI_CONF_NAME".tmp;
echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
rm -f /host/etc/cni/net.d/*;
mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
env:
- name: CNI_CONF_NAME
value: 10-kilo.conflist
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kilo
key: cni-conf.json
volumeMounts:
- name: cni-bin-dir
mountPath: /host/opt/cni/bin
- name: cni-conf-dir
mountPath: /host/etc/cni/net.d
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kilo-dir
hostPath:
path: /var/lib/kilo
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
- name: wireguard
hostPath:
path: /var/run/wireguard
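
Since Kilo is started with --create-interface=false, the boringtun sidecar is what creates the kilo0 WireGuard device in userspace, and the two containers hand it off through the shared /var/run/wireguard hostPath. A quick sanity check after applying the manifest (a sketch):

kubectl -n kube-system rollout status ds/kilo
kubectl -n kube-system logs ds/kilo -c kilo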

e2e/kind-config.yaml Normal file

@@ -0,0 +1,9 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
networking:
disableDefaultCNI: true # disable kindnet
podSubnet: 10.42.0.0/16
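
With kindnet disabled, the three nodes stay NotReady until Kilo's init container installs a CNI config, which is exactly what the suite exercises. Creating the cluster by hand mirrors setup_suite below (a sketch):

kind create cluster --name kind-cluster-kilo --config e2e/kind-config.yaml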

e2e/kind.sh Executable file

@@ -0,0 +1,124 @@
#!/usr/bin/env bash
KUBECONFIG="kind.yaml"
KIND_CLUSTER="kind-cluster-kilo"
KIND_BINARY="${KIND_BINARY:-kind}"
KUBECTL_BINARY="${KUBECTL_BINARY:-kubectl}"
KILO_IMAGE="${KILO_IMAGE:-squat/kilo}"
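# This file is executed by bash_unit: functions named test_* are the test cases,
# and setup_suite/teardown_suite run once before and after all of them.
# Returns non-zero if any pod matching label ${2} in namespace ${1} is not Running.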
is_ready() {
for pod in $(${KUBECTL_BINARY} -n ${1} get pods -o name -l ${2}); do
${KUBECTL_BINARY} -n ${1} get $pod | tail -n 1 | grep -q Running;
if [ $? -ne 0 ]; then
return 1;
fi
done
return 0
}
# Blocks until all pods with the given app.kubernetes.io/name label in the given namespace are ready.
block_until_ready_by_name() {
block_until_ready ${1} app.kubernetes.io/name=${2}
}
# Blocks until all pods matching the given label selector in the given namespace are ready.
block_until_ready() {
# Give up after 30 retries, i.e. roughly 150s.
for c in {1..30}; do
if ! is_ready ${1} ${2}; then
echo "Some ${2} pods are not ready yet. Retries=$c/30"
sleep 5
else
break
fi
done
return 0
}
# Block waits until pods are ready. It is less reliable when patching pods because it sometimes checks the state of old pods.
block() {
$KUBECTL_BINARY -n ${1} wait -l "app.kubernetes.io/name=${2}" pod --for=condition=Ready
}
# Set up the kind cluster and deploy Kilo, Adjacency and a helper with curl.
setup_suite() {
$KIND_BINARY delete clusters $KIND_CLUSTER > /dev/null
# Create the kind cluster.
$KIND_BINARY create cluster --name $KIND_CLUSTER --config ./kind-config.yaml
# Load the Kilo image into kind.
docker tag $KILO_IMAGE squat/kilo:test
$KIND_BINARY load docker-image squat/kilo:test --name $KIND_CLUSTER
# Apply Kilo to the cluster.
$KUBECTL_BINARY apply -f kilo-kind-userspace.yaml
block_until_ready_by_name kube-system kilo-userspace
$KUBECTL_BINARY wait nodes --all --for=condition=Ready
# Wait for CoreDNS.
block_until_ready kube-system k8s-app=kube-dns
$KUBECTL_BINARY taint node $KIND_CLUSTER-control-plane node-role.kubernetes.io/master:NoSchedule-
$KUBECTL_BINARY apply -f https://raw.githubusercontent.com/heptoprint/adjacency/master/example.yaml
$KUBECTL_BINARY apply -f helper-curl.yaml
block_until_ready_by_name adjacency adjacency
block_until_ready_by_name default curl
}
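# Retries for up to ~150s until the curl helper gets a "pong" from every adjacency pod IP on port 8080.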
block_until_ping() {
for c in {1..30}; do
keepgoing=1
# Block until all IP addresses of the adjacency pods are reached.
for ip in $($KUBECTL_BINARY get pods -l app.kubernetes.io/name=adjacency -o jsonpath='{.items[*].status.podIP}'); do
ping=$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -i $KUBECTL_BINARY exec {} -- /bin/sh -c 'curl -s http://'$ip':8080/ping')
if [[ $ping == "pong" ]]; then
echo "successfully pinged $ip"
keepgoing=0
else
keepgoing=1
echo "expected \"pong\" got \"$ping\""
break
fi
done
if [[ $keepgoing == 0 ]]; then
break
else
sleep 5
fi
done
}
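# Asserts that the adjacency matrix reports 12 successful pings, then re-asserts after one 30s reconciliation period.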
check_adjacent() {
echo
echo "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -i $KUBECTL_BINARY exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=fancy')"
assert_equals "12" \
"$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -i $KUBECTL_BINARY exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=json' | jq | grep -c true)" \
"Adjacency returned the wrong number of successful pings"
echo "sleep for 30s (one reconciliation period) and try again..."
sleep 30
echo
echo "$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -i $KUBECTL_BINARY exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=fancy')"
assert_equals "12" \
"$($KUBECTL_BINARY get pods -l app.kubernetes.io/name=curl -o name | xargs -i $KUBECTL_BINARY exec {} -- /bin/sh -c 'curl -s adjacency:8080/?format=json' | jq | grep -c true)" \
"Adjacency returned the wrong number of successful pings"
}
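# The two test cases below switch Kilo's mesh granularity by patching the DaemonSet's
# container args in place, wait for the mesh to settle, and re-verify pod-to-pod connectivity.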
test_locationmesh() {
$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--mesh-granularity=location"]}]}}}}'
sleep 5
block_until_ready_by_name kube-system kilo-userspace
block_until_ping
$KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
sleep 5
check_adjacent
}
test_fullmesh() {
$KUBECTL_BINARY patch ds -n kube-system kilo -p '{"spec": {"template":{"spec":{"containers":[{"name":"kilo","args":["--hostname=$(NODE_NAME)","--create-interface=false","--mesh-granularity=full"]}]}}}}'
sleep 5
block_until_ready_by_name kube-system kilo-userspace
block_until_ping
$KUBECTL_BINARY wait pod -l app.kubernetes.io/name=adjacency --for=condition=Ready --timeout 3m
sleep 5
check_adjacent
}
teardown_suite () {
$KIND_BINARY delete clusters $KIND_CLUSTER
}