Bind kubeconfig HTTP server to 0.0.0.0:8080 (was 127.0.0.1) so integration tests can reach it via QEMU SLIRP port forwarding. Add shared wait_for_boot and fetch_kubeconfig helpers to qemu-helpers.sh. Update all 5 integration tests to fetch kubeconfig via HTTP and use it for kubectl authentication. All 6 tests pass on Linux with KVM: boot (18s), security (7/7), K8s ready (15s), workload deploy, local storage, network policy. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
158 lines
4.3 KiB
Bash
Executable File
#!/bin/bash
# test-local-storage.sh — Verify PVC with local-path provisioner works
# Usage: ./test/integration/test-local-storage.sh <iso-path>
# Requires: kubectl on host, QEMU
set -euo pipefail

# Required positional argument: path to the ISO image under test.
ISO="${1:?Usage: $0 <path-to-iso>}"
# Poll timeouts in seconds; both overridable from the environment.
TIMEOUT_K8S=${TIMEOUT_K8S:-300}   # wait for the K8s node to report Ready
TIMEOUT_PVC=${TIMEOUT_PVC:-180}   # wait for the PVC-backed pod to reach Running
# Host ports forwarded into the guest (see the QEMU hostfwd options below):
# apiserver and the guest's kubeconfig HTTP server.
API_PORT=6443
KC_PORT=8080

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): PROJECT_ROOT is not referenced in this file — presumably
# consumed by the sourced helpers below; confirm before removing.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Shared helpers: extract_kernel_from_iso, detect_kvm, wait_for_boot,
# fetch_kubeconfig (all called later in this script).
. "$SCRIPT_DIR/../lib/qemu-helpers.sh"
# Create a 2 GiB raw data disk for the guest's persistent storage.
# truncate creates it sparse (instant — no 2 GiB of zeroes written, which
# the previous dd if=/dev/zero paid for up front); QEMU raw images and
# mkfs both handle sparse files fine.
DATA_DISK=$(mktemp /tmp/kubesolo-data-XXXXXX.img)
truncate -s 2G "$DATA_DISK"
# -q suppresses normal chatter only; a real mkfs failure still prints to
# stderr and aborts the script via set -e (stderr is no longer discarded).
mkfs.ext4 -q -L KSOLODATA "$DATA_DISK"

# Guest serial console output lands here; wait_for_boot scans it.
SERIAL_LOG=$(mktemp /tmp/kubesolo-storage-XXXXXX.log)

# Globals consumed by cleanup(); initialized empty so the EXIT trap only
# tears down resources that were actually created.
QEMU_PID=""
EXTRACT_DIR=""
KUBECONFIG_FILE=""
#######################################
# Tear down everything the test created: K8s test resources, the QEMU
# guest, and all temp files.  Runs on every exit path via the EXIT trap.
# Globals (read): KUBECONFIG_FILE, QEMU_PID, DATA_DISK, SERIAL_LOG,
#                 EXTRACT_DIR
# Returns: always 0.  Each step is guarded with `if` rather than bare
#   `[ … ] && …` chains so a false guard cannot leave the function with a
#   non-zero status (which would trip errexit at any non-trap call site).
#######################################
cleanup() {
  # Best-effort removal of the K8s test resources; the cluster may never
  # have come up, so errors are deliberately ignored.
  if [ -n "$KUBECONFIG_FILE" ] && [ -f "$KUBECONFIG_FILE" ]; then
    kubectl --kubeconfig="$KUBECONFIG_FILE" --insecure-skip-tls-verify \
      delete pod test-storage --grace-period=0 --force 2>/dev/null || true
    kubectl --kubeconfig="$KUBECONFIG_FILE" --insecure-skip-tls-verify \
      delete pvc test-pvc 2>/dev/null || true
  fi
  # Stop the guest and reap it so the process is actually gone (and the
  # serial log is no longer being written) before files are removed.
  if [ -n "$QEMU_PID" ]; then
    kill "$QEMU_PID" 2>/dev/null || true
    wait "$QEMU_PID" 2>/dev/null || true
  fi
  rm -f -- "$DATA_DISK" "$SERIAL_LOG"
  if [ -n "$KUBECONFIG_FILE" ]; then
    rm -f -- "$KUBECONFIG_FILE"
  fi
  if [ -n "$EXTRACT_DIR" ]; then
    rm -rf -- "$EXTRACT_DIR"
  fi
}
# From here on, every exit path (success, failure, signal-driven exit)
# runs cleanup().
trap cleanup EXIT

echo "==> Local storage test: $ISO"

# Extract kernel from ISO
EXTRACT_DIR="$(mktemp -d /tmp/kubesolo-extract-XXXXXX)"
# Helper from qemu-helpers.sh; presumably sets/exports VMLINUZ and
# INITRAMFS, which the QEMU invocation below relies on — confirm there.
extract_kernel_from_iso "$ISO" "$EXTRACT_DIR"

# Helper from qemu-helpers.sh; expected to emit the acceleration flag(s)
# for this host (e.g. KVM options on Linux) or nothing — hence the
# deliberately unquoted expansion at the QEMU call site.
KVM_FLAG=$(detect_kvm)
# Launch QEMU
# Boot the extracted kernel/initramfs directly (no bootloader) with the
# raw data disk attached as virtio (/dev/vda per the kernel cmdline).
# User-mode networking forwards host API_PORT -> guest 6443 (apiserver)
# and host KC_PORT -> guest 8080 (kubeconfig HTTP server).
# NOTE(review): the legacy -net nic/-net user syntax is deprecated in
# newer QEMU in favor of -netdev/-device (or -nic) — works today, but
# worth migrating; confirm against the QEMU version used in CI.
# $KVM_FLAG is intentionally unquoted: it may expand to zero or more words.
# shellcheck disable=SC2086
qemu-system-x86_64 \
  -m 2048 -smp 2 \
  -nographic \
  $KVM_FLAG \
  -kernel "$VMLINUZ" \
  -initrd "$INITRAMFS" \
  -drive "file=$DATA_DISK,format=raw,if=virtio" \
  -net "nic,model=virtio" \
  -net "user,hostfwd=tcp::${API_PORT}-:6443,hostfwd=tcp::${KC_PORT}-:8080" \
  -serial "file:$SERIAL_LOG" \
  -append "console=ttyS0,115200n8 kubesolo.data=/dev/vda kubesolo.debug" \
  &
# PID of the backgrounded QEMU process; cleanup() kills and reaps it.
QEMU_PID=$!
# Wait for boot + fetch kubeconfig
echo " Waiting for boot..."
# Helper scans the serial log for the boot marker; 180 s budget.  The
# explicit `|| exit 1` makes the failure path obvious (and still fires
# the cleanup trap) rather than relying on set -e alone.
wait_for_boot "$SERIAL_LOG" "$QEMU_PID" 180 || exit 1

# Pull the guest-generated kubeconfig over the forwarded HTTP port.
KUBECONFIG_FILE=$(mktemp /tmp/kubesolo-kubeconfig-XXXXXX.yaml)
fetch_kubeconfig "$KC_PORT" "$KUBECONFIG_FILE" || exit 1

# Deliberately a plain string expanded UNQUOTED below so it word-splits
# into a command + flags.  Safe only because the mktemp path contains no
# whitespace; do not quote $KUBECTL at the call sites.
KUBECTL="kubectl --kubeconfig=$KUBECONFIG_FILE --insecure-skip-tls-verify"
# Wait for K8s API
# Poll `get nodes` every 5 s until some node reports Ready, giving up
# once the TIMEOUT_K8S budget is spent.
echo " Waiting for K8s node Ready..."
WAITED=0
while [ "$WAITED" -lt "$TIMEOUT_K8S" ]; do
  $KUBECTL get nodes 2>/dev/null | grep -q "Ready" && break
  sleep 5
  WAITED=$((WAITED + 5))
done

# The loop can only break early while WAITED < TIMEOUT_K8S, so the
# counter alone distinguishes success from timeout.
if [ "$WAITED" -lt "$TIMEOUT_K8S" ]; then
  echo " K8s ready (${WAITED}s)"
else
  echo "==> FAIL: K8s not ready within ${TIMEOUT_K8S}s"
  exit 1
fi
# Create PVC
# Apply a 64 Mi ReadWriteOnce claim.  No storageClassName is set, so it
# falls to the cluster's default StorageClass (presumably local-path —
# confirm against the image's provisioner configuration).
echo "==> Creating PersistentVolumeClaim..."
# Quoted 'YAML' delimiter: the manifest is passed to kubectl verbatim,
# with no shell expansion inside the here-doc.
$KUBECTL apply -f - << 'YAML'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 64Mi
YAML
# Create pod that uses the PVC
# The pod writes a known marker string into the PVC-backed mount, then
# sleeps so it stays Running long enough for the read-back check below.
echo "==> Creating pod with PVC..."
# Quoted 'YAML' delimiter: manifest is literal, no shell expansion.
$KUBECTL apply -f - << 'YAML'
apiVersion: v1
kind: Pod
metadata:
  name: test-storage
spec:
  containers:
    - name: writer
      image: busybox:latest
      command: ["sh", "-c", "echo 'kubesolo-storage-test' > /data/test.txt && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: test-pvc
YAML
# Wait for pod Running
# Poll the pod's status.phase every 5 s (image pull + PVC binding can
# take a while) until it reports Running or TIMEOUT_PVC expires.
echo " Waiting for storage pod..."
WAITED=0
PHASE=""
while [ "$WAITED" -lt "$TIMEOUT_PVC" ]; do
  PHASE=$($KUBECTL get pod test-storage -o jsonpath='{.status.phase}' 2>/dev/null || echo "")
  [ "$PHASE" = "Running" ] && break
  sleep 5
  WAITED=$((WAITED + 5))
done

if [ "$PHASE" != "Running" ]; then
  echo "==> FAIL: Storage pod did not reach Running (status: $PHASE)"
  # Dump the tail of the pod description as a diagnostic; best-effort.
  $KUBECTL describe pod test-storage 2>/dev/null | tail -20 || true
  exit 1
fi
# Verify data was written
# Give the writer container a moment to finish, then read the marker
# file back through the apiserver and compare with the expected value.
sleep 3
READBACK=$($KUBECTL exec test-storage -- cat /data/test.txt 2>/dev/null || echo "")
# Guard clause: bail out on mismatch, fall through to success otherwise.
if [ "$READBACK" != "kubesolo-storage-test" ]; then
  echo "==> FAIL: Data verification failed (got: '$READBACK')"
  exit 1
fi
echo "==> PASS: Local storage provisioning works"
echo " PVC bound, pod running, data written and read back successfully"
exit 0