feat: initial Phase 1 PoC scaffolding for KubeSolo OS
Complete Phase 1 implementation of KubeSolo OS — an immutable, bootable Linux distribution built on Tiny Core Linux for running KubeSolo single-node Kubernetes. Build system: - Makefile with fetch, rootfs, initramfs, iso, disk-image targets - Dockerfile.builder for reproducible builds - Scripts to download Tiny Core, extract rootfs, inject KubeSolo, pack initramfs, and create bootable ISO/disk images Init system (10 POSIX sh stages): - Early mount (proc/sys/dev/cgroup2), cmdline parsing, persistent mount with bind-mounts, kernel module loading, sysctl, DHCP networking, hostname, clock sync, containerd prep, KubeSolo exec Shared libraries: - functions.sh (device wait, IP lookup, config helpers) - network.sh (static IP, config persistence, interface detection) - health.sh (containerd, API server, node readiness checks) - Emergency shell for boot failure debugging Testing: - QEMU boot test with serial log marker detection - K8s readiness test with kubectl verification - Persistence test (reboot + verify state survives) - Workload deployment test (nginx pod) - Local storage test (PVC + local-path provisioner) - Network policy test - Reusable run-vm.sh launcher Developer tools: - dev-vm.sh (interactive QEMU with port forwarding) - rebuild-initramfs.sh (fast iteration) - inject-ssh.sh (dropbear SSH for debugging) - extract-kernel-config.sh + kernel-audit.sh Documentation: - Full design document with architecture research - Boot flow documentation covering all 10 init stages - Cloud-init examples (DHCP, static IP, Portainer Edge, air-gapped) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
126
test/integration/test-local-storage.sh
Executable file
126
test/integration/test-local-storage.sh
Executable file
@@ -0,0 +1,126 @@
|
||||
#!/bin/bash
# test-local-storage.sh — Verify PVC with local-path provisioner works
# Usage: ./test/integration/test-local-storage.sh <iso-path>
# Requires: kubectl on host, QEMU
set -euo pipefail

ISO="${1:?Usage: $0 <path-to-iso>}"
TIMEOUT_K8S=300   # seconds to wait for the K8s API / node readiness
TIMEOUT_PVC=120   # seconds to wait for the storage pod to reach Running
API_PORT=6443     # host port forwarded to the guest kube-apiserver

# Back the guest data disk with a 2 GiB sparse file: `count=0 seek=2048`
# only sets the file length instead of writing 2 GiB of zeros, so setup is
# instant and mkfs.ext4 formats the sparse file just the same.
DATA_DISK=$(mktemp /tmp/kubesolo-data-XXXXXX.img)
dd if=/dev/zero of="$DATA_DISK" bs=1M count=0 seek=2048 2>/dev/null
mkfs.ext4 -q -L KSOLODATA "$DATA_DISK" 2>/dev/null

SERIAL_LOG=$(mktemp /tmp/kubesolo-storage-XXXXXX.log)
# Tear down everything on any exit path. The trap is installed before
# KUBECTL and QEMU_PID are defined, so an early failure (e.g. mkfs) would
# hit unset variables under `set -u` — default every expansion to keep
# cleanup safe no matter when it fires. All steps are best-effort.
cleanup() {
  # Clean up K8s resources (the VM may already be gone; ignore errors)
  ${KUBECTL:-false} delete pod test-storage --grace-period=0 --force 2>/dev/null || true
  ${KUBECTL:-false} delete pvc test-pvc 2>/dev/null || true
  kill "${QEMU_PID:-}" 2>/dev/null || true
  wait "${QEMU_PID:-}" 2>/dev/null || true   # reap the background QEMU
  rm -f -- "${DATA_DISK:-}" "${SERIAL_LOG:-}"
}
trap cleanup EXIT
# kubectl command line targeting the forwarded guest API server.
# Intentionally unquoted at call sites so it word-splits into cmd + args.
KUBECTL="kubectl --server=https://localhost:${API_PORT} --insecure-skip-tls-verify"

echo "==> Local storage test: $ISO"

# Launch QEMU headless: boot the ISO, attach the blank virtio data disk,
# forward the guest API server to the host, capture the serial console.
# NOTE: `-append` is only valid together with `-kernel`; combined with
# `-cdrom` QEMU refuses to start ("-append only allowed with -kernel"),
# so the kernel cmdline (console=ttyS0, kubesolo.data=/dev/vda) must be
# baked into the ISO's bootloader configuration instead.
qemu-system-x86_64 \
  -m 2048 -smp 2 \
  -nographic \
  -cdrom "$ISO" \
  -boot d \
  -drive "file=$DATA_DISK,format=raw,if=virtio" \
  -net nic,model=virtio \
  -net "user,hostfwd=tcp::${API_PORT}-:6443" \
  -serial "file:$SERIAL_LOG" \
  &
QEMU_PID=$!
# Wait for K8s API: poll `kubectl get nodes` until the node reports Ready.
echo " Waiting for K8s API..."
ELAPSED=0
while [ "$ELAPSED" -lt "$TIMEOUT_K8S" ]; do
  # -w matches "Ready" as a whole word: a bare `grep -q Ready` also
  # matches a "NotReady" node and would pass this gate prematurely.
  if $KUBECTL get nodes 2>/dev/null | grep -qw "Ready"; then
    break
  fi
  sleep 5
  ELAPSED=$((ELAPSED + 5))
done

if [ "$ELAPSED" -ge "$TIMEOUT_K8S" ]; then
  echo "==> FAIL: K8s not ready within ${TIMEOUT_K8S}s"
  exit 1
fi
echo " K8s ready (${ELAPSED}s)"
# Create PVC
# Requests a small RWO volume. No storageClassName is set, so this relies
# on the cluster having a default StorageClass (presumably local-path per
# the test's name — confirm; the pod stays Pending if no default exists).
echo "==> Creating PersistentVolumeClaim..."
$KUBECTL apply -f - << 'YAML'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 64Mi
YAML

# Create pod that uses the PVC
# The writer container drops a sentinel string onto the mounted volume,
# then sleeps so the verification step below can exec in and read it back.
echo "==> Creating pod with PVC..."
$KUBECTL apply -f - << 'YAML'
apiVersion: v1
kind: Pod
metadata:
  name: test-storage
spec:
  containers:
    - name: writer
      image: busybox:latest
      command: ["sh", "-c", "echo 'kubesolo-storage-test' > /data/test.txt && sleep 3600"]
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: test-pvc
YAML
# Wait for pod Running
# Poll the pod phase every 5s until it reaches Running or the PVC
# timeout lapses; on failure, dump the tail of `describe` for debugging.
echo " Waiting for storage pod..."
phase=""
waited=0
while [ "$waited" -lt "$TIMEOUT_PVC" ]; do
  phase=$($KUBECTL get pod test-storage -o jsonpath='{.status.phase}' 2>/dev/null || echo "")
  case "$phase" in
    Running) break ;;
  esac
  sleep 5
  waited=$((waited + 5))
done

if [ "$phase" != "Running" ]; then
  echo "==> FAIL: Storage pod did not reach Running (status: $phase)"
  $KUBECTL describe pod test-storage 2>/dev/null | tail -20 || true
  exit 1
fi
# Verify data was written
# Exec into the pod, read the sentinel file back off the PVC, and compare
# against the exact string the writer container wrote.
sleep 3   # give the writer container a moment to create the file
readback=$($KUBECTL exec test-storage -- cat /data/test.txt 2>/dev/null || echo "")
if [ "$readback" != "kubesolo-storage-test" ]; then
  echo "==> FAIL: Data verification failed (got: '$readback')"
  exit 1
fi
echo "==> PASS: Local storage provisioning works"
echo " PVC bound, pod running, data written and read back successfully"
exit 0
Reference in New Issue
Block a user