Files
kubesolo-os/update/cmd/apply.go
Adolfo Delorenzo 9fb894c5af
Some checks failed
ARM64 Build / Build generic ARM64 disk image (push) Failing after 4s
CI / Go Tests (push) Successful in 1m29s
CI / Shellcheck (push) Successful in 48s
CI / Build Go Binaries (amd64, linux, linux-amd64) (push) Successful in 1m12s
CI / Build Go Binaries (arm64, linux, linux-arm64) (push) Has been cancelled
feat(update): pre-flight gates + deeper healthcheck + auto-rollback
Phase 8 of v0.3. Tightens the update lifecycle on both ends.

Pre-flight (apply.go, before any download):
- Free-space check on the passive partition: image size + 10% headroom must
  be available. Uses statfs(2) via the new pkg/partition.FreeBytes /
  HasFreeSpaceFor helpers (tests cover happy path, tiny request, huge
  request, missing path). Catches corrupted-FS and shrunk-partition cases
  before we destroy the existing slot data.
- Node-block-label check: refuses if the local K8s node carries the
  updates.kubesolo.io/block=true label. New pkg/health.CheckNodeBlocked
  shells out to kubectl per the project's zero-deps stance. Silently bypassed
  when no kubeconfig is reachable (air-gap case). Skipped by --force.

Healthcheck (extended via new pkg/health/extended.go + preflight.go):
- CheckKubeSystemReady waits until every kube-system pod has held the Running
  phase for >= N seconds (default 30). Catches "started ok, will crash-loop"
  bugs that a single-shot phase check misses.
- CheckProbeURL fetches an operator-supplied URL; 200 = pass. Wired through
  update.conf as healthcheck_url= and cloud-init updates.healthcheck_url.
- CheckDiskWritable writes/fsyncs/reads a 1-KiB probe under /var/lib/kubesolo.
  Always runs in healthcheck so a wedged data partition fails fast.
- pkg/health.Status grows KubeSystemReady, ProbeURL, DiskWritable booleans.
  Optional checks default to true in RunAll() so they don't block when
  unconfigured. health_test.go updated to the new 6-field shape.

Auto-rollback (healthcheck.go):
- state.UpdateState gains HealthCheckFailures (consecutive post-Activated
  failures). Reset on a clean pass.
- --auto-rollback-after N (also auto_rollback_after= in update.conf) triggers
  env.ForceRollback() when the failure count reaches the threshold. State
  transitions to RolledBack with a descriptive LastError. The command still
  exits with the healthcheck error; the operator/init is expected to reboot.
- Only fires while Phase == Activated. Doesn't second-guess a long-stable
  system that happens to fail one healthcheck.

config / opts / cloud-init plumbing:
- update.conf gains healthcheck_url= and auto_rollback_after= keys.
- New CLI flags: --healthcheck-url, --auto-rollback-after, --kube-system-settle.
- cloud-init full-config.yaml documents the new updates: subfields.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-14 19:08:30 -06:00

243 lines
8.8 KiB
Go

package cmd
import (
"context"
"fmt"
"log/slog"
"os"
"runtime"
"time"
"github.com/portainer/kubesolo-os/update/pkg/config"
"github.com/portainer/kubesolo-os/update/pkg/health"
"github.com/portainer/kubesolo-os/update/pkg/image"
"github.com/portainer/kubesolo-os/update/pkg/oci"
"github.com/portainer/kubesolo-os/update/pkg/partition"
"github.com/portainer/kubesolo-os/update/pkg/state"
)
// applyMetadataGates enforces channel / architecture / min-version policy on
// resolved update metadata, regardless of transport (HTTP or OCI). Records
// any failure to the state file before returning it to the caller.
func applyMetadataGates(opts opts, st *state.UpdateState, meta *image.UpdateMetadata) error {
	// reject persists the gate failure to the state file (best-effort) and
	// hands the error back so callers can simply `return reject(...)`.
	reject := func(err error) error {
		_ = st.RecordError(opts.StatePath, err)
		return err
	}

	// Channel gate: an empty metadata channel means "any channel".
	if meta.Channel != "" && meta.Channel != opts.Channel {
		return reject(fmt.Errorf("metadata channel %q does not match local channel %q",
			meta.Channel, opts.Channel))
	}

	// Architecture gate: an empty metadata arch means "any arch".
	if meta.Architecture != "" && meta.Architecture != runtime.GOARCH {
		return reject(fmt.Errorf("metadata architecture %q does not match runtime %q",
			meta.Architecture, runtime.GOARCH))
	}

	// Min-version gate only applies when both sides are known.
	if meta.MinCompatibleVersion == "" || st.FromVersion == "" {
		return nil
	}
	cmp, cerr := config.CompareVersions(st.FromVersion, meta.MinCompatibleVersion)
	switch {
	case cerr != nil:
		// Unparseable versions are logged, not fatal — don't block an
		// update over a comparison we couldn't perform.
		slog.Warn("min-version comparison failed", "error", cerr,
			"from", st.FromVersion, "min", meta.MinCompatibleVersion)
	case cmp < 0:
		return reject(fmt.Errorf("current version %s is below min_compatible_version %s; install %s first",
			st.FromVersion, meta.MinCompatibleVersion, meta.MinCompatibleVersion))
	}
	return nil
}
// Apply downloads a new OS image and writes it to the passive partition.
// It does NOT activate the new partition — use 'activate' for that.
//
// Transport is either plain HTTP (--server, latest.json) or an OCI registry
// (--registry, manifest tag); exactly one must be configured.
//
// State transitions: Idle/Success/Failed → Checking → Downloading → Staged.
// On any error the state moves to Failed with LastError set.
func Apply(args []string) error {
	opts := parseOpts(args)
	// Exactly one transport must be configured.
	if opts.ServerURL == "" && opts.Registry == "" {
		return fmt.Errorf("--server or --registry is required (or set in /etc/kubesolo/update.conf)")
	}
	if opts.ServerURL != "" && opts.Registry != "" {
		return fmt.Errorf("--server and --registry are mutually exclusive")
	}

	// Maintenance window gate — earliest cheap check, before any HTTP work.
	// Skipped with --force.
	window, werr := config.ParseWindow(opts.MaintenanceWindow)
	if werr != nil {
		return fmt.Errorf("parse maintenance_window: %w", werr)
	}
	if !opts.Force && !window.Contains(time.Now()) {
		return fmt.Errorf("outside maintenance window (%s); pass --force to override",
			window.String())
	}

	// Node-block-label gate — workload authors can defer an update by
	// labeling the node updates.kubesolo.io/block=true. Skipped with --force
	// and silently bypassed when the check itself fails (e.g. no reachable
	// K8s API in air-gapped installs): a check error only logs a warning.
	if !opts.Force {
		blocked, berr := health.CheckNodeBlocked("")
		if berr != nil {
			slog.Warn("node-block check failed, allowing update", "error", berr)
		} else if blocked {
			return fmt.Errorf("node carries label %s=true; refusing update (pass --force to override)",
				health.NodeBlockLabel)
		}
	}

	st, err := state.Load(opts.StatePath)
	if err != nil {
		// Don't block the operation on a corrupt state file. Log + recover
		// with a fresh in-memory state.
		slog.Warn("state file unreadable, starting fresh", "error", err)
		st = state.New()
	}
	env := opts.NewBootEnv()

	// Record the current running version as the "from" reference. The active
	// slot's version file is the most reliable source. Every failure in this
	// probe is deliberately non-fatal (errors are ignored at each step):
	// FromVersion only feeds the advisory min-version gate, so an unreadable
	// version file must not block the update.
	activeSlot, slotErr := env.ActiveSlot()
	if slotErr == nil {
		if partInfo, perr := partition.GetSlotPartition(activeSlot); perr == nil {
			mp := "/tmp/kubesolo-active-" + activeSlot
			if merr := partition.MountReadOnly(partInfo.Device, mp); merr == nil {
				if v, rerr := partition.ReadVersion(mp); rerr == nil {
					st.SetFromVersion(v)
				}
				partition.Unmount(mp)
			}
		}
	}

	// Determine passive slot — this is the write target for the new image.
	passiveSlot, err := env.PassiveSlot()
	if err != nil {
		_ = st.RecordError(opts.StatePath, fmt.Errorf("reading passive slot: %w", err))
		return fmt.Errorf("reading passive slot: %w", err)
	}
	slog.Info("applying update", "target_slot", passiveSlot)

	stageDir := "/tmp/kubesolo-update-stage"
	// State-file transitions are best-effort throughout: a failed write is
	// logged but never aborts the update itself.
	if err := st.Transition(opts.StatePath, state.PhaseChecking, "", ""); err != nil {
		slog.Warn("state transition failed", "phase", state.PhaseChecking, "error", err)
	}

	// Resolve metadata via the configured transport. OCI registry mode pulls
	// the manifest only; HTTP mode hits latest.json.
	var (
		meta   *image.UpdateMetadata
		staged *image.StagedImage
	)
	if opts.Registry != "" {
		ociClient, err := oci.NewClient(opts.Registry)
		if err != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("oci client: %w", err))
			return fmt.Errorf("oci client: %w", err)
		}
		// Tag precedence: explicit --tag, then the configured channel,
		// then "stable" as the final fallback.
		tag := opts.Tag
		if tag == "" {
			tag = opts.Channel
		}
		if tag == "" {
			tag = "stable"
		}
		meta, err = ociClient.FetchMetadata(context.Background(), tag)
		if err != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("oci fetch metadata: %w", err))
			return fmt.Errorf("oci fetch metadata: %w", err)
		}
		// Channel / architecture / min-version policy; it records its own
		// failure to the state file before returning.
		if err := applyMetadataGates(opts, st, meta); err != nil {
			return err
		}
		if err := st.Transition(opts.StatePath, state.PhaseDownloading, meta.Version, ""); err != nil {
			slog.Warn("state transition failed", "phase", state.PhaseDownloading, "error", err)
		}
		staged, _, err = ociClient.Pull(context.Background(), tag, stageDir)
		if err != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("oci pull: %w", err))
			return fmt.Errorf("oci pull: %w", err)
		}
	} else {
		client := image.NewClient(opts.ServerURL, stageDir)
		defer client.Cleanup()
		if opts.PubKeyPath != "" {
			client.SetPublicKeyPath(opts.PubKeyPath)
			slog.Info("signature verification enabled", "pubkey", opts.PubKeyPath)
		}
		var err error
		meta, err = client.CheckForUpdate()
		if err != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("checking for update: %w", err))
			return fmt.Errorf("checking for update: %w", err)
		}
		// Same policy gates as the OCI path; records its own failure.
		if err := applyMetadataGates(opts, st, meta); err != nil {
			return err
		}
		if err := st.Transition(opts.StatePath, state.PhaseDownloading, meta.Version, ""); err != nil {
			slog.Warn("state transition failed", "phase", state.PhaseDownloading, "error", err)
		}
		staged, err = client.Download(meta)
		if err != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("downloading update: %w", err))
			return fmt.Errorf("downloading update: %w", err)
		}
	}
	slog.Info("update available", "version", meta.Version, "channel", meta.Channel, "arch", meta.Architecture)

	// Mount passive partition read-write; the deferred Unmount covers every
	// exit path from here down.
	partInfo, err := partition.GetSlotPartition(passiveSlot)
	if err != nil {
		_ = st.RecordError(opts.StatePath, fmt.Errorf("finding passive partition: %w", err))
		return fmt.Errorf("finding passive partition: %w", err)
	}
	mountPoint := "/tmp/kubesolo-passive-" + passiveSlot
	if err := partition.MountReadWrite(partInfo.Device, mountPoint); err != nil {
		_ = st.RecordError(opts.StatePath, fmt.Errorf("mounting passive partition: %w", err))
		return fmt.Errorf("mounting passive partition: %w", err)
	}
	defer partition.Unmount(mountPoint)

	// Free-space pre-write check: the passive partition must have at least
	// (kernel + initramfs) + 10% headroom. Catches corrupted-FS reports and
	// shrunk/wrong-size partitions before we destroy the existing slot data.
	var imgSize int64
	for _, p := range []string{staged.VmlinuzPath, staged.InitramfsPath} {
		fi, ferr := os.Stat(p)
		if ferr != nil {
			_ = st.RecordError(opts.StatePath, fmt.Errorf("stat staged file %s: %w", p, ferr))
			return fmt.Errorf("stat staged file %s: %w", p, ferr)
		}
		imgSize += fi.Size()
	}
	avail, ok, ferr := partition.HasFreeSpaceFor(mountPoint, imgSize, 10)
	if ferr != nil {
		_ = st.RecordError(opts.StatePath, fmt.Errorf("free-space check: %w", ferr))
		return fmt.Errorf("free-space check: %w", ferr)
	}
	if !ok {
		err := fmt.Errorf("insufficient space on %s: have %.1f MiB, need %.1f MiB (image + 10%% headroom)",
			passiveSlot, float64(avail)/(1<<20), float64(imgSize)*1.1/(1<<20))
		_ = st.RecordError(opts.StatePath, err)
		return err
	}

	// Write image to passive partition.
	if err := partition.WriteSystemImage(mountPoint, staged.VmlinuzPath, staged.InitramfsPath, staged.Version); err != nil {
		_ = st.RecordError(opts.StatePath, fmt.Errorf("writing system image: %w", err))
		return fmt.Errorf("writing system image: %w", err)
	}
	if err := st.Transition(opts.StatePath, state.PhaseStaged, staged.Version, ""); err != nil {
		slog.Warn("state transition failed", "phase", state.PhaseStaged, "error", err)
	}
	fmt.Printf("Update v%s written to slot %s (%s)\n", staged.Version, passiveSlot, partInfo.Device)
	fmt.Println("Run 'kubesolo-update activate' to boot into the new version")
	return nil
}