apiVersion: v1
kind: ConfigMap
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
data:
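  # CNI configuration rendered onto each node by the install-cni init
  # containers below. The 1420 MTU leaves headroom for WireGuard's
  # encapsulation overhead on a standard 1500-byte link.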
  cni-conf.json: |
    {
       "cniVersion":"0.3.1",
       "name":"kilo",
       "plugins":[
          {
             "name":"kubernetes",
             "type":"bridge",
             "bridge":"kube-bridge",
             "isDefaultGateway":true,
             "forceAddress":true,
             "mtu": 1420,
             "ipam":{
                "type":"host-local"
             }
          },
          {
             "type":"portmap",
             "snat":true,
             "capabilities":{
                "portMappings":true
             }
          }
       ]
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kilo
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kilo
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - get
  - patch
  - watch
- apiGroups:
  - kilo.squat.ai
  resources:
  - peers
  verbs:
  - list
  - update
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
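# Kilo watches and patches Node objects to exchange WireGuard details
# (annotations such as public keys and endpoints), watches Peer custom
# resources, and uses the create verb on customresourcedefinitions to
# register the Peer CRD at startup.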
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kilo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kilo
subjects:
  - kind: ServiceAccount
    name: kilo
    namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kilo
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kilo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo
    spec:
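      # nkml (defined at the bottom of this file) labels nodes that have the
      # WireGuard kernel module with nkml.squat.ai/wireguard: "true", so this
      # DaemonSet runs Kilo with kernel WireGuard on exactly those nodes.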
      nodeSelector:
        nkml.squat.ai/wireguard: "true"
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --hostname=$(NODE_NAME)
        - --interface=kilo0
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes/kubeconfig
          readOnly: true
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
          readOnly: false
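      # The init container below copies the CNI plugin binaries shipped in the
      # Kilo image onto the host and installs the CNI config rendered from the
      # ConfigMap, removing any pre-existing files in /etc/cni/net.d first.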
      initContainers:
      - name: install-cni
        image: squat/kilo
        command:
        - /bin/sh
        - -c
        - set -e -x;
          cp /opt/cni/bin/* /host/opt/cni/bin/;
          TMP_CONF="$CNI_CONF_NAME".tmp;
          echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
          rm -f /host/etc/cni/net.d/*;
          mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
        env:
        - name: CNI_CONF_NAME
          value: 10-kilo.conflist
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: kilo
              key: cni-conf.json
        volumeMounts:
        - name: cni-bin-dir
          mountPath: /host/opt/cni/bin
        - name: cni-conf-dir
          mountPath: /host/etc/cni/net.d
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        hostPath: 
          # Since Kilo runs as a DaemonSet, it is recommended that you copy the
          # k3s.yaml kubeconfig file from the master node to all worker nodes
          # at the same path.
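          # For example, from the master (hypothetical worker hostname; ensure
          # the target directory exists on the worker):
          #   scp /etc/rancher/k3s/k3s.yaml worker-1:/etc/rancher/k3s/k3s.yaml
          # Note that k3s.yaml points at https://127.0.0.1:6443 by default, so
          # adjust the server address so workers can reach the API server.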
          path: /etc/rancher/k3s/k3s.yaml
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kilo-userspace
  namespace: kube-system
  labels:
    app.kubernetes.io/name: kilo-userspace
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kilo-userspace
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kilo-userspace
    spec:
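      # This variant runs on nodes without the WireGuard kernel module
      # (labeled "false" by nkml) and relies on the boringtun sidecar to
      # provide the kilo0 device in userspace, hence --create-interface=false.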
      nodeSelector:
        nkml.squat.ai/wireguard: "false"
      serviceAccountName: kilo
      hostNetwork: true
      containers:
      - name: kilo
        image: squat/kilo 
        args:
        - --kubeconfig=/etc/kubernetes/kubeconfig
        - --hostname=$(NODE_NAME)
        - --create-interface=false
        - --interface=kilo0
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kilo-dir
          mountPath: /var/lib/kilo
        - name: kubeconfig
          mountPath: /etc/kubernetes/kubeconfig
          readOnly: true
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
          readOnly: false
        - name: wireguard
          mountPath: /var/run/wireguard
          readOnly: false
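      # boringtun is a userspace WireGuard implementation; it creates the
      # kilo0 device, and Kilo configures it over the UAPI socket in the
      # shared /var/run/wireguard directory.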
      - name: boringtun
        image: leonnicolas/boringtun
        args:
        - --disable-drop-privileges=true
        - --foreground
        - kilo0
        securityContext:
          privileged: true
        volumeMounts:
        - name: wireguard
          mountPath: /var/run/wireguard
          readOnly: false
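      # Identical CNI installation step to the one in the kilo DaemonSet above.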
      initContainers:
      - name: install-cni
        image: squat/kilo
        command:
        - /bin/sh
        - -c
        - set -e -x;
          cp /opt/cni/bin/* /host/opt/cni/bin/;
          TMP_CONF="$CNI_CONF_NAME".tmp;
          echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
          rm -f /host/etc/cni/net.d/*;
          mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
        env:
        - name: CNI_CONF_NAME
          value: 10-kilo.conflist
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: kilo
              key: cni-conf.json
        volumeMounts:
        - name: cni-bin-dir
          mountPath: /host/opt/cni/bin
        - name: cni-conf-dir
          mountPath: /host/etc/cni/net.d
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      volumes:
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kilo-dir
        hostPath:
          path: /var/lib/kilo
      - name: kubeconfig
        hostPath: 
          # Since Kilo runs as a DaemonSet, it is recommended that you copy the
          # k3s.yaml kubeconfig file from the master node to all worker nodes
          # at the same path; see the note on the kilo DaemonSet above.
          path: /etc/rancher/k3s/k3s.yaml
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      - name: wireguard
        hostPath:
          path: /var/run/wireguard
---
kind: DaemonSet
apiVersion: apps/v1
metadata: 
  name: nkml
  namespace: kube-system
  labels:
    app.kubernetes.io/name: nkml
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: nkml
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nkml
    spec:
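      # nkml (node kernel module labeler) checks each node for the WireGuard
      # kernel module and sets the nkml.squat.ai/wireguard label to "true" or
      # "false", which drives the nodeSelectors of the two DaemonSets above.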
      hostNetwork: true
      containers:
      - name: nkml
        image: leonnicolas/nkml
        args: 
        - --hostname=$(NODE_NAME)
        - --label-mod=wireguard
        - --kubeconfig=/etc/kubernetes/kubeconfig
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        ports:
        - name: http
          containerPort: 8080
        volumeMounts:
        - name: kubeconfig
          mountPath: /etc/kubernetes/kubeconfig
          readOnly: true
      volumes:
      - name: kubeconfig
        hostPath: 
          # Since the DaemonSets above depend on the labels set by nkml, and
          # nkml itself would need a CNI to start, it has to run on the host
          # network and use this kubeconfig to label the nodes.
          path: /etc/rancher/k3s/k3s.yaml