# k8straining/all-deploy-services.yaml
# 2025-11-13 22:21:23 +13:00
apiVersion: v1
items:
- apiVersion: v1
kind: Pod
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/config: unconfined
container.apparmor.security.beta.kubernetes.io/install-cni-binaries: unconfined
container.apparmor.security.beta.kubernetes.io/mount-bpf-fs: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: "2025-11-02T23:42:16Z"
generateName: cilium-
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
controller-revision-hash: 957c85bbd
k8s-app: cilium
pod-template-generation: "2"
name: cilium-dz5tb
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: cilium
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
resourceVersion: "2119659"
uid: e6aa5803-1ff8-4fc7-adc0-2b2d876883e2
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node3
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
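# The DaemonSet controller pins each pod to its node with a matchFields
# nodeAffinity on metadata.name; the podAntiAffinity on the k8s-app=cilium
# label guarantees at most one agent per hostname.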
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
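# Instead of running privileged, the agent drops ALL capabilities and adds
# back only what it needs (e.g. NET_ADMIN/NET_RAW for the datapath,
# SYS_ADMIN and SYS_RESOURCE for BPF, SYS_MODULE for kernel modules).
# spc_t is the SELinux "super privileged container" type.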
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
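# All three probes hit the agent's local health endpoint
# (http://127.0.0.1:9879/healthz). The startupProbe budget is 5s initial
# delay plus 105 failures x 2s period = 210s before the pod is marked
# failed; liveness and readiness only take over once startup succeeds.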
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
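# mount-cgroup (and apply-sysctl-overwrites below) use the same trick: copy
# a helper binary onto the host through the shared /hostbin mount, nsenter
# into PID 1's namespaces to run it against the host, then delete it again.
# Here it mounts the cgroup2 filesystem at /run/cilium/cgroupv2.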
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
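# mount-bpf-fs is idempotent: it only mounts bpffs at /sys/fs/bpf when no
# such mount exists yet. Bidirectional propagation makes the new mount
# visible to the host and to the other containers.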
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
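# Init containers run strictly in order: config -> mount-cgroup ->
# apply-sysctl-overwrites -> mount-bpf-fs -> clean-cilium-state ->
# install-cni-binaries; only after all six complete does cilium-agent
# start. clean-cilium-state wipes agent/BPF state only when the matching
# cilium-config keys are set (all three lookups are optional).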
nodeName: k8s-node3
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
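# The first toleration (operator: Exists with no key) matches every taint,
# so the agent is schedulable on all nodes; the explicit NoExecute/NoSchedule
# entries keep it running through node-pressure and not-ready conditions.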
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
- name: kube-api-access-w8z2h
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
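# defaultMode is decimal: 256 = 0400 (owner read-only, appropriate for TLS
# keys), 420 = 0644. The clustermesh and hubble secrets are all optional, so
# the pod starts even when those features are not configured. The projected
# service-account token is auto-rotated (expirationSeconds: 3607).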
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:35Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:42:52Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:27Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:27Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:42:16Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://e2e2831eeb015c7b2680d8ac3e6b8150d599c5ed336e1c68f0529444a90f6900
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState:
terminated:
containerID: containerd://80d9e3dc9712c6f026ef6beb4e3bb5d4a728998fae8407b24afd9840e6e8bd6c
exitCode: 255
finishedAt: "2025-11-11T23:56:16Z"
message: |-
-11T21:50:34Z level=info msg="agent.datapath.agent-liveness-updater.timer-job-agent-liveness-updater (rev=131303)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-ipset-init-finalizer (rev=23)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-reconcile (rev=131335)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-refresh (rev=131336)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.job-iptables-reconciliation-loop (rev=131330)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.l2-responder.job-l2-responder-reconciler (rev=131302)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.maps.bwmap.timer-job-pressure-metric-throttle (rev=131304)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.mtu.job-mtu-updater (rev=131326)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.node-address.job-node-address-update (rev=131333)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.orchestrator.job-reinitialize (rev=131324)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-reconcile (rev=131339)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-refresh (rev=131338)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.k8s-synced-crdsync.job-sync-crds (rev=7)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.job-collect (rev=131320)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.timer-job-cleanup (rev=131319)" module=health
time=2025-11-11T21:50:3
reason: Unknown
startedAt: "2025-11-10T20:51:18Z"
name: cilium-agent
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:08Z"
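# terminationMessagePolicy: FallbackToLogsOnError captured the tail of the
# container log as the termination message above. Exit code 255 with
# reason: Unknown, plus the two-hour gap between the last log line (21:50)
# and finishedAt (23:56), is consistent with the node going down uncleanly
# (e.g. a reboot) rather than the agent itself crashing.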
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /lib/modules
name: lib-modules
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.13
hostIPs:
- ip: 192.168.50.13
- ip: 2404:4400:4181:9200:5054:ff:fe2a:4db1
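# hostNetwork: true, so podIP (below) equals hostIP. Both an IPv4 and a
# global IPv6 address are reported, i.e. the nodes are dual-stack.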
initContainerStatuses:
- containerID: containerd://b4a4bee558788a4b4370ffb735731a4c8e33a4230cccaada59feb1927d87a3c1
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: config
ready: true
restartCount: 3
started: false
state:
terminated:
containerID: containerd://b4a4bee558788a4b4370ffb735731a4c8e33a4230cccaada59feb1927d87a3c1
exitCode: 0
finishedAt: "2025-11-11T23:56:52Z"
reason: Completed
startedAt: "2025-11-11T23:56:34Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://0e3684fa418d2194ff9d387b74f740ec05c61cac24af63d6aeaa611de1b1e0b0
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-cgroup
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://0e3684fa418d2194ff9d387b74f740ec05c61cac24af63d6aeaa611de1b1e0b0
exitCode: 0
finishedAt: "2025-11-11T23:56:56Z"
reason: Completed
startedAt: "2025-11-11T23:56:53Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://43a7019fe515c755549e92890f55d934c8773d872d7eaae888294ff952d32beb
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: apply-sysctl-overwrites
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://43a7019fe515c755549e92890f55d934c8773d872d7eaae888294ff952d32beb
exitCode: 0
finishedAt: "2025-11-11T23:56:57Z"
reason: Completed
startedAt: "2025-11-11T23:56:57Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://9dbe3a352a76b01cbd1ecc1549e580dec420f8f438fb4fc3004fd139da1ce275
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-bpf-fs
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://9dbe3a352a76b01cbd1ecc1549e580dec420f8f438fb4fc3004fd139da1ce275
exitCode: 0
finishedAt: "2025-11-11T23:56:59Z"
reason: Completed
startedAt: "2025-11-11T23:56:58Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://a89642a78574c3e7a48a3c1925aaf638ee938f3fa8e16876b36a839c09515616
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: clean-cilium-state
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://a89642a78574c3e7a48a3c1925aaf638ee938f3fa8e16876b36a839c09515616
exitCode: 0
finishedAt: "2025-11-11T23:57:00Z"
reason: Completed
startedAt: "2025-11-11T23:57:00Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://1d7b3e0d8f6eae8e750966189b760175a79514039881e5fc02548be35de5879e
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: install-cni-binaries
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://1d7b3e0d8f6eae8e750966189b760175a79514039881e5fc02548be35de5879e
exitCode: 0
finishedAt: "2025-11-11T23:57:07Z"
reason: Completed
startedAt: "2025-11-11T23:57:01Z"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-w8z2h
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 192.168.50.13
podIPs:
- ip: 192.168.50.13
- ip: 2404:4400:4181:9200:5054:ff:fe2a:4db1
qosClass: Burstable
startTime: "2025-11-02T23:42:17Z"
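# The next two items are the same DaemonSet template rendered for k8s-node1
# and k8s-node5; only the pod name, UID, projected-token volume name, node
# fields, and status differ from the pod above.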
- apiVersion: v1
kind: Pod
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/config: unconfined
container.apparmor.security.beta.kubernetes.io/install-cni-binaries: unconfined
container.apparmor.security.beta.kubernetes.io/mount-bpf-fs: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: "2025-11-02T23:26:28Z"
generateName: cilium-
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
controller-revision-hash: 957c85bbd
k8s-app: cilium
pod-template-generation: "2"
name: cilium-f4jnc
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: cilium
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
resourceVersion: "2119508"
uid: 86f05ab3-3e61-4dd9-9578-7baf9a5030e1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node1
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
nodeName: k8s-node1
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
- name: kube-api-access-dfgg9
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:55:21Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:35Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:06Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:06Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:28Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://254b67269c40bcb7082c0bcfa422ae7c85c970858f07136f061f5fe75982de07
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState:
terminated:
containerID: containerd://68bb7731aab67ca572b79ba2b7918835d5ee597721db18210a88ead89add2a09
exitCode: 255
finishedAt: "2025-11-11T23:54:41Z"
message: |-
l=info msg="agent.controlplane.stale-endpoint-cleanup.job-endpoint-cleanup (rev=112)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.agent-liveness-updater.timer-job-agent-liveness-updater (rev=159335)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-ipset-init-finalizer (rev=30)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-reconcile (rev=159368)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-refresh (rev=159369)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.job-iptables-reconciliation-loop (rev=159363)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.l2-responder.job-l2-responder-reconciler (rev=159334)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.maps.bwmap.timer-job-pressure-metric-throttle (rev=159336)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.mtu.job-mtu-updater (rev=159358)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.node-address.job-node-address-update (rev=159366)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.orchestrator.job-reinitialize (rev=159356)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-reconcile (rev=159372)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-refresh (rev=159371)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.k8s-synced-crdsync.job-sync-crds (rev=7)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.job-collect (rev=159352)" module=health
time=2025-11-11T21:
reason: Unknown
startedAt: "2025-11-10T20:51:21Z"
name: cilium-agent
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:56:17Z"
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /lib/modules
name: lib-modules
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
initContainerStatuses:
- containerID: containerd://8e9ec097d12cd404bcacf3574447ae003fe57db27670b9894c00f42c98025a15
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: config
ready: true
restartCount: 3
started: false
state:
terminated:
containerID: containerd://8e9ec097d12cd404bcacf3574447ae003fe57db27670b9894c00f42c98025a15
exitCode: 0
finishedAt: "2025-11-11T23:55:55Z"
reason: Completed
startedAt: "2025-11-11T23:55:20Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://6be98d33b982de482f785e8c1a5a3431c0d1467305da3d7d80d9677d90c0dcc3
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-cgroup
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://6be98d33b982de482f785e8c1a5a3431c0d1467305da3d7d80d9677d90c0dcc3
exitCode: 0
finishedAt: "2025-11-11T23:55:58Z"
reason: Completed
startedAt: "2025-11-11T23:55:56Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://ebd10ea799bcd40ab75dd7f5b0c3fad0b26037214f19d259b26967b8d2e9750e
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: apply-sysctl-overwrites
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://ebd10ea799bcd40ab75dd7f5b0c3fad0b26037214f19d259b26967b8d2e9750e
exitCode: 0
finishedAt: "2025-11-11T23:55:59Z"
reason: Completed
startedAt: "2025-11-11T23:55:59Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://154553b5685a9235804ed8b662c3c6283e25ccb3fac0afe62522a0b05d0f43b7
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-bpf-fs
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://154553b5685a9235804ed8b662c3c6283e25ccb3fac0afe62522a0b05d0f43b7
exitCode: 0
finishedAt: "2025-11-11T23:56:04Z"
reason: Completed
startedAt: "2025-11-11T23:56:00Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://3d88bd56884d4c1b52e0d90928856708f3567ca9c5a314e9704a5a8d1cc1a9b9
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: clean-cilium-state
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://3d88bd56884d4c1b52e0d90928856708f3567ca9c5a314e9704a5a8d1cc1a9b9
exitCode: 0
finishedAt: "2025-11-11T23:56:06Z"
reason: Completed
startedAt: "2025-11-11T23:56:05Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://080a1a943869e1e62da7db6654f2874a95208b56804efc0936c93cf79db1c4af
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: install-cni-binaries
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://080a1a943869e1e62da7db6654f2874a95208b56804efc0936c93cf79db1c4af
exitCode: 0
finishedAt: "2025-11-11T23:56:16Z"
reason: Completed
startedAt: "2025-11-11T23:56:06Z"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-dfgg9
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 192.168.50.11
podIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
qosClass: Burstable
startTime: "2025-11-02T23:26:28Z"
- apiVersion: v1
kind: Pod
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/config: unconfined
container.apparmor.security.beta.kubernetes.io/install-cni-binaries: unconfined
container.apparmor.security.beta.kubernetes.io/mount-bpf-fs: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: "2025-11-04T02:46:15Z"
generateName: cilium-
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
controller-revision-hash: 957c85bbd
k8s-app: cilium
pod-template-generation: "2"
name: cilium-kw7r6
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: cilium
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
resourceVersion: "2119741"
uid: 418263eb-6022-4c52-b317-822c2a0cfd99
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node5
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
nodeName: k8s-node5
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
- name: kube-api-access-qnp6w
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:26Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:46:47Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:36Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:36Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:46:15Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://5a8639fc948b11edaf9d351493ae99d8ceba4b0be83a00701ff28ac79b129da1
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState:
terminated:
containerID: containerd://425470344e26736531f1e35d3872a430a13c48ee34739d0c9a17e12ec46b0ced
exitCode: 255
finishedAt: "2025-11-11T23:56:04Z"
message: |-
econciler (rev=116210)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.maps.bwmap.timer-job-pressure-metric-throttle (rev=116212)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.mtu.job-mtu-updater (rev=116234)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.node-address.job-node-address-update (rev=116241)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.orchestrator.job-reinitialize (rev=116232)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-reconcile (rev=116247)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-refresh (rev=116246)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.k8s-synced-crdsync.job-sync-crds (rev=7)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.job-collect (rev=116228)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.timer-job-cleanup (rev=116227)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.shell.job-listener (rev=116226)" module=health
time=2025-11-11T21:50:34Z level=info msg="health.job-module-status-metrics (rev=116209)" module=health
time="2025-11-11T21:50:34.49600263Z" level=info msg="Removed endpoint" ciliumEndpointName=/ containerID= containerInterface= datapathPolicyRevision=1 desiredPolicyRevision=1 endpointID=164 identity=4 ipv4=10.1.4.94 ipv6= k8sPodName=/ subsys=endpoint
time="2025-11-11T21:50:34.502098866Z" level=info msg="Stopping fswatcher" config=tls-server subsys=hubble
time="2025-11-11T21:50:34.5089139Z" level=info msg="Datapath signal listener exiting" subsys=signal
time="2025-11-11T21:50:34.508982532Z" level=info msg="
reason: Unknown
startedAt: "2025-11-10T23:40:12Z"
name: cilium-agent
ready: true
restartCount: 4
started: true
state:
running:
startedAt: "2025-11-11T23:57:06Z"
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /lib/modules
name: lib-modules
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.15
hostIPs:
- ip: 192.168.50.15
- ip: 2404:4400:4181:9200:5054:ff:fe9a:4a4d
initContainerStatuses:
- containerID: containerd://95c856ecd163b849b0c1dedb7ed012a2594d2338561bfa123364d56536733530
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: config
ready: true
restartCount: 4
started: false
state:
terminated:
containerID: containerd://95c856ecd163b849b0c1dedb7ed012a2594d2338561bfa123364d56536733530
exitCode: 0
finishedAt: "2025-11-11T23:56:52Z"
reason: Completed
startedAt: "2025-11-11T23:56:25Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://1934842e1f673e17242d63ea68307d53bc789d3cd4361ecb108e6ad9fce4e0f8
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-cgroup
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://1934842e1f673e17242d63ea68307d53bc789d3cd4361ecb108e6ad9fce4e0f8
exitCode: 0
finishedAt: "2025-11-11T23:56:56Z"
reason: Completed
startedAt: "2025-11-11T23:56:53Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://7a1ace36d237e687b587baab31ea5f8a93ba159200119924246e501448b9f69b
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: apply-sysctl-overwrites
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://7a1ace36d237e687b587baab31ea5f8a93ba159200119924246e501448b9f69b
exitCode: 0
finishedAt: "2025-11-11T23:56:56Z"
reason: Completed
startedAt: "2025-11-11T23:56:56Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://974d9257f1fb98e481aa757c5aa8c2cc5af7760d4de4ba14801fdef3b32fd5c6
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-bpf-fs
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://974d9257f1fb98e481aa757c5aa8c2cc5af7760d4de4ba14801fdef3b32fd5c6
exitCode: 0
finishedAt: "2025-11-11T23:56:58Z"
reason: Completed
startedAt: "2025-11-11T23:56:57Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://cf9cecf868e5c827c7eb3abc590cd8d43d11d522a7a35dde15f0117e38ef3860
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: clean-cilium-state
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://cf9cecf868e5c827c7eb3abc590cd8d43d11d522a7a35dde15f0117e38ef3860
exitCode: 0
finishedAt: "2025-11-11T23:56:59Z"
reason: Completed
startedAt: "2025-11-11T23:56:59Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://853b7ccf0d7abb99213eb689b778c3406a4fdb469e6d3ea8fea96fd86c7b6734
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: install-cni-binaries
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://853b7ccf0d7abb99213eb689b778c3406a4fdb469e6d3ea8fea96fd86c7b6734
exitCode: 0
finishedAt: "2025-11-11T23:57:05Z"
reason: Completed
startedAt: "2025-11-11T23:57:00Z"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qnp6w
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 192.168.50.15
podIPs:
- ip: 192.168.50.15
- ip: 2404:4400:4181:9200:5054:ff:fe9a:4a4d
qosClass: Burstable
startTime: "2025-11-04T02:46:16Z"
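# --- Pod: cilium-operator-678d7868c8-2rn65 (Deployment cilium-operator, kube-system, node k8s-node1) ---
# Host-network operator pod; liveness and readiness probes both hit
# http://127.0.0.1:9234/healthz, and Prometheus scrapes host port 9963
# (see the prometheus.io/* annotations below).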
- apiVersion: v1
kind: Pod
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
creationTimestamp: "2025-11-02T23:26:02Z"
generateName: cilium-operator-678d7868c8-
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
pod-template-hash: 678d7868c8
name: cilium-operator-678d7868c8-2rn65
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: cilium-operator-678d7868c8
uid: 54d5cbf9-63e4-45f6-a6be-22fcac89aae5
resourceVersion: "2119150"
uid: 3f425f4e-da91-4e2d-9a54-5cb54c30ae6a
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator-generic
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium-operator-generic:1.17.1-ck2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: cilium-operator
ports:
- containerPort: 9963
hostPort: 9963
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-22lnl
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
nodeName: k8s-node1
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000000000
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
volumes:
- configMap:
defaultMode: 420
name: cilium-config
name: cilium-config-path
- name: kube-api-access-22lnl
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:55:21Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:13Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:03Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:03Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:13Z"
status: "True"
type: PodScheduled
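    # containerStatuses: the previous container instance exited 255 with
    # reason Unknown, consistent with an unclean node restart around
    # 2025-11-11T23:5x (the same pattern appears across the pods in this
    # dump). Because the container uses terminationMessagePolicy:
    # FallbackToLogsOnError, lastState.terminated.message below carries the
    # truncated tail of that instance's log.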
containerStatuses:
- containerID: containerd://6db5adddca627179cf41860c5883ff27df56c1608c7211a0ad0225100e58b197
image: ghcr.io/canonical/cilium-operator-generic:1.17.1-ck2
imageID: ghcr.io/canonical/cilium-operator-generic@sha256:d584588048b6ae94c1810eaa5ebc9073e3063a019477a03bf3ec23445998e6e5
lastState:
terminated:
containerID: containerd://55605c9d5753491b368ed971b3d0c971df68106377ee060a595df41f7b6de940
exitCode: 255
finishedAt: "2025-11-11T23:54:41Z"
message: |
tus tree" module=health error="provider is stopped, no more updates will take place"
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*job.group.Stop duration=84.292589ms
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumNode].Stop duration=832.704µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumBGPPeerConfig].Stop duration=269.997µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumBGPNodeConfig].Stop duration=12.698µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumBGPNodeConfigOverride].Stop duration=33.624µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumBGPClusterConfig].Stop duration=13.212µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*job.group.Stop duration=193.719µs
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Service].Stop duration=864.496µs
time=2025-11-11T21:50:34Z level=error msg="failed to delete reporter status tree" module=health error="provider is stopped, no more updates will take place"
time=2025-11-11T21:50:34Z level=info msg="Stop hook executed" module=operator.operator-controlplane function=*job.group.Stop duration=121.359µs
reason: Unknown
startedAt: "2025-11-10T20:50:42Z"
name: cilium-operator
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:55:20Z"
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-22lnl
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
phase: Running
podIP: 192.168.50.11
podIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
qosClass: BestEffort
startTime: "2025-11-02T23:26:13Z"
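# --- Pod: cilium-t8dkh (DaemonSet cilium, kube-system, node k8s-node4) ---
# Per-node cilium-agent pod; the DaemonSet controller pins it to its node via
# the requiredDuringSchedulingIgnoredDuringExecution matchFields term on
# metadata.name below.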
- apiVersion: v1
kind: Pod
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/config: unconfined
container.apparmor.security.beta.kubernetes.io/install-cni-binaries: unconfined
container.apparmor.security.beta.kubernetes.io/mount-bpf-fs: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: "2025-11-04T02:41:48Z"
generateName: cilium-
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
controller-revision-hash: 957c85bbd
k8s-app: cilium
pod-template-generation: "2"
name: cilium-t8dkh
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: cilium
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
resourceVersion: "2119825"
uid: 35b69926-c49a-4e61-b1bd-a1137f36dddd
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node4
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
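    # Init chain, run in order before cilium-agent starts: config (cilium-dbg
    # build-config) -> mount-cgroup -> apply-sysctl-overwrites -> mount-bpf-fs
    # -> clean-cilium-state -> install-cni-binaries.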
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
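    # mount-bpf-fs: idempotently mounts the BPF filesystem on the host; the
    # grep only falls through to `mount -t bpf` when /sys/fs/bpf is not
    # already mounted.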
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
nodeName: k8s-node4
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
- name: kube-api-access-5nqv2
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:38Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:46:44Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:51Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:51Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:41:48Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://db2da04ca78515ca4473f942f4885f89208ea331316214f60ac902ecae09aac7
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState:
terminated:
containerID: containerd://2aa34d5ab71180e5aca2012ea875cdf8c0f3ace7fdbc9f1b2f72ca6260d4a6be
exitCode: 255
finishedAt: "2025-11-11T23:56:20Z"
message: |-
025-11-11T21:50:34Z level=info msg="agent.datapath.mtu.job-mtu-updater (rev=131015)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.node-address.job-node-address-update (rev=131022)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.orchestrator.job-reinitialize (rev=131013)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-reconcile (rev=131028)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-refresh (rev=131027)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.k8s-synced-crdsync.job-sync-crds (rev=8)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.job-collect (rev=131009)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.timer-job-cleanup (rev=131008)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.shell.job-listener (rev=131007)" module=health
time=2025-11-11T21:50:34Z level=info msg="health.job-module-status-metrics (rev=130990)" module=health
time="2025-11-11T21:50:34.487300853Z" level=info msg="Removed endpoint" ciliumEndpointName=/ containerID= containerInterface= datapathPolicyRevision=1 desiredPolicyRevision=1 endpointID=79 identity=4 ipv4=10.1.3.141 ipv6= k8sPodName=/ subsys=endpoint
time="2025-11-11T21:50:34.489107118Z" level=info msg="Stopping fswatcher" config=tls-server subsys=hubble
time="2025-11-11T21:50:34.494710404Z" level=info msg="Datapath signal listener exiting" subsys=signal
time="2025-11-11T21:50:34.494805615Z" level=info msg="Datapath signal listener done" subsys=signal
time="2025-11-11T21:50:34.494858625Z" level=info msg="Signal handler closed. Stopping conntrack garbage collector" subsys=ct-na
reason: Unknown
startedAt: "2025-11-10T20:51:38Z"
name: cilium-agent
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:20Z"
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /lib/modules
name: lib-modules
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.14
hostIPs:
- ip: 192.168.50.14
- ip: 2404:4400:4181:9200:5054:ff:fe3a:84fd
initContainerStatuses:
- containerID: containerd://750bf14f07ee1c2b4c21ceb9ad7bbdf3e51ec8eb171c01507d38a1873f2a8974
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: config
ready: true
restartCount: 3
started: false
state:
terminated:
containerID: containerd://750bf14f07ee1c2b4c21ceb9ad7bbdf3e51ec8eb171c01507d38a1873f2a8974
exitCode: 0
finishedAt: "2025-11-11T23:57:06Z"
reason: Completed
startedAt: "2025-11-11T23:56:37Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://f7d02d586ca180f2642fdd4a66945f427f88886eb50b452aa4b8d6d6ff4e0d25
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-cgroup
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://f7d02d586ca180f2642fdd4a66945f427f88886eb50b452aa4b8d6d6ff4e0d25
exitCode: 0
finishedAt: "2025-11-11T23:57:09Z"
reason: Completed
startedAt: "2025-11-11T23:57:07Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://7b86ca82da7e3efc052da212a996c083e4bfd34c8ad44379df41c45240df328f
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: apply-sysctl-overwrites
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://7b86ca82da7e3efc052da212a996c083e4bfd34c8ad44379df41c45240df328f
exitCode: 0
finishedAt: "2025-11-11T23:57:10Z"
reason: Completed
startedAt: "2025-11-11T23:57:10Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://d90c6c06ceb7ad7f24cb92973121ac0e287abbb7355127da87ce64bb62087953
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-bpf-fs
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://d90c6c06ceb7ad7f24cb92973121ac0e287abbb7355127da87ce64bb62087953
exitCode: 0
finishedAt: "2025-11-11T23:57:12Z"
reason: Completed
startedAt: "2025-11-11T23:57:11Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://1543205d77323dff8ef2d4fd85927d1945d84f9bb5ee3eb363376bb2409bb8d5
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: clean-cilium-state
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://1543205d77323dff8ef2d4fd85927d1945d84f9bb5ee3eb363376bb2409bb8d5
exitCode: 0
finishedAt: "2025-11-11T23:57:13Z"
reason: Completed
startedAt: "2025-11-11T23:57:13Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://40a5f66cb3f8d9015aac388cadc0bd97a852dd0ce87a2389cf96650d427a2bcb
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: install-cni-binaries
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://40a5f66cb3f8d9015aac388cadc0bd97a852dd0ce87a2389cf96650d427a2bcb
exitCode: 0
finishedAt: "2025-11-11T23:57:19Z"
reason: Completed
startedAt: "2025-11-11T23:57:14Z"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5nqv2
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 192.168.50.14
podIPs:
- ip: 192.168.50.14
- ip: 2404:4400:4181:9200:5054:ff:fe3a:84fd
qosClass: Burstable
startTime: "2025-11-04T02:41:49Z"
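# --- Pod: cilium-tkb27 (DaemonSet cilium, kube-system, node k8s-node2) ---
# Same pod template as cilium-t8dkh above, pinned to k8s-node2.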
- apiVersion: v1
kind: Pod
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
container.apparmor.security.beta.kubernetes.io/config: unconfined
container.apparmor.security.beta.kubernetes.io/install-cni-binaries: unconfined
container.apparmor.security.beta.kubernetes.io/mount-bpf-fs: unconfined
container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: "2025-11-02T23:41:26Z"
generateName: cilium-
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
controller-revision-hash: 957c85bbd
k8s-app: cilium
pod-template-generation: "2"
name: cilium-tkb27
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: cilium
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
resourceVersion: "2119638"
uid: 2617c977-03c5-43d3-8852-fdc002bf7b62
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node2
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
nodeName: k8s-node2
nodeSelector:
kubernetes.io/os: linux
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/network-unavailable
operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
- name: kube-api-access-hqj7c
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:56:10Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:46:51Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:24Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:24Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:41:26Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://36b8463c78f62196e9106bb9bdc1a911e7b7a8c8ffd03b55b2283cdb5a861735
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState:
terminated:
containerID: containerd://7a7d32498b5f1fd1d4fb557a40b55dfcf9c3be7b8cd0936744a61baa0cca36e2
exitCode: 255
finishedAt: "2025-11-11T23:55:50Z"
message: |-
l=info msg="agent.controlplane.stale-endpoint-cleanup.job-endpoint-cleanup (rev=100)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.agent-liveness-updater.timer-job-agent-liveness-updater (rev=130756)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-ipset-init-finalizer (rev=29)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-reconcile (rev=130789)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.ipset.job-refresh (rev=130788)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.iptables.job-iptables-reconciliation-loop (rev=130783)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.l2-responder.job-l2-responder-reconciler (rev=130755)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.maps.bwmap.timer-job-pressure-metric-throttle (rev=130757)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.mtu.job-mtu-updater (rev=130779)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.node-address.job-node-address-update (rev=130786)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.orchestrator.job-reinitialize (rev=130777)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-reconcile (rev=130792)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.datapath.sysctl.job-refresh (rev=130791)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.k8s-synced-crdsync.job-sync-crds (rev=7)" module=health
time=2025-11-11T21:50:34Z level=info msg="agent.infra.metrics.job-collect (rev=130773)" module=health
time=2025-11-11T21:
reason: Unknown
startedAt: "2025-11-10T20:51:21Z"
name: cilium-agent
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:00Z"
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /lib/modules
name: lib-modules
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
recursiveReadOnly: Disabled
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.12
hostIPs:
- ip: 192.168.50.12
- ip: 2404:4400:4181:9200:5054:ff:fe26:38b3
initContainerStatuses:
- containerID: containerd://8fd59b3a5cd8c595b3b805788b169fc71b7e8b57e4a43003c49ef9c951fc6e6c
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: config
ready: true
restartCount: 3
started: false
state:
terminated:
containerID: containerd://8fd59b3a5cd8c595b3b805788b169fc71b7e8b57e4a43003c49ef9c951fc6e6c
exitCode: 0
finishedAt: "2025-11-11T23:56:45Z"
reason: Completed
startedAt: "2025-11-11T23:56:09Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://c1b42a1573966c8bee69384710b533863d646bd4e1844d4ec4aa5cda00e91286
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-cgroup
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://c1b42a1573966c8bee69384710b533863d646bd4e1844d4ec4aa5cda00e91286
exitCode: 0
finishedAt: "2025-11-11T23:56:47Z"
reason: Completed
startedAt: "2025-11-11T23:56:46Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://6266ef449fbac4bbf8a1634e814c1088495cf10936bdc80ccfcd665e0d8293bc
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: apply-sysctl-overwrites
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://6266ef449fbac4bbf8a1634e814c1088495cf10936bdc80ccfcd665e0d8293bc
exitCode: 0
finishedAt: "2025-11-11T23:56:48Z"
reason: Completed
startedAt: "2025-11-11T23:56:48Z"
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://7d4794ee63fd693e8afa68665cabab085655bab126603bb95c8cf2795b290d9c
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: mount-bpf-fs
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://7d4794ee63fd693e8afa68665cabab085655bab126603bb95c8cf2795b290d9c
exitCode: 0
finishedAt: "2025-11-11T23:56:50Z"
reason: Completed
startedAt: "2025-11-11T23:56:49Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://80f89ffea7743e7a8195b6aacb8b2251ab99f44ddfdf4ea57870a87208b36ae1
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: clean-cilium-state
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://80f89ffea7743e7a8195b6aacb8b2251ab99f44ddfdf4ea57870a87208b36ae1
exitCode: 0
finishedAt: "2025-11-11T23:56:51Z"
reason: Completed
startedAt: "2025-11-11T23:56:51Z"
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://fcae7494936109a46a6d61ed8435171920929dafafbc6c77c305557f8647a006
image: ghcr.io/canonical/cilium:1.17.1-ck2
imageID: ghcr.io/canonical/cilium@sha256:5291fb13d024f015d48b84bb09089032e3ff9deedbe1444a389c9a4f00adc9e3
lastState: {}
name: install-cni-binaries
ready: true
restartCount: 0
started: false
state:
terminated:
containerID: containerd://fcae7494936109a46a6d61ed8435171920929dafafbc6c77c305557f8647a006
exitCode: 0
finishedAt: "2025-11-11T23:56:59Z"
reason: Completed
startedAt: "2025-11-11T23:56:52Z"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-hqj7c
readOnly: true
recursiveReadOnly: Disabled
phase: Running
podIP: 192.168.50.12
podIPs:
- ip: 192.168.50.12
- ip: 2404:4400:4181:9200:5054:ff:fe26:38b3
qosClass: Burstable
startTime: "2025-11-02T23:41:26Z"
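# --- Pod: ck-storage-rawfile-csi-controller-0 (StatefulSet, kube-system, node k8s-node1) ---
# rawfile-localpv CSI controller: the csi-driver container serves the CSI
# socket from the shared socket-dir emptyDir, with the external-resizer
# sidecar attached to the same socket.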
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-02T23:25:53Z"
generateName: ck-storage-rawfile-csi-controller-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
apps.kubernetes.io/pod-index: "0"
component: controller
controller-revision-hash: ck-storage-rawfile-csi-controller-6ddc646dbb
statefulset.kubernetes.io/pod-name: ck-storage-rawfile-csi-controller-0
name: ck-storage-rawfile-csi-controller-0
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: StatefulSet
name: ck-storage-rawfile-csi-controller
uid: 80bd7171-7f66-4fac-b489-836a98e06d72
resourceVersion: "2119698"
uid: c18a3ca0-3b8d-4963-8db7-238605d0d717
spec:
containers:
- args:
- --args
- rawfile
- csi-driver
- --disable-metrics
env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-mk8h9
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --handle-volume-inuse-error=false
env:
- name: ADDRESS
value: /csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.2
imagePullPolicy: IfNotPresent
name: external-resizer
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-mk8h9
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostname: ck-storage-rawfile-csi-controller-0
nodeName: k8s-node1
preemptionPolicy: PreemptLowerPriority
priority: 2000000000
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
subdomain: ck-storage-rawfile-csi
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: "true"
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
value: "true"
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- emptyDir: {}
name: socket-dir
- name: kube-api-access-mk8h9
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:29Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:29Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:29Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://731a565272b6d3c9f875c4afa6f39494228b1ced1d24a0a755b7aec3bbb133b5
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://7c853c66349b22e7f88845aa87a899d9267c131ffe4d9ba051d3a07967543905
exitCode: 255
finishedAt: "2025-11-11T23:54:42Z"
reason: Unknown
startedAt: "2025-11-10T20:52:26Z"
name: csi-driver
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-mk8h9
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://5d90fd45eb0a010ffd92beaaa6f4f6ea539386e2f969b3e05ee57f0d30c0f8a8
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer@sha256:be6a7de1d43dba90710b61bd3d0d8f568654a6adadaeea9188cf4cd3554cbb87
lastState:
terminated:
containerID: containerd://0f457dd211e021b3c84af1a8aaaa7a49a523959c45eb088e7df9e35bca0a7532
exitCode: 255
finishedAt: "2025-11-11T23:54:40Z"
reason: Unknown
startedAt: "2025-11-10T20:52:27Z"
name: external-resizer
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-mk8h9
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
phase: Running
podIP: 10.1.0.182
podIPs:
- ip: 10.1.0.182
qosClass: Burstable
startTime: "2025-11-02T23:26:40Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-04T02:41:48Z"
generateName: ck-storage-rawfile-csi-node-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
controller-revision-hash: 5df7f564fd
pod-template-generation: "1"
name: ck-storage-rawfile-csi-node-dwfr2
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: ck-storage-rawfile-csi-node
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
resourceVersion: "2119936"
uid: eefc14d6-b0ea-47aa-a337-a072507f8390
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node4
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node4
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
- name: kube-api-access-26r9b
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:11Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:41:49Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:11Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:11Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:41:48Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://dcf4a0d556a5330fff422ec5ca95900b090e93a5465cc079b245cf522201e008
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://6f38a60340dec104473aedb9de53ec18e0d86fe982b3d5463c68e9a6f8503206
exitCode: 255
finishedAt: "2025-11-11T23:56:21Z"
reason: Unknown
startedAt: "2025-11-10T20:52:31Z"
name: csi-driver
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:58:09Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://94cd336f81ba0dfc931ada1cab0510f0c1baba53dc993bfcd3131dc9d77c69b8
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner@sha256:7b9cdb5830d01bda96111b4f138dbddcc01eed2f95aa980a404c45a042d60a10
lastState:
terminated:
containerID: containerd://1dd107616588d7aa1f225815df9df1f9464fab4cd93554dc05591649b8048733
exitCode: 255
finishedAt: "2025-11-11T23:56:19Z"
reason: Unknown
startedAt: "2025-11-10T20:52:33Z"
name: external-provisioner
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:58:10Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://d069cdd97e091286ae9ac5465497b1176775008f7da94989dab4ed8b62d2acc0
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter@sha256:5f051159c95fd13b0b518436cb20808862d2f7f95f45e036da4242f3416befe4
lastState:
terminated:
containerID: containerd://57e072e1b589deb8e390b289fd12987aa6047a367197551333866639acfe9ff1
exitCode: 255
finishedAt: "2025-11-11T23:56:21Z"
reason: Unknown
startedAt: "2025-11-10T20:52:33Z"
name: external-snapshotter
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:58:10Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://1a0deac2cdb8469d4bc78644a497bc0882e83465b380c6490f3764c8db8fae05
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac
lastState:
terminated:
containerID: containerd://4932d2a68fa6e420d903e58d38bd146d62e0ac67f6996f32e393699fe53eb89a
exitCode: 255
finishedAt: "2025-11-11T23:56:20Z"
reason: Unknown
startedAt: "2025-11-10T20:52:32Z"
name: node-driver-registrar
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:58:10Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-26r9b
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.14
hostIPs:
- ip: 192.168.50.14
- ip: 2404:4400:4181:9200:5054:ff:fe3a:84fd
phase: Running
podIP: 10.1.3.126
podIPs:
- ip: 10.1.3.126
qosClass: Burstable
startTime: "2025-11-04T02:41:49Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-04T02:46:15Z"
generateName: ck-storage-rawfile-csi-node-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
controller-revision-hash: 5df7f564fd
pod-template-generation: "1"
name: ck-storage-rawfile-csi-node-hgmmc
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: ck-storage-rawfile-csi-node
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
resourceVersion: "2119882"
uid: 733eb6e2-6846-4cea-b64a-fa0118f9e0d8
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node5
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node5
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
- name: kube-api-access-f46jr
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:00Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:46:16Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:00Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:00Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-04T02:46:15Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://c092afad77e20e70637b3ca56b8d12070b7495d6a60bd1c163943c52d860e7a5
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://f7497da39e149c16d30fa47bd0cb6582acdf5fc448847ff1a029d852b1616d41
exitCode: 255
finishedAt: "2025-11-11T23:56:06Z"
reason: Unknown
startedAt: "2025-11-10T23:40:23Z"
name: csi-driver
ready: true
restartCount: 4
started: true
state:
running:
startedAt: "2025-11-11T23:57:59Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://9b91ffc3fab25f8c475c1117d886f261cc087bc7b295bbd2c6a90fb57398dee4
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner@sha256:7b9cdb5830d01bda96111b4f138dbddcc01eed2f95aa980a404c45a042d60a10
lastState:
terminated:
containerID: containerd://bbc38125e8faf0f9ab6af96b52ff029c34181815ea38210d310c9eeb5010490f
exitCode: 255
finishedAt: "2025-11-11T23:56:06Z"
reason: Unknown
startedAt: "2025-11-10T23:40:23Z"
name: external-provisioner
ready: true
restartCount: 4
started: true
state:
running:
startedAt: "2025-11-11T23:57:59Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://f0a0c0d7d8e5105be61d1f955b62319d33d5bdad5e01ba0c9c2b1f19aad171cd
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter@sha256:5f051159c95fd13b0b518436cb20808862d2f7f95f45e036da4242f3416befe4
lastState:
terminated:
containerID: containerd://6f7828caf9b3d6678cc73668c56405b41041da6f7dd339cc5a539cf01bbe2436
exitCode: 255
finishedAt: "2025-11-11T23:56:05Z"
reason: Unknown
startedAt: "2025-11-10T23:40:23Z"
name: external-snapshotter
ready: true
restartCount: 4
started: true
state:
running:
startedAt: "2025-11-11T23:58:00Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://fd888368ddbaf9ba3f95ef55ae558054b351d77855077942fd681fcba82f475d
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac
lastState:
terminated:
containerID: containerd://7e096fdf9a2d9222d39c10d30bb38307229efa28f0ac53731891d8a1847ab8a9
exitCode: 255
finishedAt: "2025-11-11T23:56:05Z"
reason: Unknown
startedAt: "2025-11-10T23:40:23Z"
name: node-driver-registrar
ready: true
restartCount: 4
started: true
state:
running:
startedAt: "2025-11-11T23:57:59Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-f46jr
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.15
hostIPs:
- ip: 192.168.50.15
- ip: 2404:4400:4181:9200:5054:ff:fe9a:4a4d
phase: Running
podIP: 10.1.4.226
podIPs:
- ip: 10.1.4.226
qosClass: Burstable
startTime: "2025-11-04T02:46:16Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-02T23:42:16Z"
generateName: ck-storage-rawfile-csi-node-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
controller-revision-hash: 5df7f564fd
pod-template-generation: "1"
name: ck-storage-rawfile-csi-node-q5x5n
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: ck-storage-rawfile-csi-node
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
resourceVersion: "2119847"
uid: eecd3fd8-2a54-470b-8651-aeb98645a6d4
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node3
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node3
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
- name: kube-api-access-82cjb
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:53Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:42:17Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:53Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:53Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:42:16Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://60f90eb21267906242031ba6cd5143b73dcfb4abe23da45f550b0dec6c06c55b
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://a506401c559a9723fe0766de0eb7d2c479510fa93b8735a2610f3d0180d9318a
exitCode: 255
finishedAt: "2025-11-11T23:56:16Z"
reason: Unknown
startedAt: "2025-11-10T20:52:09Z"
name: csi-driver
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:51Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://f00972b688221e237144c3c061b6018409808fc6e399e9e3c74a6ffafe7574f4
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner@sha256:7b9cdb5830d01bda96111b4f138dbddcc01eed2f95aa980a404c45a042d60a10
lastState:
terminated:
containerID: containerd://f25150ad92915b2093cfec7b13a6804ab5532c7a8cdf7397ac822beae6f5512d
exitCode: 255
finishedAt: "2025-11-11T23:56:17Z"
reason: Unknown
startedAt: "2025-11-10T20:52:10Z"
name: external-provisioner
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:52Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://a43e6eeadc56336091f22ab573a0e93dda2a2d82f6d26d707fb9f84309f08cc6
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter@sha256:5f051159c95fd13b0b518436cb20808862d2f7f95f45e036da4242f3416befe4
lastState:
terminated:
containerID: containerd://586011d9d37aef716957d9bb3ae9f392e4e5ffbefac32945c7e0d2520c39711d
exitCode: 255
finishedAt: "2025-11-11T23:56:16Z"
reason: Unknown
startedAt: "2025-11-10T20:52:11Z"
name: external-snapshotter
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:52Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://6bc9a3374cc2c54d19492804160a97aeb1cbae2ad04d6849ed94dc6710c556a0
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac
lastState:
terminated:
containerID: containerd://28b8697d30b7e802b735af9ed792489c575b4e1e09c133a8e09754c07a44f0f3
exitCode: 255
finishedAt: "2025-11-11T23:56:14Z"
reason: Unknown
startedAt: "2025-11-10T20:52:10Z"
name: node-driver-registrar
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:52Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-82cjb
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.13
hostIPs:
- ip: 192.168.50.13
- ip: 2404:4400:4181:9200:5054:ff:fe2a:4db1
phase: Running
podIP: 10.1.1.209
podIPs:
- ip: 10.1.1.209
qosClass: Burstable
startTime: "2025-11-02T23:42:17Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-02T23:25:53Z"
generateName: ck-storage-rawfile-csi-node-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
controller-revision-hash: 5df7f564fd
pod-template-generation: "1"
name: ck-storage-rawfile-csi-node-sthbh
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: ck-storage-rawfile-csi-node
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
resourceVersion: "2119710"
uid: ba253f45-ad78-481f-8777-f22b7577acb5
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node1
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node1
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
- name: kube-api-access-qfdmv
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:30Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:25:53Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:30Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:30Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:25:53Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://7697f0796298e083d617fa6249e49c8eb51236bb6ec2eec1f450ebf4ee308936
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://2eeb2c021a97284b92aafde0c5c0112712db4d5dee6fa9aa91eca060cc20e800
exitCode: 255
finishedAt: "2025-11-11T23:54:40Z"
reason: Unknown
startedAt: "2025-11-10T20:52:26Z"
name: csi-driver
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://f77bc021c6cc78280e3e876a92bd272b371efb9fd7204220ec7cec309ed06230
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner@sha256:7b9cdb5830d01bda96111b4f138dbddcc01eed2f95aa980a404c45a042d60a10
lastState:
terminated:
containerID: containerd://0ffbf5eee2f00da9324e04e4240b2188c2f0101a146073602f1a840bf8fafcbf
exitCode: 255
finishedAt: "2025-11-11T23:54:40Z"
reason: Unknown
startedAt: "2025-11-10T20:52:27Z"
name: external-provisioner
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://995951fdd4e62fa447e95294a578b6569caf81ef062cf1aaf38dc28d31204412
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter@sha256:5f051159c95fd13b0b518436cb20808862d2f7f95f45e036da4242f3416befe4
lastState:
terminated:
containerID: containerd://e41f0ca9e78ed82fb937f6e66a96ba08942490e204e7bed1d4598265e4150aee
exitCode: 255
finishedAt: "2025-11-11T23:54:42Z"
reason: Unknown
startedAt: "2025-11-10T20:52:28Z"
name: external-snapshotter
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:29Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://1de14485d5de30e8453800858bd6012aaeb4c04b17e2ac4623614016615f94e6
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac
lastState:
terminated:
containerID: containerd://1e4f9ad75c3d75f8aba4a0e008f607ceeaa27acfd4dee2c133f0552b2df3ed56
exitCode: 255
finishedAt: "2025-11-11T23:54:40Z"
reason: Unknown
startedAt: "2025-11-10T20:52:27Z"
name: node-driver-registrar
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-qfdmv
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
phase: Running
podIP: 10.1.0.137
podIPs:
- ip: 10.1.0.137
qosClass: Burstable
startTime: "2025-11-02T23:25:53Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-02T23:41:26Z"
generateName: ck-storage-rawfile-csi-node-
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
controller-revision-hash: 5df7f564fd
pod-template-generation: "1"
name: ck-storage-rawfile-csi-node-vs45x
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: DaemonSet
name: ck-storage-rawfile-csi-node
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
resourceVersion: "2119785"
uid: def26a78-bfc9-4196-9d8c-5cb3bb0744bc
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchFields:
- key: metadata.name
operator: In
values:
- k8s-node2
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node2
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/disk-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/pid-pressure
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/unschedulable
operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
- name: kube-api-access-4hzkm
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:44Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:41:26Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:44Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:44Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:41:26Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://65002b32f9e1397cb3054fdf9f4a7d3b96b1fc0bbb9d1e780983c3bfdcb1dc98
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imageID: ghcr.io/canonical/rawfile-localpv@sha256:f31db8b52a8399a80e892dea4edeead75ac138c83ba733dfa05b265e7c9ab02f
lastState:
terminated:
containerID: containerd://1b69db907ed37a0d8f147105f18cfdb4ee31797bce5c6efe7d097b35ae68f433
exitCode: 255
finishedAt: "2025-11-11T23:55:48Z"
reason: Unknown
startedAt: "2025-11-10T20:52:06Z"
name: csi-driver
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:42Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
name: mountpoint-dir
- mountPath: /data
name: data-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://257908701aaeecaceb187207eb0533cfd2e386f62bfdb27509dcfefb627455c4
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner@sha256:7b9cdb5830d01bda96111b4f138dbddcc01eed2f95aa980a404c45a042d60a10
lastState:
terminated:
containerID: containerd://dc6e331f167ed86022799006976ff2b5e52795e7faa35205bffcafb90c1d8c49
exitCode: 255
finishedAt: "2025-11-11T23:55:53Z"
reason: Unknown
startedAt: "2025-11-10T20:52:07Z"
name: external-provisioner
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:43Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://cdec1c79f57c8afae44628b7d572390dcac3bdde4e8138a5eb8e90592527a2da
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter@sha256:5f051159c95fd13b0b518436cb20808862d2f7f95f45e036da4242f3416befe4
lastState:
terminated:
containerID: containerd://a185be7c6156db8aaad69e06ae72e5ac065db53382fdb2b64581157ec993eb71
exitCode: 255
finishedAt: "2025-11-11T23:55:51Z"
reason: Unknown
startedAt: "2025-11-10T20:52:08Z"
name: external-snapshotter
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:44Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
recursiveReadOnly: Disabled
- containerID: containerd://23baa53a6de97dc5148518224f30a84403442f01d289a4a3bec2a1b746efc3e4
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imageID: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac
lastState:
terminated:
containerID: containerd://c08528ed8c29f3b9aaf2f8595c0c92046e3cca7c5eba3ac10043bc0214a16296
exitCode: 255
finishedAt: "2025-11-11T23:55:52Z"
reason: Unknown
startedAt: "2025-11-10T20:52:07Z"
name: node-driver-registrar
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:43Z"
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-4hzkm
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.12
hostIPs:
- ip: 192.168.50.12
- ip: 2404:4400:4181:9200:5054:ff:fe26:38b3
phase: Running
podIP: 10.1.2.211
podIPs:
- ip: 10.1.2.211
qosClass: Burstable
startTime: "2025-11-02T23:41:26Z"
- apiVersion: v1
kind: Pod
metadata:
annotations:
checksum/config: fc4c9b56cf9b744229b59b8dd4ae4d31d7bb5b8a21d2589279812d3ddb6e384f
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
creationTimestamp: "2025-11-02T23:25:53Z"
generateName: coredns-fc9c778db-
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
pod-template-hash: fc9c778db
name: coredns-fc9c778db-h9jfc
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: coredns-fc9c778db
uid: 257997c2-8382-443c-81ee-393aabecc552
resourceVersion: "2119886"
uid: 483da0a2-e6fd-4073-bcec-90b0ae16b405
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: ghcr.io/canonical/coredns:1.12.0-ck1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: udp-53
protocol: UDP
- containerPort: 53
name: tcp-53
protocol: TCP
- containerPort: 9153
name: tcp-9153
protocol: TCP
readinessProbe:
failureThreshold: 1
httpGet:
path: /ready
port: 8181
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
readOnlyRootFilesystem: false
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p9bd7
readOnly: true
dnsPolicy: Default
enableServiceLinks: true
nodeName: k8s-node1
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
name: ck-dns-coredns
name: config-volume
- name: kube-api-access-p9bd7
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:29Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:01Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:58:01Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://60d3e5bf55b2b24e7b4c3659181b31c7cb74f7b47d3fb5e94de93dceb4ee8446
image: ghcr.io/canonical/coredns:1.12.0-ck1
imageID: ghcr.io/canonical/coredns@sha256:2a580516c873cfbe2b32682ab3613adaf380769f9020d7e11e30055dba8a993a
lastState:
terminated:
containerID: containerd://990c8eb9da243f60490603730964a6142d1f55b711016197a1618cb9c6c2753d
exitCode: 255
finishedAt: "2025-11-11T23:54:42Z"
reason: Unknown
startedAt: "2025-11-10T20:52:27Z"
name: coredns
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-p9bd7
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
phase: Running
podIP: 10.1.0.223
podIPs:
- ip: 10.1.0.223
qosClass: Guaranteed
startTime: "2025-11-02T23:26:40Z"
- apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2025-11-02T23:25:53Z"
generateName: metrics-server-8694c96fb7-
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
pod-template-hash: 8694c96fb7
name: metrics-server-8694c96fb7-rsdhj
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: metrics-server-8694c96fb7
uid: d07039d9-15e5-41cf-af16-2d4e1122b29a
resourceVersion: "2119808"
uid: 99ddd704-8ce8-40d5-8b81-66bad99c391d
spec:
containers:
- args:
- --secure-port=10250
- --cert-dir=/tmp
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: ghcr.io/canonical/metrics-server:0.7.2-ck0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5gnb6
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
nodeName: k8s-node1
preemptionPolicy: PreemptLowerPriority
priority: 2000000000
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: metrics-server
serviceAccountName: metrics-server
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- emptyDir: {}
name: tmp
- name: kube-api-access-5gnb6
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:29Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:50Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-11-11T23:57:50Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-11-02T23:26:40Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://5d9414633648316d9db5eb61ef868297098941eed4bc3b6d73e194297341e1fe
image: ghcr.io/canonical/metrics-server:0.7.2-ck0
imageID: ghcr.io/canonical/metrics-server@sha256:2b94444cf67479f2fe77e353f64d04aab98a222c057cd40b2000aff9a2fb1682
lastState:
terminated:
containerID: containerd://31f1de01bb292bf14d6ba18405383c4271ef0ae60b4fa4df91e0eb8950c8d9ea
exitCode: 255
finishedAt: "2025-11-11T23:54:41Z"
reason: Unknown
startedAt: "2025-11-10T20:52:26Z"
name: metrics-server
ready: true
restartCount: 3
started: true
state:
running:
startedAt: "2025-11-11T23:57:28Z"
volumeMounts:
- mountPath: /tmp
name: tmp
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-5gnb6
readOnly: true
recursiveReadOnly: Disabled
hostIP: 192.168.50.11
hostIPs:
- ip: 192.168.50.11
- ip: 2404:4400:4181:9200:5054:ff:fe3f:dcd9
phase: Running
podIP: 10.1.0.74
podIPs:
- ip: 10.1.0.74
qosClass: Burstable
startTime: "2025-11-02T23:26:40Z"
- apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2025-11-10T05:51:26Z"
labels:
component: apiserver
provider: kubernetes
name: kubernetes
namespace: default
resourceVersion: "1720322"
uid: 30330406-18a8-4dcb-9f40-cd5e6ab4aac1
spec:
clusterIP: 10.152.183.1
clusterIPs:
- 10.152.183.1
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: https
port: 443
protocol: TCP
targetPort: 6443
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
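# The default/kubernetes Service fronts the API server (443 -> 6443 on the
# control-plane nodes). To see which endpoints currently back it:
#
#   kubectl get endpoints kubernetes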
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: ck-storage
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:49Z"
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: rawfile-csi
app.kubernetes.io/version: 0.8.2
component: controller
helm.sh/chart: rawfile-csi-0.9.1
name: ck-storage-rawfile-csi-controller
namespace: kube-system
resourceVersion: "309"
uid: d9016323-802b-404c-94e3-01192745d966
spec:
clusterIP: None
clusterIPs:
- None
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
selector:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: controller
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
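# clusterIP: None makes this a headless Service: DNS returns the controller
# pod IPs directly rather than a virtual IP. To list the pods the selector
# above matches:
#
#   kubectl -n kube-system get pods \
#     -l app.kubernetes.io/instance=ck-storage,app.kubernetes.io/name=rawfile-csi,component=controller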
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: ck-storage
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:49Z"
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: rawfile-csi
app.kubernetes.io/version: 0.8.2
component: node
helm.sh/chart: rawfile-csi-0.9.1
name: ck-storage-rawfile-csi-node
namespace: kube-system
resourceVersion: "312"
uid: 5e525e2d-7f3d-476a-9a37-45fec8af017e
spec:
clusterIP: 10.152.183.72
clusterIPs:
- 10.152.183.72
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: metrics
port: 9100
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
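# This Service exposes the rawfile-csi node pods' metrics port (9100). A
# sketch for spot-checking it locally; the /metrics path is the usual
# Prometheus convention and is an assumption here, not taken from the
# manifest:
#
#   kubectl -n kube-system port-forward svc/ck-storage-rawfile-csi-node 9100:9100
#   curl -s http://127.0.0.1:9100/metrics | head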
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: ck-dns
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:48Z"
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coredns
helm.sh/chart: coredns-1.39.2
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
resourceVersion: "241"
uid: 8c67b081-1fce-48a8-acdf-3036396877bd
spec:
clusterIP: 10.152.183.178
clusterIPs:
- 10.152.183.178
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: udp-53
port: 53
protocol: UDP
targetPort: 53
- name: tcp-53
port: 53
protocol: TCP
targetPort: 53
selector:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
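# To confirm cluster DNS answers on the clusterIP above (10.152.183.178),
# one option is a throwaway pod; the pod name and busybox image/tag are
# illustrative choices, not part of this manifest:
#
#   kubectl run dnscheck --rm -it --restart=Never --image=busybox:1.36 -- \
#     nslookup kubernetes.default.svc.cluster.local 10.152.183.178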
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: ck-network
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:54Z"
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: hubble-peer
app.kubernetes.io/part-of: cilium
k8s-app: cilium
name: hubble-peer
namespace: kube-system
resourceVersion: "431"
uid: b65f55a0-2b4e-45f5-9859-8fc96b0388f2
spec:
clusterIP: 10.152.183.76
clusterIPs:
- 10.152.183.76
internalTrafficPolicy: Local
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
selector:
k8s-app: cilium
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
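# internalTrafficPolicy: Local means traffic to this Service only reaches
# the cilium agent on the same node (443 -> the agent's Hubble peer port
# 4244). To inspect which endpoints it currently selects:
#
#   kubectl -n kube-system get endpointslices -l kubernetes.io/service-name=hubble-peer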
- apiVersion: v1
kind: Service
metadata:
annotations:
meta.helm.sh/release-name: metrics-server
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:48Z"
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: kube-system
resourceVersion: "272"
uid: 0ae8c5d5-90ea-49f6-8241-6d8b7a867696
spec:
clusterIP: 10.152.183.69
clusterIPs:
- 10.152.183.69
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
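# This Service backs the aggregated metrics API. To confirm the APIService
# registration is healthy and the API answers:
#
#   kubectl get apiservice v1beta1.metrics.k8s.io
#   kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes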
- apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "2"
meta.helm.sh/release-name: ck-network
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:54Z"
generation: 2
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
name: cilium
namespace: kube-system
resourceVersion: "2119829"
uid: 0f5033cb-a382-446b-8b7d-ead01e3cdc64
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: cilium
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
command:
- cilium-agent
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- bash
- -c
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
livenessProbe:
failureThreshold: 10
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
name: cilium-agent
readinessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
host: 127.0.0.1
httpHeaders:
- name: brief
value: "true"
path: /healthz
port: 9879
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 2
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
name: cilium-netns
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
- mountPath: /tmp
name: tmp
dnsPolicy: ClusterFirst
hostNetwork: true
initContainers:
- command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: config
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-cgroup
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
env:
- name: BIN_PATH
value: /opt/cni/bin
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: apply-sysctl-overwrites
resources: {}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
resources: {}
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-state
name: cilium-config
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
key: clean-cilium-bpf-state
name: cilium-config
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
key: write-cni-conf-when-ready
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: ghcr.io/canonical/cilium:1.17.1-ck2
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
appArmorProfile:
type: Unconfined
serviceAccount: cilium
serviceAccountName: cilium
terminationGracePeriodSeconds: 1
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
name: cilium-run
- hostPath:
path: /var/run/netns
type: DirectoryOrCreate
name: cilium-netns
- hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
name: cni-path
- hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
name: etc-cni-netd
- hostPath:
path: /lib/modules
type: ""
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
- name: clustermesh-secrets
projected:
defaultMode: 256
sources:
- secret:
name: cilium-clustermesh
optional: true
- secret:
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
name: clustermesh-apiserver-remote-cert
optional: true
- secret:
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
name: clustermesh-apiserver-local-cert
optional: true
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
- name: hubble-tls
projected:
defaultMode: 256
sources:
- secret:
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt
name: hubble-server-certs
optional: true
updateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 2
type: RollingUpdate
status:
currentNumberScheduled: 5
desiredNumberScheduled: 5
numberAvailable: 5
numberMisscheduled: 0
numberReady: 5
observedGeneration: 2
updatedNumberScheduled: 5
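# To watch the cilium DaemonSet above converge after a change, and to ask an
# agent for its own health summary (cilium-dbg is the CLI already used by the
# "config" init container; a sketch, assuming the agents are up):
#
#   kubectl -n kube-system rollout status ds/cilium
#   kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium-dbg status --brief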
- apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "1"
meta.helm.sh/release-name: ck-storage
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:49Z"
generation: 1
labels:
app.kubernetes.io/managed-by: Helm
name: ck-storage-rawfile-csi-node
namespace: kube-system
resourceVersion: "2119939"
uid: 4c2066cc-c4f6-46de-add3-6bb4e3184995
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: node
spec:
containers:
- env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
- name: NODE_ID
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9100
name: metrics
protocol: TCP
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
securityContext:
privileged: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /data
name: data-dir
- args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --health-port=9809
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/rawfile-csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: healthz
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: node-driver-registrar
ports:
- containerPort: 9809
name: healthz
protocol: TCP
resources:
limits:
cpu: 500m
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- args:
- --csi-address=$(ADDRESS)
- --feature-gates=Topology=true
- --strict-topology
- --immediate-topology=false
- --timeout=120s
- --enable-capacity=true
- --capacity-ownerref-level=1
- --node-deployment=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.2
imagePullPolicy: IfNotPresent
name: external-provisioner
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --node-deployment=true
- --extra-create-metadata=true
env:
- name: ADDRESS
value: /csi/csi.sock
- name: NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.2
imagePullPolicy: IfNotPresent
name: external-snapshotter
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
dnsPolicy: ClusterFirst
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins/rawfile-csi
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/snap/k8s/common/rawfile-storage
type: DirectoryOrCreate
name: data-dir
updateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 100%
type: RollingUpdate
status:
currentNumberScheduled: 5
desiredNumberScheduled: 5
numberAvailable: 5
numberMisscheduled: 0
numberReady: 5
observedGeneration: 1
updatedNumberScheduled: 5
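# Note the update strategy above: maxUnavailable: 100% lets every node pod be
# replaced at once during a rollout. To confirm the CSI driver these pods
# register is visible to the cluster:
#
#   kubectl get csidrivers
#   kubectl get csinode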
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
meta.helm.sh/release-name: ck-network
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:54Z"
generation: 2
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
name: cilium-operator
namespace: kube-system
resourceVersion: "2119152"
uid: efe28031-7016-437c-896d-7c183a96b85b
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator-generic
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium-operator-generic:1.17.1-ck2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: cilium-operator
ports:
- containerPort: 9963
hostPort: 9963
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
volumes:
- configMap:
defaultMode: 420
name: cilium-config
name: cilium-config-path
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2025-11-02T23:25:55Z"
lastUpdateTime: "2025-11-02T23:25:55Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
- lastTransitionTime: "2025-11-02T23:25:54Z"
lastUpdateTime: "2025-11-02T23:26:14Z"
message: ReplicaSet "cilium-operator-678d7868c8" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
observedGeneration: 2
readyReplicas: 1
replicas: 1
updatedReplicas: 1
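# The operator runs with hostNetwork and advertises Prometheus metrics on
# port 9963 (per the pod annotations above). A sketch for scraping it by
# hand; /metrics is the conventional path, assumed rather than taken from
# this manifest:
#
#   kubectl -n kube-system port-forward deploy/cilium-operator 9963:9963
#   curl -s http://127.0.0.1:9963/metrics | head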
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: ck-dns
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:48Z"
generation: 1
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coredns
app.kubernetes.io/version: 1.12.0-ck1
helm.sh/chart: coredns-1.39.2
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
resourceVersion: "2119890"
uid: 38a62248-26ac-48f7-8c4d-adb99f162968
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: fc4c9b56cf9b744229b59b8dd4ae4d31d7bb5b8a21d2589279812d3ddb6e384f
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
creationTimestamp: null
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: ghcr.io/canonical/coredns:1.12.0-ck1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: udp-53
protocol: UDP
- containerPort: 53
name: tcp-53
protocol: TCP
- containerPort: 9153
name: tcp-9153
protocol: TCP
readinessProbe:
failureThreshold: 1
httpGet:
path: /ready
port: 8181
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
readOnlyRootFilesystem: false
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
dnsPolicy: Default
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
name: ck-dns-coredns
name: config-volume
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2025-11-02T23:25:53Z"
lastUpdateTime: "2025-11-02T23:25:53Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
- lastTransitionTime: "2025-11-02T23:25:53Z"
lastUpdateTime: "2025-11-02T23:27:25Z"
message: ReplicaSet "coredns-fc9c778db" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
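# The probes above define CoreDNS's health endpoints: /health on 8080
# (liveness) and /ready on 8181 (readiness). To hit the readiness endpoint
# directly:
#
#   kubectl -n kube-system port-forward deploy/coredns 8181:8181
#   curl -s http://127.0.0.1:8181/ready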
- apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: metrics-server
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:49Z"
generation: 1
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: metrics-server
app.kubernetes.io/version: 0.7.2
helm.sh/chart: metrics-server-3.12.2
name: metrics-server
namespace: kube-system
resourceVersion: "2119812"
uid: 8bc2f677-b2a7-4729-85d1-0a8926b847a6
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
spec:
containers:
- args:
- --secure-port=10250
- --cert-dir=/tmp
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: ghcr.io/canonical/metrics-server:0.7.2-ck0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp
name: tmp
dnsPolicy: ClusterFirst
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: metrics-server
serviceAccountName: metrics-server
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: tmp
status:
availableReplicas: 1
conditions:
- lastTransitionTime: "2025-11-02T23:25:53Z"
lastUpdateTime: "2025-11-02T23:27:15Z"
message: ReplicaSet "metrics-server-8694c96fb7" has successfully progressed.
reason: NewReplicaSetAvailable
status: "True"
type: Progressing
- lastTransitionTime: "2025-11-11T23:57:50Z"
lastUpdateTime: "2025-11-11T23:57:50Z"
message: Deployment has minimum availability.
reason: MinimumReplicasAvailable
status: "True"
type: Available
observedGeneration: 1
readyReplicas: 1
replicas: 1
updatedReplicas: 1
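# To follow a rollout of this Deployment and confirm it settles:
#
#   kubectl -n kube-system rollout status deploy/metrics-server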
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: "1"
deployment.kubernetes.io/max-replicas: "2"
deployment.kubernetes.io/revision: "2"
meta.helm.sh/release-name: ck-network
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:26:02Z"
generation: 1
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
pod-template-hash: 678d7868c8
name: cilium-operator-678d7868c8
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: cilium-operator
uid: efe28031-7016-437c-896d-7c183a96b85b
resourceVersion: "2119151"
uid: 54d5cbf9-63e4-45f6-a6be-22fcac89aae5
spec:
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
pod-template-hash: 678d7868c8
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "2025-11-02T23:26:02Z"
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
pod-template-hash: 678d7868c8
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator-generic
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium-operator-generic:1.17.1-ck2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: cilium-operator
ports:
- containerPort: 9963
hostPort: 9963
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
volumes:
- configMap:
defaultMode: 420
name: cilium-config
name: cilium-config-path
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: "1"
deployment.kubernetes.io/max-replicas: "2"
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: ck-network
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:54Z"
generation: 2
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
pod-template-hash: c9487b6f6
name: cilium-operator-c9487b6f6
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: cilium-operator
uid: efe28031-7016-437c-896d-7c183a96b85b
resourceVersion: "517"
uid: 366e504f-4280-4870-a59a-2268b2f0617e
spec:
replicas: 0
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
pod-template-hash: c9487b6f6
template:
metadata:
annotations:
prometheus.io/port: "9963"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
pod-template-hash: c9487b6f6
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
command:
- cilium-operator-generic
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: ghcr.io/canonical/cilium-operator-generic:1.17.1-ck2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
name: cilium-operator
ports:
- containerPort: 9963
hostPort: 9963
name: prometheus
protocol: TCP
readinessProbe:
failureThreshold: 5
httpGet:
host: 127.0.0.1
path: /healthz
port: 9234
scheme: HTTP
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 3
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
terminationGracePeriodSeconds: 30
tolerations:
- operator: Exists
volumes:
- configMap:
defaultMode: 420
name: cilium-config
name: cilium-config-path
status:
observedGeneration: 2
replicas: 0
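# This ReplicaSet is scaled to 0: it is the previous revision of the
# cilium-operator Deployment, retained for rollback (revisionHistoryLimit: 10
# on the Deployment). To list the revisions side by side:
#
#   kubectl -n kube-system get rs -l io.cilium/app=operator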
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: "1"
deployment.kubernetes.io/max-replicas: "2"
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: ck-dns
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:53Z"
generation: 1
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
pod-template-hash: fc9c778db
name: coredns-fc9c778db
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: coredns
uid: 38a62248-26ac-48f7-8c4d-adb99f162968
resourceVersion: "2119889"
uid: 257997c2-8382-443c-81ee-393aabecc552
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
pod-template-hash: fc9c778db
template:
metadata:
annotations:
checksum/config: fc4c9b56cf9b744229b59b8dd4ae4d31d7bb5b8a21d2589279812d3ddb6e384f
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly",
"operator":"Exists"}]'
creationTimestamp: null
labels:
app.kubernetes.io/instance: ck-dns
app.kubernetes.io/name: coredns
k8s-app: coredns
pod-template-hash: fc9c778db
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: ghcr.io/canonical/coredns:1.12.0-ck1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: udp-53
protocol: UDP
- containerPort: 53
name: tcp-53
protocol: TCP
- containerPort: 9153
name: tcp-9153
protocol: TCP
readinessProbe:
failureThreshold: 1
httpGet:
path: /ready
port: 8181
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: true
capabilities:
add:
- NET_BIND_SERVICE
readOnlyRootFilesystem: false
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
dnsPolicy: Default
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
name: ck-dns-coredns
name: config-volume
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
- apiVersion: apps/v1
kind: ReplicaSet
metadata:
annotations:
deployment.kubernetes.io/desired-replicas: "1"
deployment.kubernetes.io/max-replicas: "2"
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: metrics-server
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:53Z"
generation: 1
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
pod-template-hash: 8694c96fb7
name: metrics-server-8694c96fb7
namespace: kube-system
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: Deployment
name: metrics-server
uid: 8bc2f677-b2a7-4729-85d1-0a8926b847a6
resourceVersion: "2119810"
uid: d07039d9-15e5-41cf-af16-2d4e1122b29a
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
pod-template-hash: 8694c96fb7
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: metrics-server
app.kubernetes.io/name: metrics-server
pod-template-hash: 8694c96fb7
spec:
containers:
- args:
- --secure-port=10250
- --cert-dir=/tmp
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: ghcr.io/canonical/metrics-server:0.7.2-ck0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: metrics-server
ports:
- containerPort: 10250
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /tmp
name: tmp
dnsPolicy: ClusterFirst
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: metrics-server
serviceAccountName: metrics-server
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: tmp
status:
availableReplicas: 1
fullyLabeledReplicas: 1
observedGeneration: 1
readyReplicas: 1
replicas: 1
- apiVersion: apps/v1
kind: StatefulSet
metadata:
annotations:
meta.helm.sh/release-name: ck-storage
meta.helm.sh/release-namespace: kube-system
creationTimestamp: "2025-11-02T23:25:50Z"
generation: 1
labels:
app.kubernetes.io/managed-by: Helm
name: ck-storage-rawfile-csi-controller
namespace: kube-system
resourceVersion: "2119701"
uid: 80bd7171-7f66-4fac-b489-836a98e06d72
spec:
persistentVolumeClaimRetentionPolicy:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: controller
serviceName: ck-storage-rawfile-csi
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: ck-storage
app.kubernetes.io/name: rawfile-csi
component: controller
spec:
containers:
- args:
- --args
- rawfile
- csi-driver
- --disable-metrics
env:
- name: PROVISIONER_NAME
value: rawfile.csi.openebs.io
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: IMAGE_REPOSITORY
value: ghcr.io/canonical/rawfile-localpv
image: ghcr.io/canonical/rawfile-localpv:0.8.2-ck1
imagePullPolicy: IfNotPresent
name: csi-driver
ports:
- containerPort: 9808
name: csi-probe
protocol: TCP
resources:
limits:
cpu: "1"
memory: 100Mi
requests:
cpu: 10m
memory: 100Mi
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --csi-address=$(ADDRESS)
- --handle-volume-inuse-error=false
env:
- name: ADDRESS
value: /csi/csi.sock
image: ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.2
imagePullPolicy: IfNotPresent
name: external-resizer
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /csi
name: socket-dir
dnsPolicy: ClusterFirst
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: ck-storage-rawfile-csi-driver
serviceAccountName: ck-storage-rawfile-csi-driver
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: "true"
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
value: "true"
volumes:
- emptyDir: {}
name: socket-dir
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
status:
availableReplicas: 1
collisionCount: 0
currentReplicas: 1
currentRevision: ck-storage-rawfile-csi-controller-6ddc646dbb
observedGeneration: 1
readyReplicas: 1
replicas: 1
updateRevision: ck-storage-rawfile-csi-controller-6ddc646dbb
updatedReplicas: 1
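# To watch this single-replica controller StatefulSet through an update
# ("sts" is the built-in short name):
#
#   kubectl -n kube-system rollout status sts/ck-storage-rawfile-csi-controller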
kind: List
metadata:
resourceVersion: ""