{"tests":[{"name":"cl.etcd-member.v2-backup-restore","result":"PASS","duration":43915926706,"output":"        cluster.go:125: 2026-04-23T23:49:12Z\tinfo\tetcdutl/backup_command.go:216\tignoring EntryConfChange raft entry\n        cluster.go:125: 2026-04-23T23:49:12Z\tinfo\tetcdutl/backup_command.go:231\tignoring member attribute update on\t{\"entry\": \"Term:2 Index:3 Data:\\\"\\\\010\\\\202\\\\210\\\\213\\\\372\\\\313\\\\267\\\\347\\\\333\\\\272\\\\001\\\\022\\\\003PUT\\\\032\u0026/0/members/a89776f56a6abab7/attributes\\\\\\\"Q{\\\\\\\"name\\\\\\\":\\\\\\\"d48ba14e65ef45d5b7f904766895c275\\\\\\\",\\\\\\\"clientURLs\\\\\\\":[\\\\\\\"http://10.0.0.4:2379\\\\\\\"]}(\\\\0002\\\\0008\\\\000H\\\\000P\\\\000X\\\\000`\\\\000h\\\\000p\\\\000x\\\\000\\\\200\\\\001\\\\000\\\" \", \"v2Req.Path\": \"/0/members/a89776f56a6abab7/attributes\"}\n        cluster.go:125: 2026-04-23T23:49:12Z\tinfo\tetcdutl/backup_command.go:252\tignoring v3 raft entry\n        cluster.go:125: 2026-04-23T23:49:12Z\tinfo\tmembership/store.go:119\tTrimming membership information from the backend...\n"},{"name":"cl.disk.raid0.data","result":"PASS","duration":63009356861,"output":""},{"name":"cl.ignition.oem.reuse","result":"PASS","duration":66594246376,"output":""},{"name":"cl.toolbox.dnf-install","result":"PASS","duration":48265575317,"output":""},{"name":"cl.disk.raid1.root","result":"PASS","duration":68160193683,"output":""},{"name":"cl.cloudinit.basic","result":"PASS","duration":47865283585,"output":""},{"name":"sysext.disable-docker","result":"PASS","duration":52321482159,"output":""},{"name":"coreos.ignition.once","result":"PASS","duration":87804656981,"output":""},{"name":"coreos.ignition.sethostname","result":"PASS","duration":41521404208,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/node_readiness","result":"PASS","duration":25500469885,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/nginx_deployment","result":"PASS","duration":18691715888,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/NFS_deployment","result":"PASS","duration":24398742823,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"cl.ignition.oem.indirect","result":"PASS","duration":66546990825,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/IPSec_encryption","result":"PASS","duration":29965010346,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" 
subsys=klog\n"},{"name":"kubeadm.v1.35.1.cilium.base","result":"PASS","duration":275336756209,"output":"        cluster.go:125: I0423 23:52:22.113513    2020 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: I0423 23:52:42.923886    2256 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.11]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        
cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.014164479s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.11:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.537648206s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.704729681s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 7.505202594s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: h8njt4.jx0jprdhcixzms9d\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.11:6443 --token h8njt4.jx0jprdhcixzms9d \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:9b8bfaab8116e3b8e73157699dffa69aeaa1791fc43ac372a4419105b2a44a0f \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? 
Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0423 23:53:34.088432    1830 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.cilium.base/node_readiness (25.50s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/nginx_deployment (18.69s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/NFS_deployment (24.40s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.35.1.cilium.base/IPSec_encryption (29.97s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"coreos.selinux.enforce","result":"FAIL","duration":54157278906,"output":"        selinux.go:115: failed to reboot machine: machine \"99220e37-7c1c-40ce-8b16-1e10d40eb8f5\" failed basic checks: some systemd units failed:\n● ldconfig.service loaded failed failed Rebuild Dynamic Linker Cache\nstatus: \njournal:-- No entries --\n        harness.go:616: Found systemd unit failed to start (\u001b[0;1;39mldconfig.service\u001b[0m - Rebuild Dynamic Linker Cache.\r\r) on machine 99220e37-7c1c-40ce-8b16-1e10d40eb8f5 
console\n"},{"name":"cl.ignition.luks","result":"PASS","duration":76477775928,"output":""},{"name":"kubeadm.v1.33.8.calico.base/node_readiness","result":"PASS","duration":37844657615,"output":""},{"name":"coreos.ignition.resource.remote","result":"PASS","duration":44382884327,"output":""},{"name":"kubeadm.v1.33.8.calico.base/nginx_deployment","result":"PASS","duration":11182232413,"output":""},{"name":"kubeadm.v1.33.8.calico.base/NFS_deployment","result":"PASS","duration":24564576936,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"PASS","duration":225478509207,"output":"        cluster.go:125: I0423 23:55:54.382977    2018 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0423 23:56:10.084778    2237 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.18]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate 
generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.001876314s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.18:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.110084746s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 1.860034524s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.003415415s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: k9p3nn.8mjwnp8g8bozidem\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.18:6443 --token k9p3nn.8mjwnp8g8bozidem \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:765cf9663f8f23884aa7501f2c54085eef4032be78ae142e23757d6be4ab9c58 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0423 23:57:28.064508    1837 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.calico.base/node_readiness (37.84s)\n    --- PASS: kubeadm.v1.33.8.calico.base/nginx_deployment (11.18s)\n    --- PASS: kubeadm.v1.33.8.calico.base/NFS_deployment (24.56s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"docker.containerd-restart","result":"PASS","duration":98411989625,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Verifying Checksum\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n"},{"name":"cl.tpm.nonroot","result":"PASS","duration":136591644116,"output":""},{"name":"systemd.journal.user","result":"PASS","duration":27478889195,"output":""},{"name":"docker.enable-service.sysext","result":"PASS","duration":30897017355,"output":"        cluster.go:152: + systemctl is-enabled docker\n"},{"name":"cl.ignition.kargs","result":"PASS","duration":51676435764,"output":"        cluster.go:152: + cat /proc/cmdline\n"},{"name":"cl.sysext.fallbackdownload","result":"PASS","duration":61055880598,"output":"        update.go:664: Rebooting test machine\n"},{"name":"cl.locksmith.cluster","result":"PASS","duration":390967400140,"output":""},{"name":"coreos.ignition.security.tls","result":"PASS","duration":86018698484,"output":""},{"name":"cl.ignition.partition_on_boot_disk","result":"PASS","duration":47821410765,"output":"        filesystem.go:487: lsblk -f:\nNAME    FSTYPE   FSVER LABEL      UUID                                 FSAVAIL FSUSE% MOUNTPOINTS\nloop2   squashfs 4.0                                                                  \nloop3   squashfs 4.0                                                                  \nsr0                                                                                   \nvda                                                                                   
\n|-vda1  vfat     FAT32 EFI-SYSTEM 2BE6-8EC2                                           \n|-vda2                                                                                \n|-vda3  btrfs                     b0afcb9a-4dc6-42cc-b61f-b370046a03ca                \n| `-usr btrfs                     b0afcb9a-4dc6-42cc-b61f-b370046a03ca    1.2G     0% /usr\n|-vda4                                                                                \n|-vda6  btrfs          OEM        198e7c3b-b6f6-48f6-8d3f-d053e5a12995    123M     1% /oem\n|-vda7                                                                                \n|-vda9  ext4     1.0   ROOT       8c3ace63-1728-4b5e-a7b6-4ef650e41ba1    5.9G     0% /\n`-vda10 xfs            VAR        0ee5b79f-97f1-42ab-a37f-c0635161b35b    5.2G     3% /var\n        filesystem.go:489: findmnt:\nTARGET                                                        SOURCE          FSTYPE     OPTIONS\n/                                                             /dev/vda9       ext4       rw,relatime,seclabel\n|-/boot                                                       systemd-1       autofs     rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=5539\n|-/media                                                      tmpfs           tmpfs      rw,nosuid,nodev,noexec,relatime,seclabel\n|-/tmp                                                        tmpfs           tmpfs      rw,nosuid,nodev,seclabel,size=1233088k,nr_inodes=1048576\n|-/dev                                                        devtmpfs        devtmpfs   rw,nosuid,seclabel,size=4096k,nr_inodes=297573,mode=755\n| |-/dev/hugepages                                            hugetlbfs       hugetlbfs  rw,nosuid,nodev,relatime,seclabel,pagesize=2M\n| |-/dev/mqueue                                               mqueue          mqueue     rw,nosuid,nodev,noexec,relatime,seclabel\n| |-/dev/shm                                                  tmpfs           tmpfs      rw,nosuid,nodev,seclabel\n| `-/dev/pts                                                  devpts          devpts     rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000\n|-/sys                                                        sysfs           sysfs      rw,nosuid,nodev,noexec,relatime,seclabel\n| |-/sys/fs/selinux                                           selinuxfs       selinuxfs  rw,nosuid,noexec,relatime\n| |-/sys/kernel/debug                                         debugfs         debugfs    rw,nosuid,nodev,noexec,relatime,seclabel\n| |-/sys/kernel/tracing                                       tracefs         tracefs    rw,nosuid,nodev,noexec,relatime,seclabel\n| |-/sys/kernel/config                                        configfs        configfs   rw,nosuid,nodev,noexec,relatime\n| |-/sys/kernel/security                                      securityfs      securityfs rw,nosuid,nodev,noexec,relatime\n| |-/sys/fs/cgroup                                            cgroup2         cgroup2    rw,nosuid,nodev,noexec,relatime,seclabel,nsdelegate,memory_recursiveprot\n| |-/sys/fs/pstore                                            pstore          pstore     rw,nosuid,nodev,noexec,relatime,seclabel\n| |-/sys/firmware/efi/efivars                                 efivarfs        efivarfs   rw,nosuid,nodev,noexec,relatime\n| |-/sys/fs/bpf                                               bpf             bpf        rw,nosuid,nodev,noexec,relatime,mode=700\n| `-/sys/fs/fuse/connections                                  fusectl      
   fusectl    rw,nosuid,nodev,noexec,relatime\n|-/proc                                                       proc            proc       rw,nosuid,nodev,noexec,relatime\n| `-/proc/sys/fs/binfmt_misc                                  systemd-1       autofs     rw,relatime,fd=34,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=5543\n|-/run                                                        tmpfs           tmpfs      rw,nosuid,nodev,seclabel,size=493236k,nr_inodes=819200,mode=755\n| |-/run/user/500                                             tmpfs           tmpfs      rw,nosuid,nodev,relatime,seclabel,size=246616k,nr_inodes=61654,mode=700,uid=500,gid=500\n| |-/run/credentials/systemd-journald.service                 tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-network-generator.service        tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-udev-load-credentials.service    tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-sysctl.service                   tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-tmpfiles-setup-dev-early.service tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-sysusers.service                 tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-tmpfiles-setup-dev.service       tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-networkd.service                 tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-vconsole-setup.service           tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-tmpfiles-setup.service           tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/systemd-resolved.service                 tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| |-/run/credentials/getty@tty1.service                       tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n| `-/run/credentials/serial-getty@ttyS0.service               tmpfs           tmpfs      ro,nosuid,nodev,noexec,relatime,nosymfollow,seclabel,size=1024k,nr_inodes=1024,mode=700,noswap\n|-/var                                                        /dev/vda10      xfs        rw,relatime,seclabel,attr2,inode64,logbufs=8,logbsize=32k,noquota\n|-/usr                                                        /dev/mapper/usr btrfs      ro,relatime,seclabel,rescue=nologreplay,space_cache=v2,subvolid=5,subvol=/\n| `-/usr                                                      sysext          overlay    
ro,nodev,relatime,seclabel,lowerdir=/run/systemd/sysext/meta/usr:/run/systemd/sysext/extensions/docker-flatcar/usr:/run/systemd/sysext/extensions/containerd-flatcar/usr:/usr\n|-/etc                                                        overlay         overlay    rw,noatime,seclabel,lowerdir=/sysroot/usr/share/flatcar/etc,upperdir=/sysroot/etc,workdir=/sysroot/.etc-work,uuid=on,metacopy=off\n`-/oem                                                        /dev/vda6       btrfs      rw,nodev,relatime,seclabel,discard=async,space_cache=v2,subvolid=5,subvol=/\n        cluster.go:152: + findmnt /var\n        cluster.go:152: + cat /var/hello\n"},{"name":"coreos.ignition.groups","result":"PASS","duration":41748848077,"output":""},{"name":"cl.osreset.ignition-rerun","result":"PASS","duration":84587044635,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/node_readiness","result":"PASS","duration":17474901115,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/nginx_deployment","result":"PASS","duration":12272563133,"output":""},{"name":"coreos.selinux.boolean","result":"FAIL","duration":68613054957,"output":"        selinux.go:115: failed to reboot machine: machine \"7d69933b-b9b7-462e-a95f-1c924edb1f10\" failed basic checks: some systemd units failed:\n● ldconfig.service loaded failed failed Rebuild Dynamic Linker Cache\nstatus: \njournal:-- No entries --\n        harness.go:616: Found systemd unit failed to start (\u001b[0;1;39mldconfig.service\u001b[0m - Rebuild Dynamic Linker Cache.\r\r) on machine 7d69933b-b9b7-462e-a95f-1c924edb1f10 console\n"},{"name":"kubeadm.v1.35.1.flannel.base/NFS_deployment","result":"PASS","duration":29652177993,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.flannel.base","result":"PASS","duration":322123612125,"output":"        cluster.go:125: I0424 00:07:36.601307    2015 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: I0424 00:08:15.262699    2281 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using 
certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.34]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.004992976s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.34:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.033212675s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 10.560339535s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 16.02312144s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: rpo73c.fd2l0ernfj709qiq\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        
cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.34:6443 --token rpo73c.fd2l0ernfj709qiq \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ce54ce370c42f33dd880b905367af2a7c8c617fd26e11ec473b1d3475ba3f0e4 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0424 00:09:36.835490    1788 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.flannel.base/node_readiness (17.47s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/nginx_deployment (12.27s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/NFS_deployment (29.65s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"cl.ignition.v1.btrfsroot","result":"PASS","duration":28803956884,"output":""},{"name":"devcontainer.docker","result":"PASS","duration":1295855439711,"output":"        cluster.go:125: + set -euo pipefail\n        cluster.go:125: + source /home/core/download-library.sh\n        cluster.go:125: + download_dev_container_image flatcar_developer_container.bin\n        cluster.go:125: + local output_bin=flatcar_developer_container.bin\n        cluster.go:125: + shift\n        cluster.go:125: + local arch version image_url bzip2cat\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo amd64\n        cluster.go:125: + arch=amd64\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo 4459.2.4+nightly-20260423-2100\n        cluster.go:125: + version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ process_template http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2 amd64 4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ local template=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local arch=amd64\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local 
result=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ echo http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + image_url=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + echo 'Fetching developer container from http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2'\n        cluster.go:125: + curl --fail --silent --show-error --location --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 --remote-name http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + bzip2cat=bzcat\n        cluster.go:125: + command -v lbzcat\n        cluster.go:125: + bzip2cat=lbzcat\n        cluster.go:125: ++ lbzcat flatcar_developer_container.bin.bz2\n        cluster.go:125: + cp --sparse=always /dev/fd/63 flatcar_developer_container.bin\n        cluster.go:125: + source /usr/share/coreos/release\n        cluster.go:125: ++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: ++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: + ARCH=amd64\n        cluster.go:125: + VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ process_template http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs amd64 4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ local template=http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local arch=amd64\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local result=http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/boards/amd64-usr/@VERSION@/pkgs\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: ++ echo http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + EXPECTED_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: + workdir=/home/core/dev-container-workdir-7014\n        cluster.go:125: + USR_SRC_DIR=/home/core/dev-container-workdir-7014/src\n        cluster.go:125: + VAR_TMP_DIR=/home/core/dev-container-workdir-7014/tmp\n        cluster.go:125: + mkdir -p /home/core/dev-container-workdir-7014/src /home/core/dev-container-workdir-7014/tmp\n        cluster.go:125: ++ parted flatcar_developer_container.bin unit b print\n        cluster.go:125: ++ tail --lines=1\n        cluster.go:125: ++ grep Start --after-context=1\n        cluster.go:125: ++ head --bytes=-2\n      
  cluster.go:125: ++ awk '{ print $2 }'\n        cluster.go:125: + offset=2097152\n        cluster.go:125: + mkdir root\n        cluster.go:125: + sudo mount -o loop,ro,offset=2097152 flatcar_developer_container.bin root\n        cluster.go:125: + sudo tar -C root -czf dev-container-image.tar.gz .\n        cluster.go:125: + sudo umount root\n        cluster.go:125: + rm -f flatcar_developer_container.bin\n        cluster.go:125: + docker import dev-container-image.tar.gz dev-container:42\n        cluster.go:125: + sudo rm -f dev-container-image.tar.gz\n        cluster.go:125: + restorecon /home/core/dev-container-script\n        cluster.go:125: + docker run --log-driver=journald --env PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs --env EXPECTED_VERSION=4459.2.4+nightly-20260423-2100 --mount type=bind,source=/lib/modules,target=/lib/modules,readonly=true --mount type=bind,source=/home/core/dev-container-script,target=/home/core/dev-container-script,readonly=true --mount type=bind,source=/home/core/dev-container-workdir-7014/src,target=/usr/src --mount type=bind,source=/home/core/dev-container-workdir-7014/tmp,target=/var/tmp dev-container:42 /bin/bash /home/core/dev-container-script\n        cluster.go:125: + source /usr/share/coreos/release\n        cluster.go:125: ++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: ++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: + [[ 4459.2.4+nightly-20260423-2100 != \\4\\4\\5\\9\\.\\2\\.\\4\\+\\n\\i\\g\\h\\t\\l\\y\\-\\2\\0\\2\\6\\0\\4\\2\\3\\-\\2\\1\\0\\0 ]]\n        cluster.go:125: + export PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + export 'FEATURES=-ipc-sandbox -network-sandbox'\n        cluster.go:125: + FEATURES='-ipc-sandbox -network-sandbox'\n        cluster.go:125: + emerge-gitclone\n        cluster.go:125: Cloning into '/var/lib/portage/scripts'...\n        cluster.go:125: Updating files: 100% (13423/13423), done.\n        cluster.go:125: Note: switching to 'stable-4459.2.4-nightly-20260423-2100'.\n        cluster.go:125: \n        cluster.go:125: You are in 'detached HEAD' state. You can look around, make experimental\n        cluster.go:125: changes and commit them, and you can discard any commits you make in this\n        cluster.go:125: state without impacting any branches by switching back to a branch.\n        cluster.go:125: \n        cluster.go:125: If you want to create a new branch to retain commits you create, you may\n        cluster.go:125: do so (now or later) by using -c with the switch command. Example:\n        cluster.go:125: \n        cluster.go:125:   git switch -c \u003cnew-branch-name\u003e\n        cluster.go:125: \n        cluster.go:125: Or undo this operation with:\n        cluster.go:125: \n        cluster.go:125:   git switch -\n        cluster.go:125: \n        cluster.go:125: Turn off this advice by setting config variable advice.detachedHead to false\n        cluster.go:125: \n        cluster.go:125: HEAD is now at 4aa1ccb8c8 New version: stable-4459.2.4-nightly-20260423-2100\n        cluster.go:125: + emerge --getbinpkg --verbose coreos-sources\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: Unable to unshare: EPERM (for FEATURES=\"pid-sandbox\")\n        cluster.go:125: + zcat /proc/config.gz\n        cluster.go:125: ++ nproc\n        cluster.go:125: + exec make -C /usr/src/linux -j4 modules_prepare V=1\n        cluster.go:125: \n        cluster.go:125: Error in reading or end of 
file.\n"},{"name":"cl.ignition.v2.ext4root","result":"PASS","duration":31273551308,"output":""},{"name":"cl.tpm.root","result":"PASS","duration":101642741716,"output":""},{"name":"cl.ignition.v1.xfsroot","result":"PASS","duration":50003616721,"output":""},{"name":"cl.tpm.root-cryptenroll","result":"PASS","duration":141473628437,"output":""},{"name":"cl.ignition.oem.indirect.new","result":"PASS","duration":208535468444,"output":""},{"name":"cl.etcd-member.discovery","result":"PASS","duration":180731412249,"output":""},{"name":"coreos.auth.verify","result":"PASS","duration":41051236411,"output":""},{"name":"cl.internet/UpdateEngine","result":"PASS","duration":293735692,"output":""},{"name":"cl.internet/DockerPing","result":"PASS","duration":9799038496,"output":""},{"name":"cl.internet/DockerEcho","result":"PASS","duration":1331128992,"output":""},{"name":"cl.internet/NTPDate","result":"PASS","duration":7117879085,"output":""},{"name":"cl.internet","result":"PASS","duration":52167840948,"output":"    --- PASS: cl.internet/UpdateEngine (0.29s)\n    --- PASS: cl.internet/DockerPing (9.80s)\n    --- PASS: cl.internet/DockerEcho (1.33s)\n    --- PASS: cl.internet/NTPDate (7.12s)\n"},{"name":"bpf.ig/ig","result":"PASS","duration":45903021941,"output":"        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n        cluster.go:125: + docker info\n        cluster.go:125: + docker ps\n        cluster.go:125: + docker images\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. 
--output json --verbose\n        cluster.go:125: + dig kinvolk.io\n        cluster.go:125: + dig flatcar.org\n        cluster.go:125: + dig stable.release.flatcar-linux.net\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"bpf.ig","result":"PASS","duration":48081668836,"output":"    --- PASS: bpf.ig/ig (45.90s)\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n            cluster.go:125: + docker info\n            cluster.go:125: + docker ps\n            cluster.go:125: + docker images\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. --output json --verbose\n            cluster.go:125: + dig kinvolk.io\n            cluster.go:125: + dig flatcar.org\n            cluster.go:125: + dig stable.release.flatcar-linux.net\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"cl.tang.root","result":"PASS","duration":80800286056,"output":"        tang.go:148: Started tang on 10.0.0.1:34713\n"},{"name":"cl.omaha.ping","result":"PASS","duration":27446454817,"output":""},{"name":"devcontainer.systemd-nspawn","result":"PASS","duration":553663992571,"output":"        cluster.go:125: + set -euo pipefail\n        cluster.go:125: + source /home/core/download-library.sh\n        cluster.go:125: + download_dev_container_image flatcar_developer_container.bin\n        cluster.go:125: + local output_bin=flatcar_developer_container.bin\n        cluster.go:125: + shift\n        cluster.go:125: + local arch version image_url bzip2cat\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo amd64\n        cluster.go:125: + arch=amd64\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ 
FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo 4459.2.4+nightly-20260423-2100\n        cluster.go:125: + version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ process_template http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2 amd64 4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ local template=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local arch=amd64\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local result=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ echo http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + image_url=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + echo 'Fetching developer container from http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2'\n        cluster.go:125: + curl --fail --silent --show-error --location --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 --remote-name http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + bzip2cat=bzcat\n        cluster.go:125: + command -v lbzcat\n        cluster.go:125: + bzip2cat=lbzcat\n        cluster.go:125: ++ lbzcat flatcar_developer_container.bin.bz2\n        cluster.go:125: + cp --sparse=always /dev/fd/63 flatcar_developer_container.bin\n        cluster.go:125: + source /usr/share/coreos/release\n        cluster.go:125: ++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: ++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: + ARCH=amd64\n        cluster.go:125: + VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ process_template http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs amd64 4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ local template=http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local arch=amd64\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local result=http://bincache.flatcar-linux.net/boards/@ARCH@-usr/@VERSION@/pkgs\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/boards/amd64-usr/@VERSION@/pkgs\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: ++ echo 
http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + EXPECTED_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: + workdir=/home/core/dev-container-workdir-22657\n        cluster.go:125: + USR_SRC_DIR=/home/core/dev-container-workdir-22657/src\n        cluster.go:125: + VAR_TMP_DIR=/home/core/dev-container-workdir-22657/tmp\n        cluster.go:125: + mkdir -p /home/core/dev-container-workdir-22657/src /home/core/dev-container-workdir-22657/tmp\n        cluster.go:125: + sudo systemd-nspawn --console=pipe --setenv=PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs --setenv=EXPECTED_VERSION=4459.2.4+nightly-20260423-2100 --bind-ro=/lib/modules --bind-ro=/home/core/dev-container-script --bind=/home/core/dev-container-workdir-22657/src:/usr/src --bind=/home/core/dev-container-workdir-22657/tmp:/var/tmp --image=flatcar_developer_container.bin --machine=flatcar-developer-container /bin/bash /home/core/dev-container-script\n        cluster.go:125: + source /usr/share/coreos/release\n        cluster.go:125: ++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: ++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: + [[ 4459.2.4+nightly-20260423-2100 != \\4\\4\\5\\9\\.\\2\\.\\4\\+\\n\\i\\g\\h\\t\\l\\y\\-\\2\\0\\2\\6\\0\\4\\2\\3\\-\\2\\1\\0\\0 ]]\n        cluster.go:125: + export PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + PORTAGE_BINHOST=http://bincache.flatcar-linux.net/boards/amd64-usr/4459.2.4+nightly-20260423-2100/pkgs\n        cluster.go:125: + export 'FEATURES=-ipc-sandbox -network-sandbox'\n        cluster.go:125: + FEATURES='-ipc-sandbox -network-sandbox'\n        cluster.go:125: + emerge-gitclone\n        cluster.go:125: Cloning into '/var/lib/portage/scripts'...\n        cluster.go:125: Note: switching to 'stable-4459.2.4-nightly-20260423-2100'.\n        cluster.go:125: \n        cluster.go:125: You are in 'detached HEAD' state. You can look around, make experimental\n        cluster.go:125: changes and commit them, and you can discard any commits you make in this\n        cluster.go:125: state without impacting any branches by switching back to a branch.\n        cluster.go:125: \n        cluster.go:125: If you want to create a new branch to retain commits you create, you may\n        cluster.go:125: do so (now or later) by using -c with the switch command. 
Example:\n        cluster.go:125: \n        cluster.go:125:   git switch -c \u003cnew-branch-name\u003e\n        cluster.go:125: \n        cluster.go:125: Or undo this operation with:\n        cluster.go:125: \n        cluster.go:125:   git switch -\n        cluster.go:125: \n        cluster.go:125: Turn off this advice by setting config variable advice.detachedHead to false\n        cluster.go:125: \n        cluster.go:125: HEAD is now at 4aa1ccb8c8 New version: stable-4459.2.4-nightly-20260423-2100\n        cluster.go:125: + emerge --getbinpkg --verbose coreos-sources\n        cluster.go:125: + zcat /proc/config.gz\n        cluster.go:125: ++ nproc\n        cluster.go:125: + exec make -C /usr/src/linux -j4 modules_prepare V=1\n        cluster.go:125: \n        cluster.go:125: Error in reading or end of file.\n"},{"name":"cl.disk.raid0.root","result":"PASS","duration":64529858550,"output":""},{"name":"sysext.custom-docker.sysext","result":"PASS","duration":69966102782,"output":"        cluster.go:125: bash: line 1: docker: command not found\n        cluster.go:125: Cloning into 'sysext-bakery'...\n        cluster.go:125: Note: switching to '9850ffd5b2353f45a9b3bf4fb84f8138a149e3e7'.\n        cluster.go:125: \n        cluster.go:125: You are in 'detached HEAD' state. You can look around, make experimental\n        cluster.go:125: changes and commit them, and you can discard any commits you make in this\n        cluster.go:125: state without impacting any branches by switching back to a branch.\n        cluster.go:125: \n        cluster.go:125: If you want to create a new branch to retain commits you create, you may\n        cluster.go:125: do so (now or later) by using -c with the switch command. Example:\n        cluster.go:125: \n        cluster.go:125:   git switch -c \u003cnew-branch-name\u003e\n        cluster.go:125: \n        cluster.go:125: Or undo this operation with:\n        cluster.go:125: \n        cluster.go:125:   git switch -\n        cluster.go:125: \n        cluster.go:125: Turn off this advice by setting config variable advice.detachedHead to false\n        cluster.go:125: \n        cluster.go:125: HEAD is now at 9850ffd Merge pull request #31 from flatcar/t-lo/fix-docker-23-containerd-shim\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Verifying Checksum\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n"},{"name":"cl.update.reboot","result":"FAIL","duration":89795212434,"output":"        update.go:91: couldn't reboot: machine \"a04c5616-4440-45a4-96de-357c36fb4660\" failed basic checks: some systemd units failed:\n● ldconfig.service loaded failed failed Rebuild Dynamic Linker Cache\nstatus: \njournal:-- No entries --\n        harness.go:616: Found systemd unit 
failed to start (\u001b[0;1;39mldconfig.service\u001b[0m - Rebuild Dynamic Linker Cache.\r\r) on machine a04c5616-4440-45a4-96de-357c36fb4660 console\n"},{"name":"cl.cloudinit.script","result":"PASS","duration":27652521785,"output":""},{"name":"coreos.ignition.resource.s3.versioned","result":"PASS","duration":27609372061,"output":""},{"name":"cl.ignition.v1.ext4root","result":"PASS","duration":29872708716,"output":""},{"name":"docker.network-openbsd-nc","result":"PASS","duration":32408160973,"output":"        docker.go:413: creating netcat containers\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.68MB 0.0s done\n        cluster.go:125: #3 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:3e48874bd4fdfa28f2e33f086a7ec75c40df1837e47e1f072a6319670123ba76 done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.0s\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.68MB 0.0s done\n        cluster.go:125: #3 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:922c0a55bcf40af3fa670e579cddc3d12331d75096237011237e6c26b90c1dc4\n        cluster.go:125: #5 writing image sha256:922c0a55bcf40af3fa670e579cddc3d12331d75096237011237e6c26b90c1dc4 done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.0s\n"},{"name":"cl.update.badverity","result":"PASS","duration":335021851052,"output":""},{"name":"cl.ignition.oem.regular","result":"PASS","duration":53499862676,"output":""},{"name":"coreos.ignition.systemd.enable-service","result":"PASS","duration":34616842421,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/node_readiness","result":"PASS","duration":41771190497,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/nginx_deployment","result":"PASS","duration":15954646541,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/NFS_deployment","result":"PASS","duration":22336157390,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"cl.tang.nonroot","result":"PASS","duration":111584809000,"output":"        tang.go:148: Started tang on 10.0.0.1:45243\n"},{"name":"kubeadm.v1.33.8.cilium.base/IPSec_encryption","result":"PASS","duration":24860980315,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"PASS","duration":248803651549,"output":"        cluster.go:125: I0424 00:23:50.856068    2054 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0424 00:23:59.312423    2256 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes 
version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.66]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.002853792s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.66:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.50680636s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 1.840093054s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.003829067s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: u7bkea.bwwj4rl2vpzkikgx\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        
cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.66:6443 --token u7bkea.bwwj4rl2vpzkikgx \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:c27e3a6cba1ee0cb5a3965f6ead16fc6541c217862e65fdfef6e2339fef35926 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0424 00:24:55.098343    1796 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.cilium.base/node_readiness (41.77s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/nginx_deployment (15.95s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/NFS_deployment (22.34s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.33.8.cilium.base/IPSec_encryption (24.86s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"cl.ignition.v1.users","result":"PASS","duration":24722750865,"output":""},{"name":"coreos.locksmith.reboot","result":"PASS","duration":351454977403,"output":""},{"name":"cl.tpm.root-cryptenroll-pcr-noupdate","result":"PASS","duration":168384443544,"output":""},{"name":"cl.verity/verify","result":"PASS","duration":1941644900,"output":"        cluster.go:125: Success\n"},{"name":"cl.ignition.translation","result":"PASS","duration":26478514312,"output":"        cluster.go:152: + ip --json address show kola | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n        cluster.go:152: + cat 
/etc/systemd/network/00-dummy.network\n"},{"name":"linux.nfs.v4","result":"PASS","duration":46495415199,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.CLSw9qgvUL\" created on server.\n        nfs.go:125: NFS client booted.\n        nfs.go:133: Got NFS mount.\n"},{"name":"cl.network.listeners","result":"PASS","duration":26756200183,"output":""},{"name":"cl.ignition.instantiated.enable-unit","result":"PASS","duration":41833327262,"output":""},{"name":"cl.verity/corruption","result":"PASS","duration":169110501752,"output":""},{"name":"coreos.update.badusr","result":"PASS","duration":383622986874,"output":""},{"name":"cl.verity","result":"PASS","duration":196229500502,"output":"    --- PASS: cl.verity/verify (1.94s)\n            cluster.go:125: Success\n    --- PASS: cl.verity/corruption (169.11s)\n"},{"name":"cl.ignition.v2_1.vfat","result":"PASS","duration":24632415879,"output":""},{"name":"cl.ignition.v2.btrfsroot","result":"PASS","duration":29239909455,"output":""},{"name":"cl.ignition.oem.regular.new","result":"PASS","duration":65851888207,"output":""},{"name":"cl.install.cloudinit","result":"PASS","duration":32194523206,"output":""},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":65283143058,"output":""},{"name":"cl.flannel.vxlan","result":"PASS","duration":79900853749,"output":"        flannel.go:121: ping from e2bbb37c-26d9-4a11-912c-2e13c068cccf(10.254.45.0) to 26d37b8d-4c85-41cd-ae61-c970d5d5394c(10.254.39.0)\n"},{"name":"cl.ignition.v2.users","result":"PASS","duration":42321577225,"output":""},{"name":"cl.network.nftables","result":"PASS","duration":25342722952,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"cl.ignition.v1.once","result":"PASS","duration":68701409584,"output":""},{"name":"cl.overlay.cleanup","result":"PASS","duration":314344264387,"output":""},{"name":"kubeadm.v1.35.1.calico.base/node_readiness","result":"PASS","duration":184309572958,"output":""},{"name":"systemd.journal.remote","result":"PASS","duration":228134465659,"output":"        cluster.go:125: Running as unit: systemd-journal-remote-client.service; invocation ID: b8f8a0dab422463c800ac269d14bd135\n"},{"name":"kubeadm.v1.35.1.calico.base/nginx_deployment","result":"PASS","duration":80027627342,"output":""},{"name":"cl.ignition.oem.wipe","result":"PASS","duration":61918394649,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/node_readiness","result":"PASS","duration":62556102548,"output":""},{"name":"kubeadm.v1.35.1.calico.base/NFS_deployment","result":"PASS","duration":55259760495,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"docker.lib-coreos-dockerd-compat/docker-info","result":"PASS","duration":7100124030,"output":""},{"name":"kubeadm.v1.35.1.calico.base","result":"PASS","duration":749562041123,"output":"        cluster.go:125: I0424 00:35:55.739705    2064 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [config/images] Pulled 
registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: I0424 00:36:26.656150    2327 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.92]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file 
\"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.004955738s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.92:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.033720971s\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 4.037418765s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 13.027430335s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: c9kw5v.1s06vy0g58bh7hy3\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.92:6443 --token c9kw5v.1s06vy0g58bh7hy3 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:602d69d956eb48069634b43304823f47fd9788d4e3d4e3128a34e81fa8cb6155 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0424 00:39:57.504406    1849 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.calico.base/node_readiness (184.31s)\n    --- PASS: kubeadm.v1.35.1.calico.base/nginx_deployment (80.03s)\n    --- PASS: kubeadm.v1.35.1.calico.base/NFS_deployment (55.26s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"docker.lib-coreos-dockerd-compat/resources","result":"PASS","duration":8355151108,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.42MB 0.1s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:dccd9130df1340fd37219eb9e27ddf9add62f22c63fc2031cf101065b045a275 done\n        cluster.go:125: #5 naming to docker.io/library/sleep 0.0s done\n        cluster.go:125: #5 DONE 0.2s\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. 
OomKillDisable discarded.\n"},{"name":"kubeadm.v1.34.4.cilium.base/nginx_deployment","result":"PASS","duration":46671226691,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/NFS_deployment","result":"PASS","duration":29364804113,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"cl.disk.raid1.data","result":"PASS","duration":60436573016,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/IPSec_encryption","result":"PASS","duration":26565412179,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"PASS","duration":813131975742,"output":"        cluster.go:125: I0424 00:36:34.308131    2058 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0424 00:37:12.647678    2345 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.94]\n        
cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 2.720269839s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.94:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 4.685496315s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 10.201698721s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 18.057736326s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: owrntd.nqw12l077tm14uc3\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        
cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.94:6443 --token owrntd.nqw12l077tm14uc3 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:80096e4af7b6807e17c65222d824581ce94752703ddd0dc3f53524f03435625d \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet         cilium             Desired: 1, Ready: \u001b[32m1/1\u001b[0m, Available: \u001b[32m1/1\u001b[0m\n        cluster.go:125: Deployment        cilium-operator    Desired: 1, Ready: \u001b[32m1/1\u001b[0m, Available: \u001b[32m1/1\u001b[0m\n        cluster.go:125: Containers:       cilium             Running: \u001b[32m1\u001b[0m\n        cluster.go:125:                   cilium-operator    Running: \u001b[32m1\u001b[0m\n        cluster.go:125: Cluster Pods:     2/2 managed by Cilium\n        cluster.go:125: Image versions    cilium             quay.io/cilium/cilium:v1.12.5@sha256:06ce2b0a0a472e73334a7504ee5c5d8b2e2d7b72ef728ad94e564740dd505be5: 1\n        cluster.go:125:                   cilium-operator    quay.io/cilium/operator-generic:v1.12.5@sha256:b296eb7f0f7656a5cc19724f40a8a7121b7fd725278b7d61dc91fe0b7ffd7c0e: 1\n        cluster.go:125: W0424 00:44:22.283859    1854 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.cilium.base/node_readiness (62.56s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/nginx_deployment (46.67s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/NFS_deployment (29.36s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.34.4.cilium.base/IPSec_encryption (26.57s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"cl.ignition.v1.sethostname","result":"PASS","duration":26467266842,"output":""},{"name":"linux.nfs.v3","result":"PASS","duration":51881063434,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.rLDXySRYYE\" created on 
server.\n        nfs.go:125: NFS client booted.\n        nfs.go:133: Got NFS mount.\n"},{"name":"misc.fips","result":"PASS","duration":38694690721,"output":"        cluster.go:125: Error setting digest\n        cluster.go:125: 80FB7567667F0000:error:0308010C:digital envelope routines:inner_evp_generic_fetch:unsupported:../openssl-3.4.4/crypto/evp/evp_fetch.c:356:Global default library context, Algorithm (MD5 : 102), Properties ()\n        cluster.go:125: 80FB7567667F0000:error:03000086:digital envelope routines:evp_md_init_internal:initialization error:../openssl-3.4.4/crypto/evp/digest.c:271:\n        cluster.go:152: + cat /proc/sys/crypto/fips_enabled\n"},{"name":"cl.ignition.v1.groups","result":"PASS","duration":29276556872,"output":""},{"name":"cl.swap_activation","result":"PASS","duration":29818929886,"output":""},{"name":"docker.userns","result":"PASS","duration":32284797315,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.47MB 0.0s done\n        cluster.go:125: #3 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:6f7bdb0dda1a7247d2cdd57422534b36c7ddb801761cf5cd6e0b8eec54886e42 done\n        cluster.go:125: #5 naming to docker.io/library/userns-test done\n        cluster.go:125: #5 DONE 0.0s\n"},{"name":"docker.lib-coreos-dockerd-compat/networks-reliably","result":"PASS","duration":167363831631,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B 0.0s done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 5.26MB 0.1s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:5e0c2fd9300475c2fbf0d1ab522afcc13d59054d19386d01ba64e9718adb7c13 done\n        cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/user-no-caps","result":"PASS","duration":1652003886,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 6.27MB 0.1s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:50989310ba7fc1872db728247bb54e4bb30dbfa0617337a9a2a437854750d3f3 done\n        cluster.go:125: #5 naming to docker.io/library/captest done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/ownership","result":"PASS","duration":11664836569,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/nginx\n        cluster.go:125: 9d11882d4b58: Pulling fs layer\n        cluster.go:125: b44d445f805d: Pulling fs layer\n        cluster.go:125: 8bef7a9ebe88: Pulling fs layer\n        cluster.go:125: a0f1c91fa11d: Pulling fs layer\n        cluster.go:125: 2044bb7170df: Pulling fs layer\n        cluster.go:125: 95d77c54a9aa: Pulling fs layer\n        cluster.go:125: c13dd6bf8c10: Pulling fs layer\n        cluster.go:125: 2044bb7170df: Waiting\n        cluster.go:125: a0f1c91fa11d: Waiting\n        cluster.go:125: 95d77c54a9aa: Waiting\n        cluster.go:125: c13dd6bf8c10: Waiting\n        cluster.go:125: 8bef7a9ebe88: Verifying Checksum\n        cluster.go:125: 8bef7a9ebe88: Download complete\n        cluster.go:125: a0f1c91fa11d: Verifying Checksum\n        cluster.go:125: a0f1c91fa11d: Download complete\n        cluster.go:125: b44d445f805d: Verifying Checksum\n        cluster.go:125: b44d445f805d: Download complete\n        cluster.go:125: 2044bb7170df: Verifying Checksum\n        cluster.go:125: 2044bb7170df: Download complete\n        cluster.go:125: 9d11882d4b58: Verifying Checksum\n        cluster.go:125: 9d11882d4b58: Download complete\n        cluster.go:125: 95d77c54a9aa: Verifying Checksum\n        cluster.go:125: 95d77c54a9aa: Download complete\n        cluster.go:125: c13dd6bf8c10: Verifying Checksum\n        cluster.go:125: c13dd6bf8c10: Download complete\n        cluster.go:125: 9d11882d4b58: Pull complete\n        cluster.go:125: b44d445f805d: Pull complete\n        cluster.go:125: 8bef7a9ebe88: Pull complete\n        cluster.go:125: a0f1c91fa11d: Pull complete\n        cluster.go:125: 2044bb7170df: 
Pull complete\n        cluster.go:125: 95d77c54a9aa: Pull complete\n        cluster.go:125: c13dd6bf8c10: Pull complete\n        cluster.go:125: Digest: sha256:6f170eb5f8ea893d94fe3c05cef69270f16614a99d3d922ce8a320efc43b8591\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"cl.cloudinit.multipart-mime","result":"PASS","duration":27110050213,"output":""},{"name":"docker.lib-coreos-dockerd-compat","result":"PASS","duration":239211992809,"output":"    --- PASS: docker.lib-coreos-dockerd-compat/docker-info (7.10s)\n    --- PASS: docker.lib-coreos-dockerd-compat/resources (8.36s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 3.42MB 0.1s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:dccd9130df1340fd37219eb9e27ddf9add62f22c63fc2031cf101065b045a275 done\n            cluster.go:125: #5 naming to docker.io/library/sleep 0.0s done\n            cluster.go:125: #5 DONE 0.2s\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n    --- PASS: docker.lib-coreos-dockerd-compat/networks-reliably (167.36s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B 0.0s done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 5.26MB 0.1s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:5e0c2fd9300475c2fbf0d1ab522afcc13d59054d19386d01ba64e9718adb7c13 done\n            cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/user-no-caps (1.65s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 6.27MB 0.1s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:50989310ba7fc1872db728247bb54e4bb30dbfa0617337a9a2a437854750d3f3 done\n            cluster.go:125: #5 naming to docker.io/library/captest done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/ownership (11.66s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/nginx\n            cluster.go:125: 9d11882d4b58: Pulling fs layer\n            cluster.go:125: b44d445f805d: Pulling fs layer\n            cluster.go:125: 8bef7a9ebe88: Pulling fs layer\n            cluster.go:125: a0f1c91fa11d: Pulling fs layer\n            cluster.go:125: 2044bb7170df: Pulling fs layer\n            cluster.go:125: 95d77c54a9aa: Pulling fs layer\n            cluster.go:125: c13dd6bf8c10: Pulling fs layer\n            cluster.go:125: 2044bb7170df: Waiting\n            cluster.go:125: a0f1c91fa11d: Waiting\n            cluster.go:125: 95d77c54a9aa: Waiting\n            cluster.go:125: c13dd6bf8c10: Waiting\n            cluster.go:125: 8bef7a9ebe88: Verifying Checksum\n            cluster.go:125: 8bef7a9ebe88: Download complete\n            cluster.go:125: a0f1c91fa11d: Verifying Checksum\n            cluster.go:125: a0f1c91fa11d: Download complete\n            cluster.go:125: b44d445f805d: Verifying Checksum\n            cluster.go:125: b44d445f805d: Download complete\n            cluster.go:125: 2044bb7170df: Verifying Checksum\n            cluster.go:125: 2044bb7170df: Download complete\n            cluster.go:125: 9d11882d4b58: Verifying Checksum\n            cluster.go:125: 9d11882d4b58: Download complete\n            cluster.go:125: 95d77c54a9aa: Verifying Checksum\n            cluster.go:125: 95d77c54a9aa: Download complete\n            cluster.go:125: c13dd6bf8c10: Verifying Checksum\n            cluster.go:125: c13dd6bf8c10: Download complete\n            cluster.go:125: 9d11882d4b58: Pull complete\n            cluster.go:125: b44d445f805d: 
Pull complete\n            cluster.go:125: 8bef7a9ebe88: Pull complete\n            cluster.go:125: a0f1c91fa11d: Pull complete\n            cluster.go:125: 2044bb7170df: Pull complete\n            cluster.go:125: 95d77c54a9aa: Pull complete\n            cluster.go:125: c13dd6bf8c10: Pull complete\n            cluster.go:125: Digest: sha256:6f170eb5f8ea893d94fe3c05cef69270f16614a99d3d922ce8a320efc43b8591\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"cl.ignition.v2_1.swap","result":"PASS","duration":29651969881,"output":""},{"name":"cl.ignition.v2_1.ext4checkexisting","result":"PASS","duration":53780307975,"output":""},{"name":"coreos.ignition.resource.local","result":"PASS","duration":71683562269,"output":""},{"name":"docker.btrfs-storage","result":"PASS","duration":52226158945,"output":""},{"name":"docker.base/docker-info","result":"PASS","duration":2761349801,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/node_readiness","result":"PASS","duration":31615451801,"output":""},{"name":"cl.network.wireguard","result":"PASS","duration":46538603666,"output":"        cluster.go:152: + ip --json address show kv0 | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n"},{"name":"docker.base/resources","result":"PASS","duration":5238501035,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.42MB 0.1s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:633aa3ec1cc514d2456204cde1b5da05fc056c6d08bc780c7625087d3003284f done\n        cluster.go:125: #5 naming to docker.io/library/sleep done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. 
Memory swappiness discarded.\n"},{"name":"kubeadm.v1.33.8.flannel.base/nginx_deployment","result":"PASS","duration":10836905980,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/NFS_deployment","result":"PASS","duration":27624963136,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.flannel.base","result":"PASS","duration":208498242573,"output":"        cluster.go:125: I0424 00:50:58.431699    1974 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0424 00:51:08.781717    2190 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.114]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder 
\"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.002175784s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.114:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.007031236s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 1.292484087s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 3.002515029s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: apdjx9.0ch189qjdvxupbdz\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.114:6443 --token apdjx9.0ch189qjdvxupbdz \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:5bcc0b38b26b7da17528a35ddc1a390a6038aff061d6e46381bf0612edb652af \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0424 00:52:08.711926    1773 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: 
\t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.flannel.base/node_readiness (31.62s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/nginx_deployment (10.84s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/NFS_deployment (27.62s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"docker.selinux","result":"PASS","duration":41187861395,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Verifying Checksum\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: sh: can't create /opt/hello: Permission denied\n"},{"name":"cl.network.iptables","result":"PASS","duration":29650969635,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"cl.users.shells","result":"PASS","duration":33764815960,"output":""},{"name":"sysext.custom-oem","result":"PASS","duration":78399114349,"output":"        cluster.go:125: + set -euo pipefail\n        cluster.go:125: + source /home/core/download-library.sh\n        cluster.go:125: + download_dev_container_image flatcar_developer_container.bin\n        cluster.go:125: + local output_bin=flatcar_developer_container.bin\n        cluster.go:125: + shift\n        cluster.go:125: + local arch version image_url bzip2cat\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo amd64\n        cluster.go:125: + arch=amd64\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo 4459.2.4+nightly-20260423-2100\n        cluster.go:125: + version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ process_template http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2 amd64 4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ local template=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local arch=amd64\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ shift\n        cluster.go:125: ++ local 
result=http://bincache.flatcar-linux.net/images/@ARCH@/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/@VERSION@/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ result=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: ++ echo http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + image_url=http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + echo 'Fetching developer container from http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2'\n        cluster.go:125: + curl --fail --silent --show-error --location --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 --remote-name http://bincache.flatcar-linux.net/images/amd64/4459.2.4+nightly-20260423-2100/flatcar_developer_container.bin.bz2\n        cluster.go:125: + bzip2cat=bzcat\n        cluster.go:125: + command -v lbzcat\n        cluster.go:125: + bzip2cat=lbzcat\n        cluster.go:125: + cp --sparse=always /dev/fd/63 flatcar_developer_container.bin\n        cluster.go:125: ++ lbzcat flatcar_developer_container.bin.bz2\n        cluster.go:125: + workdir=/home/core/dev-container-workdir-7074\n        cluster.go:125: + mkdir -p /home/core/dev-container-workdir-7074\n        cluster.go:125: + sudo systemd-nspawn --console=pipe --bind-ro=/home/core/dev-container-script.sh --bind=/home/core/dev-container-workdir-7074:/work --image=flatcar_developer_container.bin --machine=flatcar-developer-container /bin/bash /home/core/dev-container-script.sh\n        cluster.go:125: + set -euo pipefail\n        cluster.go:125: ++ source /etc/os-release\n        cluster.go:125: +++ NAME='Flatcar Container Linux by Kinvolk'\n        cluster.go:125: +++ ID=flatcar\n        cluster.go:125: +++ ID_LIKE=coreos\n        cluster.go:125: +++ VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ VERSION_ID=4459.2.4\n        cluster.go:125: +++ BUILD_ID=nightly-20260423-2100\n        cluster.go:125: +++ SYSEXT_LEVEL=1.0\n        cluster.go:125: +++ PRETTY_NAME='Flatcar Container Linux by Kinvolk 4459.2.4+nightly-20260423-2100 (Oklo)'\n        cluster.go:125: +++ ANSI_COLOR='38;5;75'\n        cluster.go:125: +++ HOME_URL=https://flatcar.org/\n        cluster.go:125: +++ BUG_REPORT_URL=https://issues.flatcar.org\n        cluster.go:125: +++ FLATCAR_BOARD=amd64-usr\n        cluster.go:125: +++ CPE_NAME='cpe:2.3:o:flatcar-linux:flatcar_linux:4459.2.4+nightly-20260423-2100:*:*:*:*:*:*:*'\n        cluster.go:125: ++ echo 4459.2.4+nightly-20260423-2100\n        cluster.go:125: + version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: ++ source /etc/os-release\n        cluster.go:125: +++ NAME='Flatcar Container Linux by Kinvolk'\n        cluster.go:125: +++ ID=flatcar\n        cluster.go:125: +++ ID_LIKE=coreos\n        cluster.go:125: +++ VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ VERSION_ID=4459.2.4\n        cluster.go:125: +++ BUILD_ID=nightly-20260423-2100\n        cluster.go:125: +++ SYSEXT_LEVEL=1.0\n        cluster.go:125: +++ PRETTY_NAME='Flatcar Container Linux by Kinvolk 4459.2.4+nightly-20260423-2100 (Oklo)'\n        cluster.go:125: +++ ANSI_COLOR='38;5;75'\n        
cluster.go:125: +++ HOME_URL=https://flatcar.org/\n        cluster.go:125: +++ BUG_REPORT_URL=https://issues.flatcar.org\n        cluster.go:125: +++ FLATCAR_BOARD=amd64-usr\n        cluster.go:125: +++ CPE_NAME='cpe:2.3:o:flatcar-linux:flatcar_linux:4459.2.4+nightly-20260423-2100:*:*:*:*:*:*:*'\n        cluster.go:125: ++ echo 4459.2.4\n        cluster.go:125: + version_id=4459.2.4\n        cluster.go:125: ++ source /usr/share/flatcar/release\n        cluster.go:125: +++ FLATCAR_RELEASE_VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ FLATCAR_RELEASE_BOARD=amd64-usr\n        cluster.go:125: +++ FLATCAR_RELEASE_APPID='{e96281a6-d1af-4bde-9a0a-97b76e56dc57}'\n        cluster.go:125: ++ echo amd64-usr\n        cluster.go:125: + board=amd64-usr\n        cluster.go:125: + mkdir -p /work/sysext_rootfs/usr/share/flatcar-sysext-kola-test\n        cluster.go:125: + echo 4459.2.4\n        cluster.go:125: + mkdir -p /work/sysext_rootfs/usr/lib/extension-release.d\n        cluster.go:125: + sysext_arch=x86-64\n        cluster.go:125: + [[ amd64-usr = \\a\\r\\m\\6\\4\\-\\u\\s\\r ]]\n        cluster.go:125: + metadata=('ID=flatcar' \"VERSION_ID=${version_id}\" \"ARCHITECTURE=${sysext_arch}\")\n        cluster.go:125: + metadata_file=/work/sysext_rootfs/usr/lib/extension-release.d/extension-release.oem-test\n        cluster.go:125: + printf '%s\\n' ID=flatcar VERSION_ID=4459.2.4 ARCHITECTURE=x86-64\n        cluster.go:125: + mksquashfs /work/sysext_rootfs /work/oem-test-4459.2.4+nightly-20260423-2100.raw -all-root\n        cluster.go:125: ++ source /etc/os-release\n        cluster.go:125: +++ NAME='Flatcar Container Linux by Kinvolk'\n        cluster.go:125: +++ ID=flatcar\n        cluster.go:125: +++ ID_LIKE=coreos\n        cluster.go:125: +++ VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ VERSION_ID=4459.2.4\n        cluster.go:125: +++ BUILD_ID=nightly-20260423-2100\n        cluster.go:125: +++ SYSEXT_LEVEL=1.0\n        cluster.go:125: +++ PRETTY_NAME='Flatcar Container Linux by Kinvolk 4459.2.4+nightly-20260423-2100 (Oklo)'\n        cluster.go:125: +++ ANSI_COLOR='38;5;75'\n        cluster.go:125: +++ HOME_URL=https://flatcar.org/\n        cluster.go:125: +++ BUG_REPORT_URL=https://issues.flatcar.org\n        cluster.go:125: +++ FLATCAR_BOARD=amd64-usr\n        cluster.go:125: +++ CPE_NAME='cpe:2.3:o:flatcar-linux:flatcar_linux:4459.2.4+nightly-20260423-2100:*:*:*:*:*:*:*'\n        cluster.go:125: ++ echo 4459.2.4+nightly-20260423-2100\n        cluster.go:125: + version=4459.2.4+nightly-20260423-2100\n        cluster.go:125: + sysext_file=/home/core/dev-container-workdir-7074/oem-test-4459.2.4+nightly-20260423-2100.raw\n        cluster.go:125: + [[ ! -e /home/core/dev-container-workdir-7074/oem-test-4459.2.4+nightly-20260423-2100.raw ]]\n        cluster.go:125: + [[ ! 
-e /oem/oem-release ]]\n        cluster.go:125: + printf '%s\\n' ID=test VERSION_ID=1.0.0 'NAME=testing stuff'\n        cluster.go:125: + sudo tee /oem/oem-release\n        cluster.go:125: + sudo mkdir -p /oem/sysext\n        cluster.go:125: + sudo mv /home/core/dev-container-workdir-7074/oem-test-4459.2.4+nightly-20260423-2100.raw /oem/sysext\n        cluster.go:125: + sudo touch /oem/sysext/active-oem-test\n        cluster.go:125: + sudo flatcar-reset --keep-machine-id --keep-paths /var/log\n        cluster.go:125: + set -euo pipefail\n        cluster.go:125: ++ systemd-sysext list --json=pretty\n        cluster.go:125: + list_out='[\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"containerd-flatcar\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/containerd-flatcar.raw\",\n        cluster.go:125: \t\t\"time\" : 1776985283770195\n        cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"docker-flatcar\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/docker-flatcar.raw\",\n        cluster.go:125: \t\t\"time\" : 1776985308901015\n        cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"oem-test\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/oem-test.raw\",\n        cluster.go:125: \t\t\"time\" : 1776992060563374\n        cluster.go:125: \t}\n        cluster.go:125: ]'\n        cluster.go:125: ++ systemd-sysext status --json=pretty\n        cluster.go:125: + status_out='[\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"hierarchy\" : \"/opt\",\n        cluster.go:125: \t\t\"extensions\" : \"none\",\n        cluster.go:125: \t\t\"since\" : null\n        cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"hierarchy\" : \"/usr\",\n        cluster.go:125: \t\t\"extensions\" : [\n        cluster.go:125: \t\t\t\"containerd-flatcar\",\n        cluster.go:125: \t\t\t\"docker-flatcar\",\n        cluster.go:125: \t\t\t\"oem-test\"\n        cluster.go:125: \t\t],\n        cluster.go:125: \t\t\"since\" : 1776992073492000\n        cluster.go:125: \t}\n        cluster.go:125: ]'\n        cluster.go:125: + printf 'sysext list:\\n%s\\nsysext status:\\n%s\\n' '[\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"containerd-flatcar\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/containerd-flatcar.raw\",\n        cluster.go:125: \t\t\"time\" : 1776985283770195\n        cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"docker-flatcar\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/docker-flatcar.raw\",\n        cluster.go:125: \t\t\"time\" : 1776985308901015\n        cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"name\" : \"oem-test\",\n        cluster.go:125: \t\t\"type\" : \"raw\",\n        cluster.go:125: \t\t\"path\" : \"/etc/extensions/oem-test.raw\",\n        cluster.go:125: \t\t\"time\" : 1776992060563374\n        cluster.go:125: \t}\n        cluster.go:125: ]' '[\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"hierarchy\" : \"/opt\",\n        cluster.go:125: \t\t\"extensions\" : \"none\",\n        cluster.go:125: \t\t\"since\" : null\n        
cluster.go:125: \t},\n        cluster.go:125: \t{\n        cluster.go:125: \t\t\"hierarchy\" : \"/usr\",\n        cluster.go:125: \t\t\"extensions\" : [\n        cluster.go:125: \t\t\t\"containerd-flatcar\",\n        cluster.go:125: \t\t\t\"docker-flatcar\",\n        cluster.go:125: \t\t\t\"oem-test\"\n        cluster.go:125: \t\t],\n        cluster.go:125: \t\t\"since\" : 1776992073492000\n        cluster.go:125: \t}\n        cluster.go:125: ]'\n        cluster.go:125: ++ jq '.[] | select(.name == \"oem-test\")'\n        cluster.go:125: + list_oem_test='{\n        cluster.go:125:   \"name\": \"oem-test\",\n        cluster.go:125:   \"type\": \"raw\",\n        cluster.go:125:   \"path\": \"/etc/extensions/oem-test.raw\",\n        cluster.go:125:   \"time\": 1776992060563374\n        cluster.go:125: }'\n        cluster.go:125: + [[ -z {\n        cluster.go:125:   \"name\": \"oem-test\",\n        cluster.go:125:   \"type\": \"raw\",\n        cluster.go:125:   \"path\": \"/etc/extensions/oem-test.raw\",\n        cluster.go:125:   \"time\": 1776992060563374\n        cluster.go:125: } ]]\n        cluster.go:125: ++ jq --raw-output .type\n        cluster.go:125: + oem_test_type=raw\n        cluster.go:125: + [[ raw != \\r\\a\\w ]]\n        cluster.go:125: ++ jq --raw-output .path\n        cluster.go:125: + oem_test_path=/etc/extensions/oem-test.raw\n        cluster.go:125: ++ readlink -f /etc/extensions/oem-test.raw\n        cluster.go:125: + oem_test_real_path=/oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw\n        cluster.go:125: + [[ /etc/extensions/oem-test.raw != \\/\\o\\e\\m\\/\\s\\y\\s\\e\\x\\t\\/\\o\\e\\m\\-\\t\\e\\s\\t\\-\\4\\4\\5\\9\\.\\2\\.\\4\\+\\n\\i\\g\\h\\t\\l\\y\\-\\2\\0\\2\\6\\0\\4\\2\\3\\-\\2\\1\\0\\0\\.\\r\\a\\w ]]\n        cluster.go:125: + [[ /etc/extensions/oem-test.raw != \\/\\e\\t\\c\\/\\e\\x\\t\\e\\n\\s\\i\\o\\n\\s\\/\\o\\e\\m\\-\\t\\e\\s\\t\\.\\r\\a\\w ]]\n        cluster.go:125: ++ jq '.[] | select(.hierarchy == \"/usr\")'\n        cluster.go:125: + status_usr='{\n        cluster.go:125:   \"hierarchy\": \"/usr\",\n        cluster.go:125:   \"extensions\": [\n        cluster.go:125:     \"containerd-flatcar\",\n        cluster.go:125:     \"docker-flatcar\",\n        cluster.go:125:     \"oem-test\"\n        cluster.go:125:   ],\n        cluster.go:125:   \"since\": 1776992073492000\n        cluster.go:125: }'\n        cluster.go:125: + [[ -z {\n        cluster.go:125:   \"hierarchy\": \"/usr\",\n        cluster.go:125:   \"extensions\": [\n        cluster.go:125:     \"containerd-flatcar\",\n        cluster.go:125:     \"docker-flatcar\",\n        cluster.go:125:     \"oem-test\"\n        cluster.go:125:   ],\n        cluster.go:125:   \"since\": 1776992073492000\n        cluster.go:125: } ]]\n        cluster.go:125: ++ jq --raw-output '.extensions[] | select(. == \"oem-test\")'\n        cluster.go:125: + status_usr_extensions_oem_test=oem-test\n        cluster.go:125: + [[ oem-test != \\o\\e\\m\\-\\t\\e\\s\\t ]]\n        cluster.go:125: + f=/usr/share/flatcar-sysext-kola-test/file\n        cluster.go:125: + [[ ! 
-e /usr/share/flatcar-sysext-kola-test/file ]]\n        cluster.go:125: ++ cat /usr/share/flatcar-sysext-kola-test/file\n        cluster.go:125: + got=4459.2.4\n        cluster.go:125: ++ source /etc/os-release\n        cluster.go:125: +++ NAME='Flatcar Container Linux by Kinvolk'\n        cluster.go:125: +++ ID=flatcar\n        cluster.go:125: +++ ID_LIKE=coreos\n        cluster.go:125: +++ VERSION=4459.2.4+nightly-20260423-2100\n        cluster.go:125: +++ VERSION_ID=4459.2.4\n        cluster.go:125: +++ BUILD_ID=nightly-20260423-2100\n        cluster.go:125: +++ SYSEXT_LEVEL=1.0\n        cluster.go:125: +++ PRETTY_NAME='Flatcar Container Linux by Kinvolk 4459.2.4+nightly-20260423-2100 (Oklo)'\n        cluster.go:125: +++ ANSI_COLOR='38;5;75'\n        cluster.go:125: +++ HOME_URL=https://flatcar.org/\n        cluster.go:125: +++ BUG_REPORT_URL=https://issues.flatcar.org\n        cluster.go:125: +++ FLATCAR_BOARD=amd64-usr\n        cluster.go:125: +++ CPE_NAME='cpe:2.3:o:flatcar-linux:flatcar_linux:4459.2.4+nightly-20260423-2100:*:*:*:*:*:*:*'\n        cluster.go:125: ++ echo 4459.2.4\n        cluster.go:125: + ex=4459.2.4\n        cluster.go:125: + [[ 4459.2.4 != \\4\\4\\5\\9\\.\\2\\.\\4 ]]\n"},{"name":"docker.base/networks-reliably","result":"PASS","duration":145754461852,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B 0.1s done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 5.26MB 0.1s done\n        cluster.go:125: #3 DONE 0.2s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:9ceedf13215d3028e7f95f3c5fe777e871897eb7ccb1ff041728426dce02a598 done\n        cluster.go:125: #5 naming to docker.io/library/ping done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.base/user-no-caps","result":"PASS","duration":1406126632,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 6.27MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:2ac920075d9ec411488329ef740e5f7eb8b717204e1798c0852f78699e1fcb7d done\n        cluster.go:125: #5 naming to docker.io/library/captest done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.base/ownership","result":"PASS","duration":9714563135,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/nginx\n        cluster.go:125: 9d11882d4b58: Pulling fs layer\n        cluster.go:125: b44d445f805d: Pulling fs layer\n        cluster.go:125: 8bef7a9ebe88: Pulling fs layer\n        cluster.go:125: a0f1c91fa11d: Pulling fs layer\n        cluster.go:125: 2044bb7170df: Pulling fs layer\n        cluster.go:125: 95d77c54a9aa: Pulling fs layer\n        cluster.go:125: c13dd6bf8c10: Pulling fs layer\n        cluster.go:125: 95d77c54a9aa: Waiting\n        cluster.go:125: c13dd6bf8c10: Waiting\n        cluster.go:125: a0f1c91fa11d: Waiting\n        cluster.go:125: 2044bb7170df: Waiting\n        cluster.go:125: 8bef7a9ebe88: Verifying Checksum\n        cluster.go:125: 8bef7a9ebe88: Download complete\n        cluster.go:125: a0f1c91fa11d: Verifying Checksum\n        cluster.go:125: a0f1c91fa11d: Download complete\n        cluster.go:125: 2044bb7170df: Verifying Checksum\n        cluster.go:125: 2044bb7170df: Download complete\n        cluster.go:125: b44d445f805d: Verifying Checksum\n        cluster.go:125: b44d445f805d: Download complete\n        cluster.go:125: 9d11882d4b58: Verifying Checksum\n        cluster.go:125: 9d11882d4b58: Download complete\n        cluster.go:125: 95d77c54a9aa: Verifying Checksum\n        cluster.go:125: 95d77c54a9aa: Download complete\n        cluster.go:125: c13dd6bf8c10: Verifying Checksum\n        cluster.go:125: c13dd6bf8c10: Download complete\n        cluster.go:125: 9d11882d4b58: Pull complete\n        cluster.go:125: b44d445f805d: Pull complete\n        cluster.go:125: 8bef7a9ebe88: Pull complete\n        cluster.go:125: a0f1c91fa11d: Pull complete\n        cluster.go:125: 2044bb7170df: Pull complete\n        cluster.go:125: 95d77c54a9aa: Pull complete\n        cluster.go:125: c13dd6bf8c10: Pull complete\n        cluster.go:125: Digest: sha256:6f170eb5f8ea893d94fe3c05cef69270f16614a99d3d922ce8a320efc43b8591\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"docker.base","result":"PASS","duration":211862686637,"output":"    --- PASS: docker.base/docker-info (2.76s)\n    --- PASS: docker.base/resources (5.24s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 3.42MB 0.1s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:633aa3ec1cc514d2456204cde1b5da05fc056c6d08bc780c7625087d3003284f done\n            cluster.go:125: #5 naming to docker.io/library/sleep done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n    --- PASS: docker.base/networks-reliably (145.75s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B 0.1s done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context:\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 5.26MB 0.1s done\n            cluster.go:125: #3 DONE 0.2s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:9ceedf13215d3028e7f95f3c5fe777e871897eb7ccb1ff041728426dce02a598 done\n            cluster.go:125: #5 naming to docker.io/library/ping done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.base/user-no-caps (1.41s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 6.27MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:2ac920075d9ec411488329ef740e5f7eb8b717204e1798c0852f78699e1fcb7d done\n            cluster.go:125: #5 naming to docker.io/library/captest done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.base/ownership (9.71s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/nginx\n            cluster.go:125: 9d11882d4b58: Pulling fs layer\n            cluster.go:125: b44d445f805d: Pulling fs layer\n            cluster.go:125: 8bef7a9ebe88: Pulling fs layer\n            cluster.go:125: a0f1c91fa11d: Pulling fs layer\n            cluster.go:125: 2044bb7170df: Pulling fs layer\n            cluster.go:125: 95d77c54a9aa: Pulling fs layer\n            cluster.go:125: c13dd6bf8c10: Pulling fs layer\n            cluster.go:125: 95d77c54a9aa: Waiting\n            cluster.go:125: c13dd6bf8c10: Waiting\n            cluster.go:125: a0f1c91fa11d: Waiting\n            cluster.go:125: 2044bb7170df: Waiting\n            cluster.go:125: 8bef7a9ebe88: Verifying Checksum\n            cluster.go:125: 8bef7a9ebe88: Download complete\n            cluster.go:125: a0f1c91fa11d: Verifying Checksum\n            cluster.go:125: a0f1c91fa11d: Download complete\n            cluster.go:125: 2044bb7170df: Verifying Checksum\n            cluster.go:125: 2044bb7170df: Download complete\n            cluster.go:125: b44d445f805d: Verifying Checksum\n            cluster.go:125: b44d445f805d: Download complete\n            cluster.go:125: 9d11882d4b58: Verifying Checksum\n            cluster.go:125: 9d11882d4b58: Download complete\n            cluster.go:125: 95d77c54a9aa: Verifying Checksum\n            cluster.go:125: 95d77c54a9aa: Download complete\n            cluster.go:125: c13dd6bf8c10: Verifying Checksum\n            cluster.go:125: c13dd6bf8c10: Download complete\n            cluster.go:125: 9d11882d4b58: Pull complete\n            cluster.go:125: b44d445f805d: Pull complete\n            cluster.go:125: 8bef7a9ebe88: Pull complete\n            cluster.go:125: a0f1c91fa11d: Pull complete\n            cluster.go:125: 2044bb7170df: Pull complete\n            cluster.go:125: 95d77c54a9aa: Pull complete\n            cluster.go:125: c13dd6bf8c10: Pull complete\n            cluster.go:125: Digest: sha256:6f170eb5f8ea893d94fe3c05cef69270f16614a99d3d922ce8a320efc43b8591\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"coreos.locksmith.tls","result":"PASS","duration":61559077051,"output":""},{"name":"systemd.sysusers.gshadow","result":"PASS","duration":46816509677,"output":""},{"name":"cl.ignition.symlink","result":"PASS","duration":28691300197,"output":"        cluster.go:152: + readlink 
/etc/localtime\n"},{"name":"cl.filesystem/deadlinks","result":"PASS","duration":1628371493,"output":""},{"name":"cl.filesystem/suid","result":"PASS","duration":240835123,"output":""},{"name":"cl.filesystem/sgid","result":"PASS","duration":333563949,"output":""},{"name":"cl.filesystem/writablefiles","result":"PASS","duration":285666232,"output":""},{"name":"cl.filesystem/writabledirs","result":"PASS","duration":295673566,"output":""},{"name":"cl.filesystem/stickydirs","result":"PASS","duration":212531480,"output":""},{"name":"cl.filesystem/blacklist","result":"PASS","duration":235807852,"output":""},{"name":"cl.filesystem","result":"PASS","duration":33926765347,"output":"    --- PASS: cl.filesystem/deadlinks (1.63s)\n    --- PASS: cl.filesystem/suid (0.24s)\n    --- PASS: cl.filesystem/sgid (0.33s)\n    --- PASS: cl.filesystem/writablefiles (0.29s)\n    --- PASS: cl.filesystem/writabledirs (0.30s)\n    --- PASS: cl.filesystem/stickydirs (0.21s)\n    --- PASS: cl.filesystem/blacklist (0.24s)\n"},{"name":"kubeadm.v1.34.4.flannel.base/node_readiness","result":"PASS","duration":11409916012,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/nginx_deployment","result":"PASS","duration":16568910104,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/NFS_deployment","result":"PASS","duration":22823577280,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"PASS","duration":198558350072,"output":"        cluster.go:125: I0424 00:55:39.101288    2004 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0424 00:55:53.347538    2224 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert 
is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.128]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.505323984s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.128:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.535462004s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.331633992s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.002031324s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: t5zlwi.65u4etogv7tc6iro\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        
cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.128:6443 --token t5zlwi.65u4etogv7tc6iro \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:87bef8e69ca7b0d15d5f889ab080e47dd3eb7212b079418f34a88480125fc2ac \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0424 00:56:50.487576    1770 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.flannel.base/node_readiness (11.41s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/nginx_deployment (16.57s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/NFS_deployment (22.82s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"sysext.disable-containerd","result":"PASS","duration":33548527816,"output":""},{"name":"cl.flannel.udp","result":"PASS","duration":138854977810,"output":"        cluster.go:125: Timeout occurred while waiting for network connectivity.\n        flannel.go:121: ping from 1baa4539-c927-475e-a86e-ccc7429dce95(10.254.17.0) to 5a7bfba2-a3ee-469f-b50e-db467d624d95(10.254.33.1)\n"},{"name":"coreos.tls.fetch-urls","result":"PASS","duration":35549733159,"output":""},{"name":"cl.etcd-member.etcdctlv3","result":"PASS","duration":28728022133,"output":"        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.4397361,\"caller\":\"snapshot/v3_snapshot.go:68\",\"msg\":\"created temporary db file\",\"path\":\"/tmp/tmp.XA7M6M1G6V/snapshot.db.part\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.442031,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:211\",\"msg\":\"opened snapshot stream; downloading\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.4420853,\"caller\":\"snapshot/v3_snapshot.go:76\",\"msg\":\"fetching snapshot\",\"endpoint\":\"127.0.0.1:2379\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.445496,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:219\",\"msg\":\"completed snapshot read; closing\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.4462395,\"caller\":\"snapshot/v3_snapshot.go:91\",\"msg\":\"fetched snapshot\",\"endpoint\":\"127.0.0.1:2379\",\"size\":\"20 kB\",\"took\":\"now\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776992337.4463086,\"caller\":\"snapshot/v3_snapshot.go:100\",\"msg\":\"saved\",\"path\":\"/tmp/tmp.XA7M6M1G6V/snapshot.db\"}\n        cluster.go:125: Deprecated: Use `etcdutl snapshot status` 
instead.\n"},{"name":"cl.ignition.v2.xfsroot","result":"PASS","duration":25505267271,"output":""},{"name":"sysext.simple","result":"PASS","duration":27434942963,"output":""},{"name":"packages/sys-cluster/ipvsadm","result":"PASS","duration":551076893,"output":""},{"name":"cl.sysext.boot","result":"PASS","duration":112240238446,"output":"        cluster.go:125: + sudo systemctl mask --now ensure-sysext\n        cluster.go:125: Created symlink '/etc/systemd/system/ensure-sysext.service' → '/dev/null'.\n        cluster.go:125: + sudo systemctl mask --now systemd-sysext\n        cluster.go:125: Created symlink '/etc/systemd/system/systemd-sysext.service' → '/dev/null'.\n        cluster.go:125: + sudo mkdir -p /etc/flatcar/sysext /etc/flatcar/oem-sysext /oem/sysext /etc/extensions\n        cluster.go:125: + echo ID=test\n        cluster.go:125: + sudo tee /oem/oem-release\n        cluster.go:125: + echo myext\n        cluster.go:125: + sudo tee /etc/flatcar/enabled-sysext.conf\n        cluster.go:125: + sudo touch /oem/sysext/active-oem-test /etc/flatcar/oem-sysext/oem-test-4459.2.4+nightly-20260423-2100.raw /etc/flatcar/oem-sysext/oem-test-1.2.3.raw /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw /etc/flatcar/sysext/flatcar-myext-1.2.3.raw\n        cluster.go:125: + sudo ln -fs /etc/flatcar/oem-sysext/oem-test-1.2.3.raw /etc/extensions/oem-test.raw\n        cluster.go:125: + sudo ln -fs /etc/flatcar/sysext/flatcar-myext-1.2.3.raw /etc/extensions/flatcar-myext.raw\n        cluster.go:125: ++ readlink -f /etc/extensions/oem-test.raw\n        cluster.go:125: + '[' /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw = /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw ']'\n        cluster.go:125: ++ readlink -f /etc/extensions/flatcar-myext.raw\n        cluster.go:125: + '[' /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw = /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw ']'\n        cluster.go:125: + sudo mv /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw /etc/flatcar/oem-sysext/\n        cluster.go:125: + sudo mv /etc/flatcar/oem-sysext/oem-test-1.2.3.raw /oem/sysext/\n        cluster.go:125: + sudo ln -fs /oem/sysext/oem-test-1.2.3.raw /etc/extensions/oem-test.raw\n        cluster.go:125: + sudo ln -fs /etc/flatcar/sysext/flatcar-myext-1.2.3.raw /etc/extensions/flatcar-myext.raw\n        cluster.go:125: ++ readlink -f /etc/extensions/oem-test.raw\n        cluster.go:125: + '[' /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw = /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw ']'\n        cluster.go:125: ++ readlink -f /etc/extensions/flatcar-myext.raw\n        cluster.go:125: + '[' /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw = /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw ']'\n        cluster.go:125: + '[' -e /etc/flatcar/oem-sysext/oem-test-1.2.3.raw ']'\n        cluster.go:125: ++ readlink -f /etc/extensions/oem-test.raw\n        cluster.go:125: + '[' /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw = /oem/sysext/oem-test-4459.2.4+nightly-20260423-2100.raw ']'\n        cluster.go:125: ++ readlink -f /etc/extensions/flatcar-myext.raw\n        cluster.go:125: + '[' /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw = /etc/flatcar/sysext/flatcar-myext-4459.2.4+nightly-20260423-2100.raw ']'\n"},{"name":"packages/sys-block/open-iscsi","result":"PASS","duration":41898301601,"output":"        cluster.go:125: Unable to find image 
'ghcr.io/flatcar/targetcli-fb:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/targetcli-fb\n        cluster.go:125: a2318d6c47ec: Pulling fs layer\n        cluster.go:125: 3d3086a1439f: Pulling fs layer\n        cluster.go:125: a2318d6c47ec: Verifying Checksum\n        cluster.go:125: a2318d6c47ec: Download complete\n        cluster.go:125: a2318d6c47ec: Pull complete\n        cluster.go:125: 3d3086a1439f: Download complete\n        cluster.go:125: 3d3086a1439f: Pull complete\n        cluster.go:125: Digest: sha256:b6cd65db981974e8b74938617218dd023775b969f9a059ced21e6ce6fa4763c1\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/targetcli-fb:latest\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: Created symlink '/etc/systemd/system/remote-fs.target.wants/iscsi.service' → '/usr/lib/systemd/system/iscsi.service'.\n"},{"name":"packages","result":"PASS","duration":70029168735,"output":"    --- PASS: packages/sys-cluster/ipvsadm (0.55s)\n    --- PASS: packages/sys-block/open-iscsi (41.90s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/targetcli-fb:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/targetcli-fb\n            cluster.go:125: a2318d6c47ec: Pulling fs layer\n            cluster.go:125: 3d3086a1439f: Pulling fs layer\n            cluster.go:125: a2318d6c47ec: Verifying Checksum\n            cluster.go:125: a2318d6c47ec: Download complete\n            cluster.go:125: a2318d6c47ec: Pull complete\n            cluster.go:125: 3d3086a1439f: Download complete\n            cluster.go:125: 3d3086a1439f: Pull complete\n            cluster.go:125: Digest: sha256:b6cd65db981974e8b74938617218dd023775b969f9a059ced21e6ce6fa4763c1\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/targetcli-fb:latest\n            cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n            cluster.go:125: Created symlink '/etc/systemd/system/remote-fs.target.wants/iscsi.service' → '/usr/lib/systemd/system/iscsi.service'.\n"},{"name":"cl.tpm.root-cryptenroll-pcr-withupdate","result":"PASS","duration":268348372661,"output":"        cluster.go:125: New TPM2 token enrolled as key slot 1.\n        cluster.go:125: Wiped slot 2.\n"},{"name":"linux.ntp","result":"PASS","duration":27893681840,"output":""},{"name":"cl.basic/ReadOnly","result":"PASS","duration":213439329,"output":""},{"name":"cl.basic/MachineID","result":"PASS","duration":253930159,"output":""},{"name":"cl.basic/CloudConfig","result":"PASS","duration":623000537,"output":""},{"name":"cl.basic/PortSSH","result":"PASS","duration":250290908,"output":""},{"name":"cl.basic/DbusPerms","result":"PASS","duration":422315245,"output":""},{"name":"cl.basic/Symlink","result":"PASS","duration":180947300,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":285125180,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":352921467,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":247783532,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":297091504,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":194230418,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":192503305,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":243483441,"output":""},{"name":"cl.basic","result":"PASS","duration":35022875805,"output":"    --- PASS: cl.basic/ReadOnly (0.21s)\n    --- PASS: cl.basic/MachineID (0.25s)\n    --- 
PASS: cl.basic/CloudConfig (0.62s)\n    --- PASS: cl.basic/PortSSH (0.25s)\n    --- PASS: cl.basic/DbusPerms (0.42s)\n    --- PASS: cl.basic/Symlink (0.18s)\n    --- PASS: cl.basic/RandomUUID (0.29s)\n    --- PASS: cl.basic/Useradd (0.35s)\n    --- PASS: cl.basic/Microcode (0.25s)\n    --- PASS: cl.basic/Script (0.30s)\n    --- PASS: cl.basic/SymlinkFlatcar (0.19s)\n    --- PASS: cl.basic/UpdateEngineKeys (0.19s)\n    --- PASS: cl.basic/ServicesActive (0.24s)\n"},{"name":"cl.tpm.eventlog","result":"PASS","duration":34540714690,"output":""},{"name":"kubeadm.v1.34.4.calico.base","result":"FAIL","duration":113945490693,"output":"        cluster.go:125: I0424 01:02:26.624990    2016 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0424 01:02:41.546025    2253 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 10.0.0.150]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n   
     cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.271471ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.150:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.507099729s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.074954484s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.006995238s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node localhost as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: nzohlu.7yzcymymfwnfb0ao\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.150:6443 --token nzohlu.7yzcymymfwnfb0ao \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:1058a15528307cca04529b6854c7f81785f822afce27ceb6f325a79a12c7f380 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: error: .status.conditions accessor error: \u003cnil\u003e is of the type \u003cnil\u003e, expected []interface{}\n        kubeadm.go:197: unable to setup cluster: unable to run master script: Process exited with status 1\n"}],"result":"FAIL","platform":"qemu","version":"4459.2.4"}
