{"tests":[{"name":"cl.basic/MachineID","result":"PASS","duration":263169203,"output":""},{"name":"cl.basic/CloudConfig","result":"PASS","duration":612132733,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":407584074,"output":""},{"name":"cl.basic/DbusPerms","result":"PASS","duration":303847461,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":241779144,"output":""},{"name":"cl.basic/ReadOnly","result":"PASS","duration":229941419,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":269220247,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":248425741,"output":""},{"name":"cl.basic/PortSSH","result":"PASS","duration":251656027,"output":""},{"name":"cl.basic/Symlink","result":"PASS","duration":260401780,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":246974711,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":273385573,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":343116266,"output":""},{"name":"cl.internet/DockerPing","result":"PASS","duration":5616683249,"output":""},{"name":"cl.internet/DockerEcho","result":"PASS","duration":516161690,"output":""},{"name":"cl.ignition.v2.btrfsroot","result":"PASS","duration":58368185421,"output":""},{"name":"coreos.ignition.resource.remote","result":"PASS","duration":59000990184,"output":""},{"name":"cl.internet/NTPDate","result":"PASS","duration":6964094453,"output":""},{"name":"cl.install.cloudinit","result":"PASS","duration":59856437955,"output":""},{"name":"cl.internet/UpdateEngine","result":"PASS","duration":250938651,"output":""},{"name":"coreos.ignition.ssh.key","result":"PASS","duration":61054214010,"output":""},{"name":"cl.basic","result":"PASS","duration":64720030373,"output":"    --- PASS: cl.basic/MachineID (0.26s)\n    --- PASS: cl.basic/CloudConfig (0.61s)\n    --- PASS: cl.basic/Script (0.41s)\n    --- PASS: cl.basic/DbusPerms (0.30s)\n    --- PASS: cl.basic/UpdateEngineKeys (0.24s)\n    --- PASS: cl.basic/ReadOnly (0.23s)\n    --- PASS: cl.basic/RandomUUID (0.27s)\n    --- PASS: cl.basic/Microcode (0.25s)\n    --- PASS: cl.basic/PortSSH (0.25s)\n    --- PASS: cl.basic/Symlink (0.26s)\n    --- PASS: cl.basic/SymlinkFlatcar (0.25s)\n    --- PASS: cl.basic/ServicesActive (0.27s)\n    --- PASS: cl.basic/Useradd (0.34s)\n"},{"name":"cl.ignition.kargs","result":"PASS","duration":69929461380,"output":"        cluster.go:152: + cat /proc/cmdline\n"},{"name":"coreos.ignition.once","result":"PASS","duration":77792191380,"output":""},{"name":"cl.internet","result":"PASS","duration":77808349736,"output":"    --- PASS: cl.internet/DockerPing (5.62s)\n    --- PASS: cl.internet/DockerEcho (0.52s)\n    --- PASS: cl.internet/NTPDate (6.96s)\n    --- PASS: cl.internet/UpdateEngine (0.25s)\n"},{"name":"docker.network-openbsd-nc","result":"PASS","duration":84444361403,"output":"        docker.go:413: creating netcat containers\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring 
context: 2.46MB 0.0s done\n        cluster.go:125: #3 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:db833ee8fe77c19bb1e8a1220cb36fc735a4d99d2b727b093550209f9c0f151b done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.0s\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.46MB 0.0s done\n        cluster.go:125: #3 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:1c2667231b124982f715e03203b1d6b732727676a1c8a4090fa6982672a90b19 done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.0s\n"},{"name":"cl.etcd-member.discovery","result":"PASS","duration":103487975929,"output":""},{"name":"cl.ignition.misc.empty","result":"PASS","duration":55687600012,"output":""},{"name":"linux.nfs.v4","result":"PASS","duration":115545055187,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.XXiPHeAULy\" created on server.\n        nfs.go:125: NFS client booted.\n        nfs.go:133: Got NFS mount.\n"},{"name":"cl.ignition.v1.noop","result":"PASS","duration":59467093091,"output":""},{"name":"coreos.ignition.sethostname","result":"PASS","duration":57777588804,"output":""},{"name":"cl.flannel.vxlan","result":"PASS","duration":134668974864,"output":"        flannel.go:121: ping from 127592489(10.254.32.0) to 127592466(10.254.61.0)\n"},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":74600111650,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/node_readiness","result":"PASS","duration":16267658127,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/node_readiness","result":"PASS","duration":10957154924,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/nginx_deployment","result":"PASS","duration":6009870248,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/nginx_deployment","result":"PASS","duration":11279860869,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/node_readiness","result":"PASS","duration":16277009828,"output":""},{"name":"coreos.ignition.security.tls","result":"PASS","duration":114874182097,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/NFS_deployment","result":"PASS","duration":12565908796,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.cilium.base/NFS_deployment","result":"PASS","duration":12739320668,"output":"        
cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.cilium.base/nginx_deployment","result":"PASS","duration":11274616535,"output":""},{"name":"kubeadm.v1.35.1.calico.base/node_readiness","result":"PASS","duration":16654805335,"output":""},{"name":"kubeadm.v1.35.1.calico.base/nginx_deployment","result":"PASS","duration":6057684806,"output":""},{"name":"coreos.ignition.resource.local","result":"PASS","duration":112667547433,"output":""},{"name":"cl.ignition.v2.noop","result":"PASS","duration":73578662632,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/NFS_deployment","result":"PASS","duration":12626625142,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.cilium.base/IPSec_encryption","result":"PASS","duration":19071646063,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.35.1.calico.base/NFS_deployment","result":"PASS","duration":12922049188,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.flannel.base/node_readiness","result":"PASS","duration":10927206413,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/nginx_deployment","result":"PASS","duration":5947458345,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/IPSec_encryption","result":"PASS","duration":21014906554,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" 
subsys=klog\n"},{"name":"kubeadm.v1.33.8.flannel.base/node_readiness","result":"PASS","duration":10934296742,"output":""},{"name":"linux.nfs.v3","result":"PASS","duration":112999636824,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.EVsZMbIlBu\" created on server.\n        nfs.go:125: NFS client booted.\n        nfs.go:133: Got NFS mount.\n"},{"name":"kubeadm.v1.33.8.flannel.base/nginx_deployment","result":"PASS","duration":5952413902,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/NFS_deployment","result":"PASS","duration":12587726216,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.flannel.base/NFS_deployment","result":"PASS","duration":12433923238,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.cilium.base","result":"PASS","duration":250378464088,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-f451c51626\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-f451c51626\": lookup ci-4459-2-4-n-f451c51626 on 185.12.64.2:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-f451c51626 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.104.187.218]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n      
  cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.631ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.104.187.218:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.51254032s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.01546552s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.50185408s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-f451c51626 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-f451c51626 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 08quf1.413cyouoods987wc\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.104.187.218:6443 --token 08quf1.413cyouoods987wc \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:803fc5f4f22c559ef07ba32ea03c3773fee3771e0fa4ddfa5ff8813046b3eac4 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? 
Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0421 02:22:25.238139    1764 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.cilium.base/node_readiness (16.27s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/nginx_deployment (11.28s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/NFS_deployment (12.74s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.35.1.cilium.base/IPSec_encryption (19.07s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"PASS","duration":252544972050,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        
cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-8c14646a76\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-8c14646a76\": lookup ci-4459-2-4-n-8c14646a76 on 185.12.64.1:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-8c14646a76 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.105.13.45]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control 
plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.00205792s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.105.13.45:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.01122644s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.72814072s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.50222088s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-8c14646a76 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-8c14646a76 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: avrao6.izuomfriy7yydnl1\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n       
 cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.105.13.45:6443 --token avrao6.izuomfriy7yydnl1 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:0de69c56c3de8e45a3f7d5e1f950f132d0474e32cba411b8bd7bb9195e2d5e7c \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0421 02:22:50.725296    1772 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.calico.base/node_readiness (16.65s)\n    --- PASS: kubeadm.v1.35.1.calico.base/nginx_deployment (6.06s)\n    --- PASS: kubeadm.v1.35.1.calico.base/NFS_deployment (12.92s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base/node_readiness","result":"PASS","duration":16262449148,"output":""},{"name":"kubeadm.v1.34.4.cilium.base","result":"PASS","duration":265227970905,"output":"        cluster.go:125: I0421 02:21:31.383757    1991 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled 
registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0421 02:21:39.892955    2204 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-f395b28152\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-f395b28152\": lookup ci-4459-2-4-n-f395b28152 on 185.12.64.1:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-f395b28152 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.104.187.133]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing 
kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.50175048s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.104.187.133:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.3962356s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.94540328s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.00459228s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-f395b28152 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-f395b28152 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: k67npj.076pvq5ca6613dal\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.104.187.133:6443 --token k67npj.076pvq5ca6613dal \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ca9a95d23ecc43c725687d3665fde74f27d65e0a4bb3b259a0a2e47eee464be4 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? 
Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0421 02:22:37.380423    1765 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.cilium.base/node_readiness (16.28s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/nginx_deployment (11.27s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/NFS_deployment (12.63s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.34.4.cilium.base/IPSec_encryption (21.01s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.calico.base/nginx_deployment","result":"PASS","duration":6045652553,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/node_readiness","result":"PASS","duration":10912118925,"output":""},{"name":"kubeadm.v1.35.1.flannel.base","result":"PASS","duration":275014245925,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        
cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-0d17bcb43e\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-0d17bcb43e\": lookup ci-4459-2-4-n-0d17bcb43e on 185.12.64.2:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-0d17bcb43e kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.104.191.88]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet 
configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 503.41112ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.104.191.88:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.01740976s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.85030812s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.00387732s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-0d17bcb43e as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-0d17bcb43e as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: g4ctyv.19r4hxkzwp9nl4ab\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n  
      cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.104.191.88:6443 --token g4ctyv.19r4hxkzwp9nl4ab \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:27cb69e7342b89a1d556e71c214baa7893b6425ad57ca11d5833b58b58e83d84 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0421 02:22:31.291517    1721 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.flannel.base/node_readiness (10.96s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/nginx_deployment (6.01s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/NFS_deployment (12.57s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"PASS","duration":217926991983,"output":"        cluster.go:125: I0421 02:22:19.474960    1936 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0421 02:22:27.505531    2150 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-ff15c20ba1\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-ff15c20ba1\": lookup ci-4459-2-4-n-ff15c20ba1 on 185.12.64.1:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 
'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-ff15c20ba1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.105.7.188]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. 
This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 502.13068ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.105.7.188:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.91082276s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.57152424s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.0048648s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-ff15c20ba1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-ff15c20ba1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 75htt5.gj4ofqjbs7fl27in\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n     
   cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.105.7.188:6443 --token 75htt5.gj4ofqjbs7fl27in \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:b58702c7af8f276f61694c4f75233b2eab61f8c33009d8277d7ccb0b20b07970 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0421 02:23:19.536875    1719 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.flannel.base/node_readiness (10.93s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/nginx_deployment (5.95s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/NFS_deployment (12.59s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base/NFS_deployment","result":"PASS","duration":12941748059,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base/nginx_deployment","result":"PASS","duration":11277380804,"output":""},{"name":"kubeadm.v1.33.8.flannel.base","result":"PASS","duration":219357994414,"output":"        cluster.go:125: I0421 02:22:27.075402    1941 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 02:22:35.548562    2156 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-44bf401ebb\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-44bf401ebb\": lookup ci-4459-2-4-n-44bf401ebb on 185.12.64.1:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is 
not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-44bf401ebb kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.104.182.43]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.002237155s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.104.182.43:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.100112672s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.976110996s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.503364129s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-44bf401ebb as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-44bf401ebb as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 5tksji.acbadzfr84b9a0k1\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n   
     cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.104.182.43:6443 --token 5tksji.acbadzfr84b9a0k1 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:3bff33222912b43f696bdc092d587e6bb776b4f572e4c90b10281673b0033a07 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0421 02:23:27.172884    1717 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.flannel.base/node_readiness (10.93s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/nginx_deployment (5.95s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/NFS_deployment (12.43s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base/node_readiness","result":"PASS","duration":16346479311,"output":""},{"name":"kubeadm.v1.34.4.calico.base/nginx_deployment","result":"PASS","duration":6069535000,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/NFS_deployment","result":"PASS","duration":12665267152,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base/NFS_deployment","result":"PASS","duration":12799501899,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base/IPSec_encryption","result":"PASS","duration":18795613760,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"PASS","duration":244827701583,"output":"        cluster.go:125: I0421 02:22:50.147963    1996 version.go:261] remote version is much newer: 
v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 02:22:58.235740    2209 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-ef902e6588\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-ef902e6588\": lookup ci-4459-2-4-n-ef902e6588 on 185.12.64.2:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-ef902e6588 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.104.182.92]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: 
[control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.501232337s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.104.182.92:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.01965747s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.00945773s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.002532606s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-ef902e6588 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-ef902e6588 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: ooctyw.fgvyvr77dlekb1fa\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.104.182.92:6443 --token ooctyw.fgvyvr77dlekb1fa \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ddc88508d1e41cbf5730f01ab714bc104a8cf65bef36bb4f7f94e623258c76f5 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0421 02:24:07.757675    1774 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.calico.base/node_readiness (16.26s)\n    --- PASS: kubeadm.v1.33.8.calico.base/nginx_deployment (6.05s)\n    --- PASS: kubeadm.v1.33.8.calico.base/NFS_deployment (12.94s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"PASS","duration":242792466414,"output":"        cluster.go:125: I0421 02:23:19.651841    1997 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0421 02:23:27.495498    2212 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-993bc10de1\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-993bc10de1\": lookup ci-4459-2-4-n-993bc10de1 on 185.12.64.1:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver 
serving cert is signed for DNS names [ci-4459-2-4-n-993bc10de1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.105.12.198]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.5020002s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.105.12.198:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.01183424s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.23987888s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.50208016s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-993bc10de1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-993bc10de1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: z73xc2.wyfoyljelam6xf20\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n      
  cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.105.12.198:6443 --token z73xc2.wyfoyljelam6xf20 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:6fcf0991a14441c5345bfb327a9ef6d2b878121d9f105e74395dd2cd748ed437 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0421 02:24:36.017896    1760 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.calico.base/node_readiness (16.35s)\n    --- PASS: kubeadm.v1.34.4.calico.base/nginx_deployment (6.07s)\n    --- PASS: kubeadm.v1.34.4.calico.base/NFS_deployment (12.80s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"PASS","duration":248200801476,"output":"        cluster.go:125: I0421 02:23:19.008613    1991 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 02:23:27.082035    2207 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: 
\t[WARNING Hostname]: hostname \"ci-4459-2-4-n-6bad48d9f1\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459-2-4-n-6bad48d9f1\": lookup ci-4459-2-4-n-6bad48d9f1 on 185.12.64.2:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459-2-4-n-6bad48d9f1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 178.105.1.74]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. 
This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.501574354s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://178.105.1.74:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.927756868s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.581445666s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.502022331s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-6bad48d9f1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459-2-4-n-6bad48d9f1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: y4zna8.drjaije8vnjsjlx8\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n 
       cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 178.105.1.74:6443 --token y4zna8.drjaije8vnjsjlx8 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ecfd136588eb3e88d91748628bcab61d9a9ba24bcc434bb4de7726d29af877e2 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /¯¯\\\n        cluster.go:125: \u001b[36m /¯¯\u001b[33m\\__/\u001b[32m¯¯\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/¯¯\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /¯¯\u001b[31m\\__/\u001b[35m¯¯\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/¯¯\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0421 02:24:21.828491    1767 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.cilium.base/node_readiness (10.91s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/nginx_deployment (11.28s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/NFS_deployment (12.67s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.33.8.cilium.base/IPSec_encryption (18.80s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"}],"result":"PASS","platform":"hetzner","version":"4459.2.4"}
