{"tests":[{"name":"cl.ignition.misc.empty","result":"FAIL","duration":12784898515,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: a2be7b66-b1f9-41e5-9fb2-3c683c90a4f8\n"},{"name":"cl.ignition.v1.noop","result":"FAIL","duration":14313281760,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 396643f8-b69d-43ec-8f71-6adc903c3b0d\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"FAIL","duration":16478669602,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 539b87fe-46f4-4adc-9616-77321c3f913f\n"},{"name":"docker.network-openbsd-nc","result":"FAIL","duration":7843679386,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: ad752d89-96e2-4c8f-9c72-6bb2f584f75e\n"},{"name":"cl.ignition.kargs","result":"FAIL","duration":10091756294,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 7fa83cbe-29fa-4320-88ed-cad06d9cc752\n"},{"name":"cl.internet","result":"FAIL","duration":10381485390,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: c06f0cef-0443-4034-882a-9e79fbac8092\n"},{"name":"coreos.ignition.resource.local","result":"FAIL","duration":6526569146,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 7fa9a0d5-96ab-45c5-a515-7d925415e2b6\n"},{"name":"cl.ignition.v2.noop","result":"FAIL","duration":6667968524,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: f1f81d02-24ff-4fd6-a89f-f87bd3e43eb1\n"},{"name":"kubeadm.v1.35.1.flannel.base","result":"FAIL","duration":8400436231,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: a72c372c-500d-4181-a38d-fbb43fccbac3\n"},{"name":"kubeadm.v1.35.1.cilium.base","result":"FAIL","duration":8554027595,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: abd89a2e-a5cb-4fb6-a645-73054891855a\n"},{"name":"coreos.misc.aws.diskfriendlyname","result":"FAIL","duration":9904885324,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 41c96620-f6a8-45db-8e18-51d877dcb0a1\n"},{"name":"coreos.ignition.sethostname","result":"FAIL","duration":10033801420,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 9bfe315b-e3f3-4a56-9fd8-656995246841\n"},{"name":"cl.flannel.vxlan","result":"FAIL","duration":9817895908,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: d75eb93b-e627-4384-b0a2-a65a3bf39980\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"FAIL","duration":8718343819,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: ba505779-2efc-4111-b959-5e3fd1b5bf88\n"},{"name":"coreos.ignition.once","result":"FAIL","duration":7190331882,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: ec620d28-f3ba-46ef-876b-a319497d0f55\n"},{"name":"cl.ignition.v2.btrfsroot","result":"FAIL","duration":11788905921,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: edf91941-3d20-4d42-97fe-7f8483365594\n"},{"name":"cl.basic/CloudConfig","result":"PASS","duration":2531423867,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":1642570944,"output":""},{"name":"cl.basic/DbusPerms","result":"PASS","duration":1643151563,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":1579305173,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":1990246252,"output":""},{"name":"cl.basic/MachineID","result":"PASS","duration":1524313905,"output":""},{"name":"cl.basic/PortSSH","result":"PASS","duration":1539099441,"output":""},{"name":"cl.metadata.aws","result":"PASS","duration":80385845491,"output":""},{"name":"cl.basic/Symlink","result":"PASS","duration":1585784770,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":1535405106,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":1515991905,"output":""},{"name":"linux.nfs.v3","result":"FAIL","duration":85877249424,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.2YIqNd1s3B\" created on server.\n        nfs.go:122: Cluster.NewMachine: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: c2ab90c0-a924-44f7-bb01-413e8bdb5337\n"},{"name":"coreos.ignition.resource.remote","result":"PASS","duration":86089318537,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":1575438050,"output":""},{"name":"linux.nfs.v4","result":"FAIL","duration":86927863431,"output":"        nfs.go:80: NFS server booted.\n        nfs.go:85: Test file \"/tmp/tmp.yEh2OVVwmO\" created on server.\n        nfs.go:122: Cluster.NewMachine: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: cb2b28fd-06a5-419a-8926-951d93adc3ca\n"},{"name":"cl.basic/ReadOnly","result":"PASS","duration":1558350296,"output":""},{"name":"coreos.ignition.resource.s3","result":"PASS","duration":89592940637,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":1634345105,"output":""},{"name":"kubeadm.v1.34.4.flannel.base","result":"FAIL","duration":90779479310,"output":"        kubeadm.go:197: unable to setup cluster: unable to create master node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 48718bcb-36b2-4931-bce1-023466fe9041\n"},{"name":"coreos.ignition.ssh.key","result":"PASS","duration":91234698924,"output":""},{"name":"cl.install.cloudinit","result":"PASS","duration":91953558179,"output":""},{"name":"cl.cloudinit.basic","result":"PASS","duration":92793869407,"output":""},{"name":"coreos.ignition.security.tls","result":"FAIL","duration":112557821956,"output":"        security.go:132: starting client: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 26e70344-4cbc-431f-862d-7999c20fe1a6\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"FAIL","duration":113309224176,"output":"        kubeadm.go:197: unable to setup cluster: unable to create master node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 42afb5d0-e8e7-4d84-8450-cccb4344ce9c\n"},{"name":"cl.basic","result":"PASS","duration":115822975263,"output":"    --- PASS: cl.basic/CloudConfig (2.53s)\n    --- PASS: cl.basic/Script (1.64s)\n    --- PASS: cl.basic/DbusPerms (1.64s)\n    --- PASS: cl.basic/RandomUUID (1.58s)\n    --- PASS: cl.basic/Useradd (1.99s)\n    --- PASS: cl.basic/MachineID (1.52s)\n    --- PASS: cl.basic/PortSSH (1.54s)\n    --- PASS: cl.basic/Symlink (1.59s)\n    --- PASS: cl.basic/SymlinkFlatcar (1.54s)\n    --- PASS: cl.basic/UpdateEngineKeys (1.52s)\n    --- PASS: cl.basic/ServicesActive (1.58s)\n    --- PASS: cl.basic/ReadOnly (1.56s)\n    --- PASS: cl.basic/Microcode (1.63s)\n"},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":140470266293,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/node_readiness","result":"PASS","duration":14900946536,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/nginx_deployment","result":"PASS","duration":9957428187,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/node_readiness","result":"PASS","duration":21530494446,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/NFS_deployment","result":"PASS","duration":20913905808,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base/node_readiness","result":"PASS","duration":21781954450,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/nginx_deployment","result":"PASS","duration":23333447848,"output":""},{"name":"kubeadm.v1.33.8.calico.base/nginx_deployment","result":"PASS","duration":16788125930,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/NFS_deployment","result":"PASS","duration":20856026303,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base/NFS_deployment","result":"PASS","duration":34914209362,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at 
\u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base/IPSec_encryption","result":"PASS","duration":27748718971,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.flannel.base","result":"PASS","duration":375938988983,"output":"        cluster.go:125: I0421 23:14:36.340513    2473 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 23:14:48.679508    2691 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ip-172-31-27-168 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.27.168]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and 
key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 2.002045369s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://172.31.27.168:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.608862134s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.614967775s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.502129889s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-27-168 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-27-168 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: pf5wj3.99ilg09g9q0gbyaq\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 172.31.27.168:6443 --token pf5wj3.99ilg09g9q0gbyaq \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:bf399f210007a34e6e784244a2d5b97a7744423d1c31c6f6160f316f1b54efb8 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0421 23:16:07.125960    2239 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        
cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.flannel.base/node_readiness (14.90s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/nginx_deployment (9.96s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/NFS_deployment (20.91s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"PASS","duration":390781478956,"output":"        cluster.go:125: I0421 23:14:40.094385    2540 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 23:14:52.435562    2759 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ip-172-31-25-249 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.25.249]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n     
   cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.501943791s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://172.31.25.249:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.318238221s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.486305679s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.001617131s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-25-249 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-25-249 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: r4zo9u.a04gt88ixj4swkvd\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 172.31.25.249:6443 --token r4zo9u.a04gt88ixj4swkvd \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:df89d991eb461c89cf9172e4297c6df49a8d5180358c17e8faee5eca5cd17572 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0421 23:16:36.032896    2208 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.calico.base/node_readiness (21.78s)\n    --- PASS: kubeadm.v1.33.8.calico.base/nginx_deployment (16.79s)\n    --- PASS: kubeadm.v1.33.8.calico.base/NFS_deployment (34.91s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"PASS","duration":432153768561,"output":"        cluster.go:125: I0421 23:14:40.879646    2442 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0421 23:14:52.271665    2675 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names 
[ip-172-31-17-207 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.17.207]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.002420845s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://172.31.17.207:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.845915827s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 5.001921961s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 7.003030907s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-17-207 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-17-207 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: pq51ib.ir43d4lk5aw4kr0s\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        
cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 172.31.17.207:6443 --token pq51ib.ir43d4lk5aw4kr0s \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:586965cee3f621d4a9c68a3da1d024618870a5c5acc55826bdab0fb177044ef9 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0421 23:16:27.405336    2212 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.cilium.base/node_readiness (21.53s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/nginx_deployment (23.33s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/NFS_deployment (20.86s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.33.8.cilium.base/IPSec_encryption (27.75s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"cl.etcd-member.discovery","result":"FAIL","duration":853858014844,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 50e19f5d-29cf-4347-a458-bc8368482bcf\n"}],"result":"FAIL","platform":"aws","version":"4628.0.0"}
