{"tests":[{"name":"linux.nfs.v3","result":"FAIL","duration":15057274854,"output":"        nfs.go:77: Cluster.NewMachine: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 9b8dd624-8e13-40e4-802f-20c9dfa3810a\n"},{"name":"coreos.ignition.security.tls","result":"FAIL","duration":16346311970,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 8df0ba19-faff-4125-a0c1-d8ea78592b68\n"},{"name":"linux.nfs.v4","result":"FAIL","duration":17685372031,"output":"        nfs.go:77: Cluster.NewMachine: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 0f9bbcba-b146-4de5-be6a-e784278fe216\n"},{"name":"kubeadm.v1.35.1.flannel.base","result":"FAIL","duration":18469166163,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 6ca7e36d-ae86-4074-a197-1ead5ec7a2d0\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"FAIL","duration":19145729878,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 6151a03e-7912-41c1-bc3c-f04ca0d62c67\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"FAIL","duration":10178382828,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 5d75b155-2526-4462-a433-8a0b596f1422\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"FAIL","duration":11916422936,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 2d5b3184-fd68-45ef-b950-16d36ab651a7\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"FAIL","duration":12779113064,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 7f4e4e00-aa6c-43b7-a03f-4c858ade29f9\n"},{"name":"cl.ignition.kargs","result":"FAIL","duration":9975434411,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 8175f6ca-aa45-46f2-b29b-e79c3aff63c3\n"},{"name":"coreos.ignition.resource.remote","result":"FAIL","duration":16143836051,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: fb190d9b-9b32-470c-91fd-6f3f7cd53d45\n"},{"name":"cl.flannel.vxlan","result":"FAIL","duration":22833965786,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 706b89e4-0ff6-4a15-9113-b97d785223d8\n"},{"name":"cl.network.initramfs.second-boot","result":"FAIL","duration":10751229895,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 19290aff-d77b-492f-940c-e04b36daab36\n"},{"name":"cl.etcd-member.discovery","result":"FAIL","duration":13887583182,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 32ca9a0c-755f-4d59-bc54-2420317ac756\n"},{"name":"cl.ignition.misc.empty","result":"FAIL","duration":7227900708,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: fb27c6cd-b1ef-4dea-877e-0029713444f7\n"},{"name":"cl.cloudinit.basic","result":"FAIL","duration":11355194312,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: d5eda60e-9b58-4a08-9303-3e86a10c4f75\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"FAIL","duration":9296733321,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: a6c145d0-c50d-4ed8-a486-6fc6e6bcb00d\n"},{"name":"cl.metadata.aws","result":"FAIL","duration":10060798480,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 8f40587e-a929-449f-a428-a23c5dcab13e\n"},{"name":"kubeadm.v1.33.8.flannel.base","result":"FAIL","duration":11204686818,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: e7f763a9-b9bc-45df-a24b-f6ceb0b3d366\n"},{"name":"cl.internet/UpdateEngine","result":"PASS","duration":1619304704,"output":""},{"name":"docker.network-openbsd-nc","result":"FAIL","duration":95851100734,"output":"        harness.go:646: Cluster failed starting machines: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 742bc125-9a6c-4890-a74c-10baf21cf3af\n"},{"name":"cl.basic/DbusPerms","result":"PASS","duration":1754169689,"output":""},{"name":"cl.internet/DockerPing","result":"PASS","duration":13233338429,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":1590479353,"output":""},{"name":"cl.internet/DockerEcho","result":"PASS","duration":1946927473,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":1594805522,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":1683674458,"output":""},{"name":"cl.ignition.v1.noop","result":"PASS","duration":109899038564,"output":""},{"name":"cl.install.cloudinit","result":"PASS","duration":110139742268,"output":""},{"name":"coreos.ignition.ssh.key","result":"PASS","duration":110459653699,"output":""},{"name":"cl.basic/MachineID","result":"PASS","duration":1575244956,"output":""},{"name":"coreos.ignition.sethostname","result":"PASS","duration":110785620246,"output":""},{"name":"cl.ignition.v2.noop","result":"PASS","duration":111891641142,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":1636981528,"output":""},{"name":"coreos.misc.aws.diskfriendlyname","result":"PASS","duration":112499188764,"output":""},{"name":"cl.ignition.v2.btrfsroot","result":"PASS","duration":114175647385,"output":""},{"name":"cl.internet/NTPDate","result":"PASS","duration":8342294651,"output":""},{"name":"cl.basic/CloudConfig","result":"PASS","duration":2888752020,"output":""},{"name":"coreos.ignition.resource.local","result":"FAIL","duration":115665330252,"output":"        resource.go:333: starting client: error running instances: Unsupported: Your requested instance type (a1.large) is not supported in your requested Availability Zone (us-west-2a). 
Please retry your request by not specifying an Availability Zone or choosing us-west-2b, us-west-2c.\n\tstatus code: 400, request id: 9806d7ed-9f77-4959-a8f8-5b43fedb73f7\n"},{"name":"cl.basic/Symlink","result":"PASS","duration":1627279552,"output":""},{"name":"cl.basic/ReadOnly","result":"PASS","duration":1600241051,"output":""},{"name":"coreos.ignition.resource.s3","result":"PASS","duration":118662928975,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":1624026110,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":3051181166,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":1682016003,"output":""},{"name":"cl.basic/PortSSH","result":"PASS","duration":1625748478,"output":""},{"name":"cl.internet","result":"PASS","duration":141053066908,"output":"    --- PASS: cl.internet/UpdateEngine (1.62s)\n    --- PASS: cl.internet/DockerPing (13.23s)\n    --- PASS: cl.internet/DockerEcho (1.95s)\n    --- PASS: cl.internet/NTPDate (8.34s)\n"},{"name":"cl.basic","result":"PASS","duration":142272771924,"output":"    --- PASS: cl.basic/DbusPerms (1.75s)\n    --- PASS: cl.basic/SymlinkFlatcar (1.59s)\n    --- PASS: cl.basic/UpdateEngineKeys (1.59s)\n    --- PASS: cl.basic/ServicesActive (1.68s)\n    --- PASS: cl.basic/MachineID (1.58s)\n    --- PASS: cl.basic/Microcode (1.64s)\n    --- PASS: cl.basic/CloudConfig (2.89s)\n    --- PASS: cl.basic/Symlink (1.63s)\n    --- PASS: cl.basic/ReadOnly (1.60s)\n    --- PASS: cl.basic/RandomUUID (1.62s)\n    --- PASS: cl.basic/Useradd (3.05s)\n    --- PASS: cl.basic/Script (1.68s)\n    --- PASS: cl.basic/PortSSH (1.63s)\n"},{"name":"coreos.ignition.once","result":"PASS","duration":161477247650,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/node_readiness","result":"PASS","duration":21866179844,"output":""},{"name":"kubeadm.v1.34.4.calico.base/node_readiness","result":"PASS","duration":22311790140,"output":""},{"name":"kubeadm.v1.34.4.calico.base/nginx_deployment","result":"PASS","duration":10304534308,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/nginx_deployment","result":"PASS","duration":23555795349,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/NFS_deployment","result":"PASS","duration":21318016006,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base/NFS_deployment","result":"PASS","duration":44271353517,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.cilium.base/IPSec_encryption","result":"PASS","duration":25718191809,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning 
msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"PASS","duration":450338130484,"output":"        cluster.go:125: I0420 18:07:00.812238    2517 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0420 18:07:11.421108    2734 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ip-172-31-23-109 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.23.109]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: 
[kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.001636082s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://172.31.23.109:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.931497117s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 5.052809285s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.002711262s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-23-109 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-23-109 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: olrn40.2g4htwwtdy2kw77e\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 172.31.23.109:6443 --token olrn40.2g4htwwtdy2kw77e \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:e51353cb49e5bbc2ee5a152e3ee2814333f25da9c2646e25277fd6b99d29bb4c \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0420 18:08:47.987860    2346 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.calico.base/node_readiness (22.31s)\n    --- PASS: kubeadm.v1.34.4.calico.base/nginx_deployment (10.30s)\n    --- PASS: kubeadm.v1.34.4.calico.base/NFS_deployment (44.27s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.cilium.base","result":"PASS","duration":464133602629,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ip-172-31-29-241 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.29.241]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" 
certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.00206268s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://172.31.29.241:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.015349843s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.190903149s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.002058603s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-29-241 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ip-172-31-29-241 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: iw6a0k.03mjbzwwtny9v3ur\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        
cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 172.31.29.241:6443 --token iw6a0k.03mjbzwwtny9v3ur \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:d67bdb0f2b72c03386f096486abb499106ee5ed63f0d05ccff01bdaa0f237859 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0420 18:08:34.990467    2268 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.cilium.base/node_readiness (21.87s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/nginx_deployment (23.56s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/NFS_deployment (21.32s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.35.1.cilium.base/IPSec_encryption (25.72s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"}],"result":"FAIL","platform":"aws","version":"4669.0.0"}
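
The report above is one JSON object: a `tests` array whose entries carry `name`, `result`, `duration`, and `output`, plus top-level `result`, `platform`, and `version` fields; every FAIL entry here reports the same AWS error (the a1.large instance type rejected in us-west-2a). As a minimal sketch of how such a report can be tallied — the filename `kola-results.json` is an assumption, and durations are read as nanoseconds on the assumption they are Go `time.Duration` values:

```python
import json
from collections import Counter

# Hypothetical filename: the JSON report above, saved verbatim.
with open("kola-results.json") as f:
    report = json.load(f)

# Count PASS/FAIL outcomes across the tests array.
counts = Counter(t["result"] for t in report["tests"])
print(report["platform"], report["version"], dict(counts))

# List failing tests with wall time, assuming durations are nanoseconds.
for t in report["tests"]:
    if t["result"] == "FAIL":
        print(f'{t["name"]}: {t["duration"] / 1e9:.1f}s')
```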
