{"tests":[{"name":"cl.network.nftables","result":"PASS","duration":84141829965,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"docker.lib-coreos-dockerd-compat/docker-info","result":"PASS","duration":2852648612,"output":""},{"name":"cl.ignition.translation","result":"PASS","duration":84281659018,"output":"        cluster.go:152: + ip --json address show kola | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n        cluster.go:152: + cat /etc/systemd/network/00-dummy.network\n"},{"name":"cl.ignition.v1.ext4root","result":"PASS","duration":84465897978,"output":""},{"name":"systemd.journal.user","result":"PASS","duration":84664374027,"output":""},{"name":"cl.ignition.v2.users","result":"PASS","duration":84869851439,"output":""},{"name":"cl.cloudinit.multipart-mime","result":"PASS","duration":84900230870,"output":""},{"name":"cl.ignition.v1.groups","result":"PASS","duration":85295753920,"output":""},{"name":"docker.lib-coreos-dockerd-compat/resources","result":"PASS","duration":5286921066,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.43MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:3aa71b5416daef4e24221df7c93054c4a1dc3faffd1061f7d8a0615f5655010c done\n        cluster.go:125: #5 naming to docker.io/library/sleep done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n"},{"name":"docker.base/docker-info","result":"PASS","duration":8569484975,"output":""},{"name":"bpf.ig/ig","result":"PASS","duration":96457152396,"output":"        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n        cluster.go:125: time=\"2026-04-16T01:48:36Z\" level=warning msg=\"Runtime enricher (docker): couldn't get current containers: listing containers with options {Size:false All:true Latest:false Since: Before: Limit:0 Filters:{fields:map[]}}: Error response from daemon: client version 1.51 is too new. 
Maximum supported API version is 1.48\"\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n        cluster.go:125: + docker info\n        cluster.go:125: + docker ps\n        cluster.go:125: + docker images\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. --output json --verbose\n        cluster.go:125: + dig kinvolk.io\n        cluster.go:125: + dig flatcar.org\n        cluster.go:125: + dig stable.release.flatcar-linux.net\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"bpf.ig","result":"PASS","duration":98786246478,"output":"    --- PASS: bpf.ig/ig (96.46s)\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n            cluster.go:125: time=\"2026-04-16T01:48:36Z\" level=warning msg=\"Runtime enricher (docker): couldn't get current containers: listing containers with options {Size:false All:true Latest:false Since: Before: Limit:0 Filters:{fields:map[]}}: Error response from daemon: client version 1.51 is too new. Maximum supported API version is 1.48\"\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n            cluster.go:125: + docker info\n            cluster.go:125: + docker ps\n            cluster.go:125: + docker images\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. 
--output json --verbose\n            cluster.go:125: + dig kinvolk.io\n            cluster.go:125: + dig flatcar.org\n            cluster.go:125: + dig stable.release.flatcar-linux.net\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"docker.base/resources","result":"PASS","duration":15480023769,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile:\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.6s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.8s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.43MB 0.0s done\n        cluster.go:125: #3 DONE 0.6s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.4s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:e38e7fc9e3590bf01886ab808181b8dcf0c0dd1e36e37c9bc785a376109a98e4 done\n        cluster.go:125: #5 naming to docker.io/library/sleep done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n"},{"name":"coreos.locksmith.tls","result":"PASS","duration":108003103896,"output":""},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":108581977470,"output":""},{"name":"cl.ignition.v1.once","result":"PASS","duration":108768611665,"output":""},{"name":"docker.network-openbsd-nc","result":"PASS","duration":112186431235,"output":"        docker.go:413: creating netcat containers\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.69MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:c0633e1030dab61ee371c36044d812f93797ff8b196c792a5f44d31dffe6f0fb done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.69MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:912da35a4f9df126b511c4809b2101ac2067fa3ca24c374eea93d3c386512b86 done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"cl.ignition.v1.btrfsroot","result":"PASS","duration":114110866140,"output":""},{"name":"coreos.selinux.enforce","result":"PASS","duration":121691935553,"output":""},{"name":"cl.osreset.ignition-rerun","result":"PASS","duration":126326157286,"output":""},{"name":"coreos.auth.verify","result":"PASS","duration":83040494696,"output":""},{"name":"sysext.disable-containerd","result":"PASS","duration":83085930674,"output":""},{"name":"cl.etcd-member.etcdctlv3","result":"PASS","duration":84081635838,"output":"        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.372414Z\",\"caller\":\"snapshot/v3_snapshot.go:65\",\"msg\":\"created temporary db file\",\"path\":\"/tmp/tmp.m8wiDk7qyo/snapshot.db.part\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.373261Z\",\"logger\":\"client\",\"caller\":\"v3@v3.5.18/maintenance.go:212\",\"msg\":\"opened snapshot stream; downloading\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.373289Z\",\"caller\":\"snapshot/v3_snapshot.go:73\",\"msg\":\"fetching snapshot\",\"endpoint\":\"127.0.0.1:2379\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.377740Z\",\"logger\":\"client\",\"caller\":\"v3@v3.5.18/maintenance.go:220\",\"msg\":\"completed snapshot read; closing\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.377802Z\",\"caller\":\"snapshot/v3_snapshot.go:88\",\"msg\":\"fetched snapshot\",\"endpoint\":\"127.0.0.1:2379\",\"size\":\"20 kB\",\"took\":\"now\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":\"2026-04-16T01:49:55.377850Z\",\"caller\":\"snapshot/v3_snapshot.go:97\",\"msg\":\"saved\",\"path\":\"/tmp/tmp.m8wiDk7qyo/snapshot.db\"}\n        cluster.go:125: Deprecated: Use `etcdutl snapshot status` instead.\n"},{"name":"coreos.ignition.resource.remote","result":"PASS","duration":83875368103,"output":""},{"name":"cl.etcd-member.v2-backup-restore","result":"PASS","duration":85909566227,"output":"        cluster.go:125: 
2026-04-16T01:49:56Z\tinfo\tetcdutl/backup_command.go:216\tignoring EntryConfChange raft entry\n        cluster.go:125: 2026-04-16T01:49:56Z\tinfo\tetcdutl/backup_command.go:231\tignoring member attribute update on\t{\"entry\": \"Term:2 Index:3 Data:\\\"\\\\010\\\\202\\\\204\\\\257\\\\325\\\\277\\\\262\\\\347\\\\330\\\\254\\\\001\\\\022\\\\003PUT\\\\032\u0026/0/members/b5dd68346cfcacb1/attributes\\\\\\\"R{\\\\\\\"name\\\\\\\":\\\\\\\"a4f2354b6a114aa2bd3fbcc5e16be2a9\\\\\\\",\\\\\\\"clientURLs\\\\\\\":[\\\\\\\"http://10.0.0.18:2379\\\\\\\"]}(\\\\0002\\\\0008\\\\000H\\\\000P\\\\000X\\\\000`\\\\000h\\\\000p\\\\000x\\\\000\\\\200\\\\001\\\\000\\\" \", \"v2Req.Path\": \"/0/members/b5dd68346cfcacb1/attributes\"}\n        cluster.go:125: 2026-04-16T01:49:56Z\tinfo\tetcdutl/backup_command.go:252\tignoring v3 raft entry\n        cluster.go:125: 2026-04-16T01:49:56Z\tinfo\tmembership/store.go:141\tTrimming membership information from the backend...\n"},{"name":"docker.userns","result":"PASS","duration":105739647523,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile:\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 3.47MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:ac0479f454dc88b2d710123b31d44de1716ae6cc973223ed40ef685282883311 done\n        cluster.go:125: #5 naming to docker.io/library/userns-test 0.1s done\n        cluster.go:125: #5 DONE 0.2s\n"},{"name":"coreos.ignition.systemd.enable-service","result":"PASS","duration":83819387398,"output":""},{"name":"cl.ignition.v1.noop","result":"PASS","duration":83314761772,"output":""},{"name":"cl.ignition.v2.ext4root","result":"PASS","duration":83337289672,"output":""},{"name":"cl.ignition.v2.noop","result":"PASS","duration":83659506659,"output":""},{"name":"docker.lib-coreos-dockerd-compat/networks-reliably","result":"PASS","duration":139405252824,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 5.34MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:4eee079c6087ab852b40d5aa6aaed282ce75e8310b7db156a29d100cfb49431d done\n        cluster.go:125: #5 naming to docker.io/library/ping done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/user-no-caps","result":"PASS","duration":1296285556,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 6.36MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:6bca358d1f363c73386be0e6e6819d019545e812dae7aad4e5e2751b54a5b0bc done\n        cluster.go:125: #5 naming to docker.io/library/captest done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/ownership","result":"PASS","duration":5880054844,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/nginx\n        cluster.go:125: 9d11882d4b58: Pulling fs layer\n        cluster.go:125: 6a7080264fc9: Pulling fs layer\n        cluster.go:125: f176d5d8a1c1: Pulling fs layer\n        cluster.go:125: 63ef37274310: Pulling fs layer\n        cluster.go:125: 4744f2344932: Pulling fs layer\n        cluster.go:125: 39a24e95a1e9: Pulling fs layer\n        cluster.go:125: 400f8ac367e0: Pulling fs layer\n        cluster.go:125: 4744f2344932: Waiting\n        cluster.go:125: 63ef37274310: Waiting\n        cluster.go:125: 39a24e95a1e9: Waiting\n        cluster.go:125: 400f8ac367e0: Waiting\n        cluster.go:125: f176d5d8a1c1: Verifying Checksum\n        cluster.go:125: f176d5d8a1c1: Download complete\n        cluster.go:125: 63ef37274310: Download complete\n        cluster.go:125: 6a7080264fc9: Verifying Checksum\n        cluster.go:125: 6a7080264fc9: Download complete\n        cluster.go:125: 4744f2344932: Verifying Checksum\n        cluster.go:125: 4744f2344932: Download complete\n        cluster.go:125: 39a24e95a1e9: Verifying Checksum\n        cluster.go:125: 39a24e95a1e9: Download complete\n        cluster.go:125: 400f8ac367e0: Verifying Checksum\n        cluster.go:125: 400f8ac367e0: Download complete\n        cluster.go:125: 9d11882d4b58: Download complete\n        cluster.go:125: 9d11882d4b58: Pull complete\n        cluster.go:125: 6a7080264fc9: Pull complete\n        cluster.go:125: f176d5d8a1c1: Pull complete\n        cluster.go:125: 63ef37274310: Pull complete\n        cluster.go:125: 4744f2344932: Pull complete\n        cluster.go:125: 39a24e95a1e9: Pull complete\n        cluster.go:125: 400f8ac367e0: Pull complete\n        cluster.go:125: Digest: 
sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"docker.lib-coreos-dockerd-compat","result":"PASS","duration":238463975988,"output":"    --- PASS: docker.lib-coreos-dockerd-compat/docker-info (2.85s)\n    --- PASS: docker.lib-coreos-dockerd-compat/resources (5.29s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 3.43MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:3aa71b5416daef4e24221df7c93054c4a1dc3faffd1061f7d8a0615f5655010c done\n            cluster.go:125: #5 naming to docker.io/library/sleep done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n    --- PASS: docker.lib-coreos-dockerd-compat/networks-reliably (139.41s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 5.34MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:4eee079c6087ab852b40d5aa6aaed282ce75e8310b7db156a29d100cfb49431d done\n            cluster.go:125: #5 naming to docker.io/library/ping done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/user-no-caps (1.30s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 6.36MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:6bca358d1f363c73386be0e6e6819d019545e812dae7aad4e5e2751b54a5b0bc done\n            cluster.go:125: #5 naming to docker.io/library/captest done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/ownership (5.88s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/nginx\n            cluster.go:125: 9d11882d4b58: Pulling fs layer\n            cluster.go:125: 6a7080264fc9: Pulling fs layer\n            cluster.go:125: f176d5d8a1c1: Pulling fs layer\n            cluster.go:125: 63ef37274310: Pulling fs layer\n            cluster.go:125: 4744f2344932: Pulling fs layer\n            cluster.go:125: 39a24e95a1e9: Pulling fs layer\n            cluster.go:125: 400f8ac367e0: Pulling fs layer\n            cluster.go:125: 4744f2344932: Waiting\n            cluster.go:125: 63ef37274310: Waiting\n            cluster.go:125: 39a24e95a1e9: Waiting\n            cluster.go:125: 400f8ac367e0: Waiting\n            cluster.go:125: f176d5d8a1c1: Verifying Checksum\n            cluster.go:125: f176d5d8a1c1: Download complete\n            cluster.go:125: 63ef37274310: Download complete\n            cluster.go:125: 6a7080264fc9: Verifying Checksum\n            cluster.go:125: 6a7080264fc9: Download complete\n            cluster.go:125: 4744f2344932: Verifying Checksum\n            cluster.go:125: 4744f2344932: Download complete\n            cluster.go:125: 39a24e95a1e9: Verifying Checksum\n            cluster.go:125: 39a24e95a1e9: Download complete\n            cluster.go:125: 400f8ac367e0: Verifying Checksum\n            cluster.go:125: 400f8ac367e0: Download complete\n            cluster.go:125: 9d11882d4b58: Download complete\n            cluster.go:125: 9d11882d4b58: Pull complete\n            cluster.go:125: 6a7080264fc9: Pull complete\n            cluster.go:125: f176d5d8a1c1: Pull complete\n            cluster.go:125: 63ef37274310: Pull complete\n            cluster.go:125: 4744f2344932: Pull 
complete\n            cluster.go:125: 39a24e95a1e9: Pull complete\n            cluster.go:125: 400f8ac367e0: Pull complete\n            cluster.go:125: Digest: sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"coreos.ignition.resource.local","result":"PASS","duration":165993383600,"output":""},{"name":"cl.ignition.misc.empty","result":"PASS","duration":83394807362,"output":""},{"name":"cl.ignition.v2_1.swap","result":"PASS","duration":85355434211,"output":""},{"name":"cl.ignition.symlink","result":"PASS","duration":54215021295,"output":"        cluster.go:152: + readlink /etc/localtime\n"},{"name":"cl.ignition.v2_1.vfat","result":"PASS","duration":83506591681,"output":""},{"name":"cl.ignition.v2_1.ext4checkexisting","result":"PASS","duration":195814669896,"output":""},{"name":"coreos.ignition.security.tls","result":"PASS","duration":135129000951,"output":""},{"name":"cl.users.shells","result":"PASS","duration":113364064259,"output":""},{"name":"cl.flannel.vxlan","result":"PASS","duration":144205684118,"output":"        flannel.go:121: ping from ci-4593.1.0-n-7c0a2862c1(10.254.30.0) to ci-4593.1.0-n-7aa06f3c9e(10.254.46.0)\n"},{"name":"docker.containerd-restart","result":"PASS","duration":146659286775,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Verifying Checksum\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n"},{"name":"kubeadm.v1.33.8.cilium.base/node_readiness","result":"PASS","duration":37044841575,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/nginx_deployment","result":"PASS","duration":10981272457,"output":""},{"name":"cl.network.wireguard","result":"PASS","duration":53022820005,"output":"        cluster.go:152: + ip --json address show kv0 | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n"},{"name":"kubeadm.v1.35.1.cilium.base/node_readiness","result":"PASS","duration":37035627439,"output":""},{"name":"cl.ignition.v2.xfsroot","result":"PASS","duration":53372956147,"output":""},{"name":"cl.verity/verify","result":"PASS","duration":12154888080,"output":"        cluster.go:125: Success\n"},{"name":"cl.ignition.v1.xfsroot","result":"PASS","duration":83839753016,"output":""},{"name":"sysext.simple","result":"PASS","duration":55225273760,"output":""},{"name":"kubeadm.v1.33.8.cilium.base/NFS_deployment","result":"PASS","duration":17410265901,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null 
(null)\n"},{"name":"cl.basic/PortSSH","result":"PASS","duration":222869273,"output":""},{"name":"cl.basic/DbusPerms","result":"PASS","duration":351288028,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/nginx_deployment","result":"PASS","duration":11022788549,"output":""},{"name":"cl.basic/Symlink","result":"PASS","duration":216361693,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":211829912,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":237633155,"output":""},{"name":"cl.basic/ReadOnly","result":"PASS","duration":223037498,"output":""},{"name":"cl.basic/MachineID","result":"PASS","duration":223310539,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":263471111,"output":""},{"name":"cl.basic/CloudConfig","result":"PASS","duration":743371627,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":263769728,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":222499052,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":275492392,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":1132021002,"output":""},{"name":"coreos.tls.fetch-urls","result":"PASS","duration":88752644456,"output":""},{"name":"cl.basic","result":"PASS","duration":89677306576,"output":"    --- PASS: cl.basic/PortSSH (0.22s)\n    --- PASS: cl.basic/DbusPerms (0.35s)\n    --- PASS: cl.basic/Symlink (0.22s)\n    --- PASS: cl.basic/UpdateEngineKeys (0.21s)\n    --- PASS: cl.basic/ServicesActive (0.24s)\n    --- PASS: cl.basic/ReadOnly (0.22s)\n    --- PASS: cl.basic/MachineID (0.22s)\n    --- PASS: cl.basic/Microcode (0.26s)\n    --- PASS: cl.basic/CloudConfig (0.74s)\n    --- PASS: cl.basic/Script (0.26s)\n    --- PASS: cl.basic/SymlinkFlatcar (0.22s)\n    --- PASS: cl.basic/RandomUUID (0.28s)\n    --- PASS: cl.basic/Useradd (1.13s)\n"},{"name":"kubeadm.v1.35.1.cilium.base/NFS_deployment","result":"PASS","duration":12070245359,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.cilium.base/IPSec_encryption","result":"PASS","duration":18146699763,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"PASS","duration":383425302234,"output":"        cluster.go:125: I0416 01:50:21.387868    3269 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled 
registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0416 01:50:31.974326    3507 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-a24f0784e1\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-a24f0784e1\": lookup ci-4593.1.0-n-a24f0784e1 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-a24f0784e1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.21]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        
cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.528787ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.21:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.004751818s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 1.457621726s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 3.000893574s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-a24f0784e1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-a24f0784e1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: cw50f2.occuzrhphhcg2hz5\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.21:6443 --token cw50f2.occuzrhphhcg2hz5 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ce66b7c3e46107e781532632daafe21847fe86a6819bd427229b9d426d97381c \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? 
Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0416 01:52:03.958997    3031 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.cilium.base/node_readiness (37.04s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/nginx_deployment (10.98s)\n    --- PASS: kubeadm.v1.33.8.cilium.base/NFS_deployment (17.41s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.33.8.cilium.base/IPSec_encryption (18.15s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.34.4.flannel.base/node_readiness","result":"PASS","duration":10773786150,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/IPSec_encryption","result":"PASS","duration":17318026944,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: 
deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"coreos.update.badusr","result":"PASS","duration":394545626766,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/nginx_deployment","result":"PASS","duration":5756351031,"output":""},{"name":"docker.base/networks-reliably","result":"PASS","duration":290103865637,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile:\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.6s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.9s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 5.34MB 0.0s done\n        cluster.go:125: #3 DONE 0.6s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.6s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.6s done\n        cluster.go:125: #5 writing image sha256:516a0837692786a30da07fd4a99a07d0e1b0ec2ab7d786be50dd0248eac2704e done\n        cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n        cluster.go:125: #5 DONE 1.1s\n"},{"name":"sysext.custom-docker.sysext","result":"PASS","duration":101452448952,"output":"        cluster.go:125: bash: line 1: docker: command not found\n        cluster.go:125: Cloning into 'sysext-bakery'...\n        cluster.go:125: Note: switching to '9850ffd5b2353f45a9b3bf4fb84f8138a149e3e7'.\n        cluster.go:125: \n        cluster.go:125: You are in 'detached HEAD' state. You can look around, make experimental\n        cluster.go:125: changes and commit them, and you can discard any commits you make in this\n        cluster.go:125: state without impacting any branches by switching back to a branch.\n        cluster.go:125: \n        cluster.go:125: If you want to create a new branch to retain commits you create, you may\n        cluster.go:125: do so (now or later) by using -c with the switch command. 
Example:\n        cluster.go:125: \n        cluster.go:125:   git switch -c \u003cnew-branch-name\u003e\n        cluster.go:125: \n        cluster.go:125: Or undo this operation with:\n        cluster.go:125: \n        cluster.go:125:   git switch -\n        cluster.go:125: \n        cluster.go:125: Turn off this advice by setting config variable advice.detachedHead to false\n        cluster.go:125: \n        cluster.go:125: HEAD is now at 9850ffd Merge pull request #31 from flatcar/t-lo/fix-docker-23-containerd-shim\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n"},{"name":"cl.etcd-member.discovery","result":"PASS","duration":84711942020,"output":""},{"name":"kubeadm.v1.35.1.cilium.base","result":"PASS","duration":396718906944,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-968b46864b\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-968b46864b\": lookup ci-4593.1.0-n-968b46864b on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-968b46864b kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.25]\n     
   cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.366192ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.25:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.007790452s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.505842492s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.001207339s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-968b46864b as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-968b46864b as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: c1bz6o.uwv6x8a26skj2orv\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        
cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.25:6443 --token c1bz6o.uwv6x8a26skj2orv \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:7f44b9787e262101d8ead80341bd4be9a1c6e468e14d73129dc610dc13b5dd8e \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /¯¯\\\n        cluster.go:125: \u001b[36m /¯¯\u001b[33m\\__/\u001b[32m¯¯\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/¯¯\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /¯¯\u001b[31m\\__/\u001b[35m¯¯\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/¯¯\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0416 01:52:24.034228    3033 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.cilium.base/node_readiness (37.04s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/nginx_deployment (11.02s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/NFS_deployment (12.07s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.35.1.cilium.base/IPSec_encryption (17.32s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"docker.btrfs-storage","result":"PASS","duration":85238200219,"output":""},{"name":"docker.base/user-no-caps","result":"PASS","duration":6382475439,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 6.36MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        
cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:64c336fb7a57fd942e58bea8f2b1006101acd77bdb529bebad9043ef6b472a54 done\n        cluster.go:125: #5 naming to docker.io/library/captest done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"kubeadm.v1.34.4.flannel.base/NFS_deployment","result":"PASS","duration":17424358480,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"PASS","duration":307128043092,"output":"        cluster.go:125: I0416 01:51:56.741889    3174 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0416 01:52:08.489728    3412 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-16936035af\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-16936035af\": lookup ci-4593.1.0-n-16936035af on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-16936035af kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.14]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        
cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.001459923s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.14:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.048373463s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.86715163s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.500857517s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-16936035af as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-16936035af as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: usg111.rlt5rmot10qc06lz\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.14:6443 --token usg111.rlt5rmot10qc06lz \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:0b772c38715a849b8866e96584f040dee9c4916235e458b911b6025894670cf3 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0416 01:53:26.586039    2970 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, 
Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.flannel.base/node_readiness (10.77s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/nginx_deployment (5.76s)\n    --- PASS: kubeadm.v1.34.4.flannel.base/NFS_deployment (17.42s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"docker.base/ownership","result":"PASS","duration":17063838067,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/nginx\n        cluster.go:125: 9d11882d4b58: Pulling fs layer\n        cluster.go:125: 6a7080264fc9: Pulling fs layer\n        cluster.go:125: f176d5d8a1c1: Pulling fs layer\n        cluster.go:125: 63ef37274310: Pulling fs layer\n        cluster.go:125: 4744f2344932: Pulling fs layer\n        cluster.go:125: 39a24e95a1e9: Pulling fs layer\n        cluster.go:125: 400f8ac367e0: Pulling fs layer\n        cluster.go:125: 63ef37274310: Waiting\n        cluster.go:125: 4744f2344932: Waiting\n        cluster.go:125: 39a24e95a1e9: Waiting\n        cluster.go:125: 400f8ac367e0: Waiting\n        cluster.go:125: f176d5d8a1c1: Download complete\n        cluster.go:125: 63ef37274310: Verifying Checksum\n        cluster.go:125: 63ef37274310: Download complete\n        cluster.go:125: 6a7080264fc9: Verifying Checksum\n        cluster.go:125: 6a7080264fc9: Download complete\n        cluster.go:125: 9d11882d4b58: Verifying Checksum\n        cluster.go:125: 9d11882d4b58: Download complete\n        cluster.go:125: 4744f2344932: Verifying Checksum\n        cluster.go:125: 4744f2344932: Download complete\n        cluster.go:125: 39a24e95a1e9: Verifying Checksum\n        cluster.go:125: 39a24e95a1e9: Download complete\n        cluster.go:125: 400f8ac367e0: Verifying Checksum\n        cluster.go:125: 400f8ac367e0: Download complete\n        cluster.go:125: 9d11882d4b58: Pull complete\n        cluster.go:125: 6a7080264fc9: Pull complete\n        cluster.go:125: f176d5d8a1c1: Pull complete\n        cluster.go:125: 63ef37274310: Pull complete\n        cluster.go:125: 4744f2344932: Pull complete\n        cluster.go:125: 39a24e95a1e9: Pull complete\n        cluster.go:125: 400f8ac367e0: Pull complete\n        cluster.go:125: Digest: sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"kubeadm.v1.35.1.flannel.base/node_readiness","result":"PASS","duration":10704257179,"output":""},{"name":"docker.base","result":"PASS","duration":421708066370,"output":"    --- PASS: docker.base/docker-info (8.57s)\n    --- PASS: docker.base/resources (15.48s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile:\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.6s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context:\n            cluster.go:125: #2 transferring context: 2B done\n            
cluster.go:125: #2 DONE 0.8s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 3.43MB 0.0s done\n            cluster.go:125: #3 DONE 0.6s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.4s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:e38e7fc9e3590bf01886ab808181b8dcf0c0dd1e36e37c9bc785a376109a98e4 done\n            cluster.go:125: #5 naming to docker.io/library/sleep done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n    --- PASS: docker.base/networks-reliably (290.10s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile:\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.6s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context:\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.9s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 5.34MB 0.0s done\n            cluster.go:125: #3 DONE 0.6s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.6s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers\n            cluster.go:125: #5 exporting layers 0.6s done\n            cluster.go:125: #5 writing image sha256:516a0837692786a30da07fd4a99a07d0e1b0ec2ab7d786be50dd0248eac2704e done\n            cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n            cluster.go:125: #5 DONE 1.1s\n    --- PASS: docker.base/user-no-caps (6.38s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 6.36MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers\n            cluster.go:125: #5 exporting layers 0.0s done\n            cluster.go:125: #5 writing image sha256:64c336fb7a57fd942e58bea8f2b1006101acd77bdb529bebad9043ef6b472a54 done\n            cluster.go:125: #5 naming to docker.io/library/captest done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.base/ownership (17.06s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/nginx\n            cluster.go:125: 9d11882d4b58: Pulling fs layer\n            cluster.go:125: 6a7080264fc9: Pulling fs layer\n            cluster.go:125: f176d5d8a1c1: Pulling fs layer\n            cluster.go:125: 63ef37274310: Pulling fs layer\n            cluster.go:125: 4744f2344932: Pulling fs layer\n            cluster.go:125: 39a24e95a1e9: Pulling fs layer\n            cluster.go:125: 400f8ac367e0: Pulling fs layer\n            cluster.go:125: 63ef37274310: Waiting\n            cluster.go:125: 4744f2344932: Waiting\n            cluster.go:125: 39a24e95a1e9: Waiting\n            cluster.go:125: 400f8ac367e0: Waiting\n            cluster.go:125: f176d5d8a1c1: Download complete\n            cluster.go:125: 63ef37274310: Verifying Checksum\n            cluster.go:125: 63ef37274310: Download complete\n            cluster.go:125: 6a7080264fc9: Verifying Checksum\n            cluster.go:125: 6a7080264fc9: Download complete\n            cluster.go:125: 9d11882d4b58: Verifying Checksum\n            cluster.go:125: 9d11882d4b58: Download complete\n            cluster.go:125: 4744f2344932: Verifying Checksum\n            cluster.go:125: 4744f2344932: Download complete\n            cluster.go:125: 39a24e95a1e9: Verifying Checksum\n            cluster.go:125: 39a24e95a1e9: Download complete\n            cluster.go:125: 400f8ac367e0: Verifying Checksum\n            cluster.go:125: 400f8ac367e0: Download complete\n            cluster.go:125: 9d11882d4b58: Pull complete\n            cluster.go:125: 6a7080264fc9: Pull complete\n            cluster.go:125: f176d5d8a1c1: Pull complete\n            cluster.go:125: 63ef37274310: Pull complete\n            cluster.go:125: 4744f2344932: Pull complete\n            cluster.go:125: 39a24e95a1e9: Pull complete\n            cluster.go:125: 400f8ac367e0: Pull complete\n            cluster.go:125: Digest: sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"kubeadm.v1.35.1.flannel.base/nginx_deployment","result":"PASS","duration":5709534075,"output":""},{"name":"kubeadm.v1.35.1.calico.base/node_readiness","result":"PASS","duration":31886010651,"output":""},{"name":"kubeadm.v1.35.1.flannel.base/NFS_deployment","result":"PASS","duration":11756088024,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"cl.overlay.cleanup","result":"PASS","duration":79231710731,"output":""},{"name":"kubeadm.v1.35.1.flannel.base","result":"PASS","duration":319690472594,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled 
registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-e54ea61d98\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-e54ea61d98\": lookup ci-4593.1.0-n-e54ea61d98 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-e54ea61d98 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.35]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet 
environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.381239ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.35:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.504168194s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.199045521s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.502017844s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-e54ea61d98 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-e54ea61d98 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: r1hc80.ph6b5xag3wsatjd9\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.35:6443 --token r1hc80.ph6b5xag3wsatjd9 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ebbbffb218c88676c9ae4b8000902e169854f259df7bf2890abccb883b90fccc \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0416 01:53:57.614691    2985 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, 
Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.flannel.base/node_readiness (10.70s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/nginx_deployment (5.71s)\n    --- PASS: kubeadm.v1.35.1.flannel.base/NFS_deployment (11.76s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.calico.base/nginx_deployment","result":"PASS","duration":21601676578,"output":""},{"name":"cl.ignition.luks","result":"PASS","duration":83502712756,"output":""},{"name":"coreos.ignition.once","result":"PASS","duration":108867992582,"output":""},{"name":"cl.internet/UpdateEngine","result":"PASS","duration":373800667,"output":""},{"name":"cl.swap_activation","result":"PASS","duration":82938124376,"output":""},{"name":"cl.cloudinit.basic","result":"PASS","duration":83333078066,"output":""},{"name":"cl.ignition.kargs","result":"PASS","duration":82965490251,"output":"        cluster.go:152: + cat /proc/cmdline\n"},{"name":"cl.network.iptables","result":"PASS","duration":83106631828,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"docker.selinux","result":"PASS","duration":106186293346,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: sh: can't create /opt/hello: Permission denied\n"},{"name":"kubeadm.v1.35.1.calico.base/NFS_deployment","result":"PASS","duration":44037973476,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"PASS","duration":327515164949,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight 
checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-6edb25579a\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-6edb25579a\": lookup ci-4593.1.0-n-6edb25579a on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-6edb25579a kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.37]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up 
the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.760918ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.37:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.004196215s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.741138769s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.501907637s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-6edb25579a as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-6edb25579a as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: xunwyy.6pbjyimdof522yl8\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        
cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.37:6443 --token xunwyy.6pbjyimdof522yl8 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:4d36406b636b49d196608ae49e60790eba207931305894ef34a75b551306c576 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0416 01:53:43.482262    3052 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.calico.base/node_readiness (31.89s)\n    --- PASS: kubeadm.v1.35.1.calico.base/nginx_deployment (21.60s)\n    --- PASS: kubeadm.v1.35.1.calico.base/NFS_deployment (44.04s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null 
(null)\n"},{"name":"cl.internet/DockerPing","result":"PASS","duration":19817122608,"output":""},{"name":"sysext.disable-docker","result":"PASS","duration":82999790486,"output":""},{"name":"cl.internet/DockerEcho","result":"PASS","duration":6497481592,"output":""},{"name":"cl.internet/NTPDate","result":"PASS","duration":7269071840,"output":""},{"name":"cl.internet","result":"PASS","duration":118370852848,"output":"    --- PASS: cl.internet/UpdateEngine (0.37s)\n    --- PASS: cl.internet/DockerPing (19.82s)\n    --- PASS: cl.internet/DockerEcho (6.50s)\n    --- PASS: cl.internet/NTPDate (7.27s)\n"},{"name":"cl.toolbox.dnf-install","result":"PASS","duration":100885921998,"output":""},{"name":"cl.verity/corruption","result":"PASS","duration":169114483050,"output":""},{"name":"cl.verity","result":"PASS","duration":234147262243,"output":"    --- PASS: cl.verity/verify (12.15s)\n            cluster.go:125: Success\n    --- PASS: cl.verity/corruption (169.11s)\n"},{"name":"cl.update.badverity","result":"PASS","duration":443777296913,"output":""},{"name":"systemd.sysusers.gshadow","result":"PASS","duration":52991002448,"output":""},{"name":"cl.metadata.azure","result":"PASS","duration":52775060147,"output":""},{"name":"cl.cloudinit.script","result":"PASS","duration":53410099905,"output":""},{"name":"cl.ignition.v2.btrfsroot","result":"PASS","duration":83098970342,"output":""},{"name":"coreos.ignition.groups","result":"PASS","duration":55167250774,"output":""},{"name":"coreos.selinux.boolean","result":"PASS","duration":110987690913,"output":""},{"name":"cl.update.reboot","result":"PASS","duration":139664849988,"output":""},{"name":"cl.ignition.v1.users","result":"PASS","duration":84548055884,"output":""},{"name":"coreos.ignition.ssh.key","result":"PASS","duration":82906481439,"output":""},{"name":"cl.flannel.udp","result":"PASS","duration":213099314660,"output":"        cluster.go:125: Timeout occurred while waiting for network connectivity.\n        flannel.go:121: ping from ci-4593.1.0-n-e4abe4eee0(10.254.21.0) to ci-4593.1.0-n-c9e37c8058(10.254.64.1)\n"},{"name":"cl.locksmith.cluster","result":"PASS","duration":417506386565,"output":""},{"name":"docker.enable-service.sysext","result":"PASS","duration":83325768201,"output":"        cluster.go:152: + systemctl is-enabled docker\n"},{"name":"coreos.locksmith.reboot","result":"PASS","duration":409287229496,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/node_readiness","result":"PASS","duration":10757044618,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/node_readiness","result":"PASS","duration":16036916385,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/nginx_deployment","result":"PASS","duration":10990205792,"output":""},{"name":"kubeadm.v1.33.8.flannel.base/nginx_deployment","result":"PASS","duration":21459133649,"output":""},{"name":"kubeadm.v1.34.4.cilium.base/NFS_deployment","result":"PASS","duration":17288793222,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.flannel.base/NFS_deployment","result":"PASS","duration":17252427847,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.33.8.flannel.base","result":"PASS","duration":354948149441,"output":"        cluster.go:125: I0416 01:55:53.897877   
 3201 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0416 01:56:06.301260    3451 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-0fb8e73b28\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-0fb8e73b28\": lookup ci-4593.1.0-n-0fb8e73b28 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-0fb8e73b28 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.25]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder 
\"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.508834ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.25:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.507235791s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.133417099s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.001842721s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-0fb8e73b28 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-0fb8e73b28 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: wq8p4d.d1i62i7c4nruarpv\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.25:6443 --token wq8p4d.d1i62i7c4nruarpv \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:071f64e6674591ad6abd9ff480781d041f202cbea7f46bbc122e03e9d5b8254b \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0416 01:58:07.583547    2974 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, 
Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.flannel.base/node_readiness (10.76s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/nginx_deployment (21.46s)\n    --- PASS: kubeadm.v1.33.8.flannel.base/NFS_deployment (17.25s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.cilium.base/IPSec_encryption","result":"PASS","duration":17147870462,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"PASS","duration":363741707499,"output":"        cluster.go:125: I0416 01:55:52.787316    3252 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0416 01:56:19.722193    3479 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-6f5564f4c3\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-6f5564f4c3\": lookup ci-4593.1.0-n-6f5564f4c3 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate 
and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-6f5564f4c3 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.11]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 2.001203384s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. 
This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.11:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.502453719s\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 6.068563364s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 8.500836788s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-6f5564f4c3 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-6f5564f4c3 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 3vzltd.wdu15ez3jiysiimc\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        
cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.11:6443 --token 3vzltd.wdu15ez3jiysiimc \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:e8151873622092b61e6ab88623a16ed9508a3b18c1fd589b4b908c0898beb3d1 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0416 01:58:02.579093    3053 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.cilium.base/node_readiness (16.04s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/nginx_deployment (10.99s)\n    --- PASS: kubeadm.v1.34.4.cilium.base/NFS_deployment (17.29s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.34.4.cilium.base/IPSec_encryption (17.15s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"kubeadm.v1.33.8.calico.base/node_readiness","result":"PASS","duration":21363560125,"output":""},{"name":"kubeadm.v1.33.8.calico.base/nginx_deployment","result":"PASS","duration":5780977469,"output":""},{"name":"kubeadm.v1.33.8.calico.base/NFS_deployment","result":"PASS","duration":33236441733,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null 
(null)\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"PASS","duration":351172690900,"output":"        cluster.go:125: I0416 01:57:19.510909    3262 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0416 01:57:31.633394    3499 version.go:261] remote version is much newer: v1.35.4; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-de8956a71a\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-de8956a71a\": lookup ci-4593.1.0-n-de8956a71a on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-de8956a71a kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.17]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        
cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.230879ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.17:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.666916854s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.296261827s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 4.001929779s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-de8956a71a as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-de8956a71a as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: r9qw0x.qwf1rzpgwddkc1lf\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.17:6443 --token r9qw0x.qwf1rzpgwddkc1lf \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:20403f2cf186fba590a2254c0d8a644d334f89ec21c277bb12a4c923e9ce6494 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0416 01:59:16.236601    3104 joinconfiguration.go:113] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.33.8.calico.base/node_readiness (21.36s)\n    --- PASS: kubeadm.v1.33.8.calico.base/nginx_deployment (5.78s)\n    --- PASS: kubeadm.v1.33.8.calico.base/NFS_deployment (33.24s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:122): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base/node_readiness","result":"PASS","duration":52879094917,"output":""},{"name":"kubeadm.v1.34.4.calico.base/nginx_deployment","result":"PASS","duration":5744005388,"output":""},{"name":"kubeadm.v1.34.4.calico.base/NFS_deployment","result":"PASS","duration":33125031300,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"PASS","duration":379237507579,"output":"        cluster.go:125: I0416 01:57:59.292620    3340 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0416 01:58:09.409639    3584 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using 
Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-4cd571de57\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4593.1.0-n-4cd571de57\": lookup ci-4593.1.0-n-4cd571de57 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4593.1.0-n-4cd571de57 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.14]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the 
kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.386721ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.14:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 1.505415032s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 2.175024267s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.000946445s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-4cd571de57 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4593.1.0-n-4cd571de57 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: yjon70.agbl8n8lqh1a3zq8\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        
cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.14:6443 --token yjon70.agbl8n8lqh1a3zq8 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:36a2fe78fc07876f212d66b64bf6c8d444444cf3ff1c61d1eb9db545a6a34fac \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        cluster.go:125: W0416 01:59:55.332249    2995 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.calico.base/node_readiness (52.88s)\n    --- PASS: kubeadm.v1.34.4.calico.base/nginx_deployment (5.74s)\n    --- PASS: kubeadm.v1.34.4.calico.base/NFS_deployment (33.13s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n"}],"result":"PASS","platform":"azure","version":"4593.1.0"}
