{"tests":[{"name":"docker.lib-coreos-dockerd-compat/docker-info","result":"PASS","duration":3285673617,"output":""},{"name":"cl.users.shells","result":"PASS","duration":95326729342,"output":""},{"name":"coreos.ignition.systemd.enable-service","result":"PASS","duration":95598558871,"output":""},{"name":"cl.swap_activation","result":"PASS","duration":95731483874,"output":""},{"name":"sysext.disable-containerd","result":"PASS","duration":95823463024,"output":""},{"name":"cl.ignition.symlink","result":"PASS","duration":96037101896,"output":"        cluster.go:152: + readlink /etc/localtime\n"},{"name":"cl.network.iptables","result":"PASS","duration":96552946110,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"cl.cloudinit.basic","result":"PASS","duration":96939079748,"output":""},{"name":"cl.network.wireguard","result":"PASS","duration":97526829346,"output":"        cluster.go:152: + ip --json address show kv0 | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n"},{"name":"cl.ignition.v2.ext4root","result":"PASS","duration":97905833916,"output":""},{"name":"cl.ignition.v2.btrfsroot","result":"PASS","duration":98415026072,"output":""},{"name":"cl.ignition.v1.groups","result":"PASS","duration":98471485288,"output":""},{"name":"cl.cloudinit.script","result":"PASS","duration":98697459632,"output":""},{"name":"cl.ignition.v1.xfsroot","result":"PASS","duration":100220966505,"output":""},{"name":"cl.ignition.v1.users","result":"PASS","duration":101360947962,"output":""},{"name":"docker.lib-coreos-dockerd-compat/resources","result":"PASS","duration":10222685215,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.12MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:c03f3f3ad8751f5b65c3431b55ea2d34929c61f3746d4d5f5e8169f42c711f1f done\n        cluster.go:125: #5 naming to docker.io/library/sleep 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. 
Memory swappiness discarded.\n"},{"name":"cl.ignition.v1.once","result":"PASS","duration":120077605574,"output":""},{"name":"cl.ignition.luks","result":"PASS","duration":127490151132,"output":""},{"name":"cl.osreset.ignition-rerun","result":"PASS","duration":129362134597,"output":""},{"name":"cl.etcd-member.etcdctlv3","result":"PASS","duration":95460162634,"output":"        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.0049067,\"caller\":\"snapshot/v3_snapshot.go:68\",\"msg\":\"created temporary db file\",\"path\":\"/tmp/tmp.de97kW3drk/snapshot.db.part\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.0056348,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:211\",\"msg\":\"opened snapshot stream; downloading\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.0056593,\"caller\":\"snapshot/v3_snapshot.go:76\",\"msg\":\"fetching snapshot\",\"endpoint\":\"127.0.0.1:2379\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.0108697,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:219\",\"msg\":\"completed snapshot read; closing\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.010921,\"caller\":\"snapshot/v3_snapshot.go:91\",\"msg\":\"fetched snapshot\",\"endpoint\":\"127.0.0.1:2379\",\"size\":\"20 kB\",\"took\":\"now\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1776382041.011061,\"caller\":\"snapshot/v3_snapshot.go:100\",\"msg\":\"saved\",\"path\":\"/tmp/tmp.de97kW3drk/snapshot.db\"}\n        cluster.go:125: Deprecated: Use `etcdutl snapshot status` instead.\n"},{"name":"cl.ignition.v2_1.vfat","result":"PASS","duration":95098236378,"output":""},{"name":"cl.network.nftables","result":"PASS","duration":94425162162,"output":"        cluster.go:152: + sudo nft --json list ruleset | jq '.nftables[] | select(.rule) | .rule.expr[0].match.right'\n"},{"name":"bpf.ig/ig","result":"PASS","duration":101509927379,"output":"        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n        cluster.go:125: + docker info\n        cluster.go:125: + docker ps\n        cluster.go:125: + docker images\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n        cluster.go:125: + trap 'kill %%' ERR\n        cluster.go:125: + timeout 30 grep -F -m1 running...\n        cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. 
--output json --verbose\n        cluster.go:125: + dig kinvolk.io\n        cluster.go:125: + dig flatcar.org\n        cluster.go:125: + dig stable.release.flatcar-linux.net\n        cluster.go:125: + kill %%\n        cluster.go:125: + wait\n        cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n        cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"cl.ignition.v2.users","result":"PASS","duration":100981449057,"output":""},{"name":"docker.userns","result":"PASS","duration":103055242229,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.19MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:5353da274bbd65b62a4a2d7502bf23324b3bab95e3cde3a6cdc939df65e7d35d done\n        cluster.go:125: #5 naming to docker.io/library/userns-test 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"bpf.ig","result":"PASS","duration":104234354615,"output":"    --- PASS: bpf.ig/ig (101.51s)\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --help\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_exec:v0.50.0 --host --filter proc.comm=docker,args~ps --output json --verbose\n            cluster.go:125: + docker info\n            cluster.go:125: + docker ps\n            cluster.go:125: + docker images\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.args == \"/usr/bin/docker\\u00a0ps\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0info\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.args == \"/usr/bin/docker\\u00a0images\"))' ig.json\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --help\n            cluster.go:125: + trap 'kill %%' ERR\n            cluster.go:125: + timeout 30 grep -F -m1 running...\n            cluster.go:125: + sudo ig run trace_dns:v0.50.0 --host --filter name=flatcar.org. 
--output json --verbose\n            cluster.go:125: + dig kinvolk.io\n            cluster.go:125: + dig flatcar.org\n            cluster.go:125: + dig stable.release.flatcar-linux.net\n            cluster.go:125: + kill %%\n            cluster.go:125: + wait\n            cluster.go:125: + jq -s -e '.[] | select(.name == \"flatcar.org.\")' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"kinvolk.io.\"))' ig.json\n            cluster.go:125: + jq -s -e 'isempty(.[] | select(.name == \"stable.release.flatcar-linux.net.\"))' ig.json\n"},{"name":"coreos.tls.fetch-urls","result":"PASS","duration":103301015085,"output":""},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":117446771991,"output":""},{"name":"coreos.auth.verify","result":"PASS","duration":93579220185,"output":""},{"name":"cl.ignition.misc.empty","result":"PASS","duration":94625406310,"output":""},{"name":"coreos.selinux.boolean","result":"PASS","duration":124281007192,"output":""},{"name":"docker.network-openbsd-nc","result":"PASS","duration":111137641420,"output":"        docker.go:413: creating netcat containers\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.46MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.0s done\n        cluster.go:125: #5 writing image sha256:7c99d39c007f8d5680219e5a5c4334740d4b1a96ab74775b0e4446b5525d15a3 done\n        cluster.go:125: #5 naming to docker.io/library/netcat done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.46MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:fb4190e9545f8ab4c1c4d39c6862c5df97ac2e868edb524b610048eae8086c22\n        cluster.go:125: #5 writing image sha256:fb4190e9545f8ab4c1c4d39c6862c5df97ac2e868edb524b610048eae8086c22 done\n        cluster.go:125: #5 naming to docker.io/library/netcat 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/networks-reliably","result":"PASS","duration":144406723480,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 4.03MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:418eef6d5373e221dca576f1f88ee193bfc99c97fdba8cd24572ca59143e9017\n        cluster.go:125: #5 writing image sha256:418eef6d5373e221dca576f1f88ee193bfc99c97fdba8cd24572ca59143e9017 done\n        cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/user-no-caps","result":"PASS","duration":4045930549,"output":"        cluster.go:125: #0 building with \"default\" instance using docker driver\n        cluster.go:125: \n        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context:\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 5.08MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . 
/\n        cluster.go:125: #4 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:901a0016a2618d59201a9f5342f3b5e6e195050bd2fb6577942c75e3b2009077 done\n        cluster.go:125: #5 naming to docker.io/library/captest 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n"},{"name":"docker.lib-coreos-dockerd-compat/ownership","result":"PASS","duration":9029727833,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/nginx\n        cluster.go:125: f704f61efc9c: Pulling fs layer\n        cluster.go:125: a1eae9f5b3f3: Pulling fs layer\n        cluster.go:125: b91cc56ccc4f: Pulling fs layer\n        cluster.go:125: 44eaefe7f5ca: Pulling fs layer\n        cluster.go:125: 9c0f97895f78: Pulling fs layer\n        cluster.go:125: 8b4d97d25516: Pulling fs layer\n        cluster.go:125: 5f961e328469: Pulling fs layer\n        cluster.go:125: 44eaefe7f5ca: Waiting\n        cluster.go:125: 9c0f97895f78: Waiting\n        cluster.go:125: 8b4d97d25516: Waiting\n        cluster.go:125: 5f961e328469: Waiting\n        cluster.go:125: b91cc56ccc4f: Verifying Checksum\n        cluster.go:125: b91cc56ccc4f: Download complete\n        cluster.go:125: 44eaefe7f5ca: Verifying Checksum\n        cluster.go:125: 44eaefe7f5ca: Download complete\n        cluster.go:125: a1eae9f5b3f3: Verifying Checksum\n        cluster.go:125: a1eae9f5b3f3: Download complete\n        cluster.go:125: 9c0f97895f78: Verifying Checksum\n        cluster.go:125: 9c0f97895f78: Download complete\n        cluster.go:125: 8b4d97d25516: Download complete\n        cluster.go:125: f704f61efc9c: Verifying Checksum\n        cluster.go:125: f704f61efc9c: Download complete\n        cluster.go:125: 5f961e328469: Verifying Checksum\n        cluster.go:125: 5f961e328469: Download complete\n        cluster.go:125: f704f61efc9c: Pull complete\n        cluster.go:125: a1eae9f5b3f3: Pull complete\n        cluster.go:125: b91cc56ccc4f: Pull complete\n        cluster.go:125: 44eaefe7f5ca: Pull complete\n        cluster.go:125: 9c0f97895f78: Pull complete\n        cluster.go:125: 8b4d97d25516: Pull complete\n        cluster.go:125: 5f961e328469: Pull complete\n        cluster.go:125: Digest: sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"docker.lib-coreos-dockerd-compat","result":"PASS","duration":265428942051,"output":"    --- PASS: docker.lib-coreos-dockerd-compat/docker-info (3.29s)\n    --- PASS: docker.lib-coreos-dockerd-compat/resources (10.22s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 2.12MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: 
\n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:c03f3f3ad8751f5b65c3431b55ea2d34929c61f3746d4d5f5e8169f42c711f1f done\n            cluster.go:125: #5 naming to docker.io/library/sleep 0.0s done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n    --- PASS: docker.lib-coreos-dockerd-compat/networks-reliably (144.41s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context:\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 4.03MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:418eef6d5373e221dca576f1f88ee193bfc99c97fdba8cd24572ca59143e9017\n            cluster.go:125: #5 writing image sha256:418eef6d5373e221dca576f1f88ee193bfc99c97fdba8cd24572ca59143e9017 done\n            cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/user-no-caps (4.05s)\n            cluster.go:125: #0 building with \"default\" instance using docker driver\n            cluster.go:125: \n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context:\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 5.08MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . 
/\n            cluster.go:125: #4 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:901a0016a2618d59201a9f5342f3b5e6e195050bd2fb6577942c75e3b2009077 done\n            cluster.go:125: #5 naming to docker.io/library/captest 0.0s done\n            cluster.go:125: #5 DONE 0.1s\n    --- PASS: docker.lib-coreos-dockerd-compat/ownership (9.03s)\n            cluster.go:125: Unable to find image 'ghcr.io/flatcar/nginx:latest' locally\n            cluster.go:125: latest: Pulling from flatcar/nginx\n            cluster.go:125: f704f61efc9c: Pulling fs layer\n            cluster.go:125: a1eae9f5b3f3: Pulling fs layer\n            cluster.go:125: b91cc56ccc4f: Pulling fs layer\n            cluster.go:125: 44eaefe7f5ca: Pulling fs layer\n            cluster.go:125: 9c0f97895f78: Pulling fs layer\n            cluster.go:125: 8b4d97d25516: Pulling fs layer\n            cluster.go:125: 5f961e328469: Pulling fs layer\n            cluster.go:125: 44eaefe7f5ca: Waiting\n            cluster.go:125: 9c0f97895f78: Waiting\n            cluster.go:125: 8b4d97d25516: Waiting\n            cluster.go:125: 5f961e328469: Waiting\n            cluster.go:125: b91cc56ccc4f: Verifying Checksum\n            cluster.go:125: b91cc56ccc4f: Download complete\n            cluster.go:125: 44eaefe7f5ca: Verifying Checksum\n            cluster.go:125: 44eaefe7f5ca: Download complete\n            cluster.go:125: a1eae9f5b3f3: Verifying Checksum\n            cluster.go:125: a1eae9f5b3f3: Download complete\n            cluster.go:125: 9c0f97895f78: Verifying Checksum\n            cluster.go:125: 9c0f97895f78: Download complete\n            cluster.go:125: 8b4d97d25516: Download complete\n            cluster.go:125: f704f61efc9c: Verifying Checksum\n            cluster.go:125: f704f61efc9c: Download complete\n            cluster.go:125: 5f961e328469: Verifying Checksum\n            cluster.go:125: 5f961e328469: Download complete\n            cluster.go:125: f704f61efc9c: Pull complete\n            cluster.go:125: a1eae9f5b3f3: Pull complete\n            cluster.go:125: b91cc56ccc4f: Pull complete\n            cluster.go:125: 44eaefe7f5ca: Pull complete\n            cluster.go:125: 9c0f97895f78: Pull complete\n            cluster.go:125: 8b4d97d25516: Pull complete\n            cluster.go:125: 5f961e328469: Pull complete\n            cluster.go:125: Digest: sha256:37746262896e4e1a260f21898a0759befa3e3bc64a33bd95f7cd1b8400a9b03b\n            cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/nginx:latest\n"},{"name":"cl.verity/verify","result":"PASS","duration":94718691984,"output":"        cluster.go:125: Success\n"},{"name":"cl.ignition.kargs","result":"PASS","duration":94170524604,"output":"        cluster.go:152: + cat /proc/cmdline\n"},{"name":"coreos.ignition.resource.remote","result":"PASS","duration":96770400851,"output":""},{"name":"coreos.ignition.groups","result":"PASS","duration":98379143336,"output":""},{"name":"cl.ignition.v1.noop","result":"PASS","duration":93411079441,"output":""},{"name":"cl.metadata.azure","result":"PASS","duration":94649016097,"output":""},{"name":"cl.cloudinit.multipart-mime","result":"PASS","duration":104029306915,"output":""},{"name":"docker.containerd-restart","result":"PASS","duration":226716748773,"output":"        cluster.go:125: Unable to find image 
'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: dbc3eadfebd7: Pulling fs layer\n        cluster.go:125: dbc3eadfebd7: Verifying Checksum\n        cluster.go:125: dbc3eadfebd7: Download complete\n        cluster.go:125: dbc3eadfebd7: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n"},{"name":"kubeadm.v1.35.1.cilium.base/node_readiness","result":"PASS","duration":14215298377,"output":""},{"name":"sysext.custom-docker.sysext","result":"PASS","duration":142840330408,"output":"        cluster.go:125: bash: line 1: docker: command not found\n        cluster.go:125: Cloning into 'sysext-bakery'...\n        cluster.go:125: Note: switching to '9850ffd5b2353f45a9b3bf4fb84f8138a149e3e7'.\n        cluster.go:125: \n        cluster.go:125: You are in 'detached HEAD' state. You can look around, make experimental\n        cluster.go:125: changes and commit them, and you can discard any commits you make in this\n        cluster.go:125: state without impacting any branches by switching back to a branch.\n        cluster.go:125: \n        cluster.go:125: If you want to create a new branch to retain commits you create, you may\n        cluster.go:125: do so (now or later) by using -c with the switch command. Example:\n        cluster.go:125: \n        cluster.go:125:   git switch -c \u003cnew-branch-name\u003e\n        cluster.go:125: \n        cluster.go:125: Or undo this operation with:\n        cluster.go:125: \n        cluster.go:125:   git switch -\n        cluster.go:125: \n        cluster.go:125: Turn off this advice by setting config variable advice.detachedHead to false\n        cluster.go:125: \n        cluster.go:125: HEAD is now at 9850ffd Merge pull request #31 from flatcar/t-lo/fix-docker-23-containerd-shim\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: dbc3eadfebd7: Pulling fs layer\n        cluster.go:125: dbc3eadfebd7: Verifying Checksum\n        cluster.go:125: dbc3eadfebd7: Download complete\n        cluster.go:125: dbc3eadfebd7: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: mke2fs 1.47.3 (8-Jul-2025)\n        cluster.go:125: resize2fs 1.47.3 (8-Jul-2025)\n"},{"name":"kubeadm.v1.35.1.cilium.base/nginx_deployment","result":"PASS","duration":9219905713,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/NFS_deployment","result":"PASS","duration":19010588845,"output":"        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n        cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null 
(null)\n"},{"name":"systemd.journal.user","result":"PASS","duration":97412401690,"output":""},{"name":"coreos.locksmith.tls","result":"PASS","duration":140851976900,"output":""},{"name":"kubeadm.v1.35.1.cilium.base/IPSec_encryption","result":"PASS","duration":19796294684,"output":"        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"cl.ignition.v2_1.ext4checkexisting","result":"PASS","duration":186884121197,"output":""},{"name":"kubeadm.v1.35.1.cilium.base","result":"PASS","duration":385572288158,"output":"        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-f657e2c310\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-f657e2c310\": lookup ci-4459.2.4-n-f657e2c310 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459.2.4-n-f657e2c310 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.24]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate 
and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.056718ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.24:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.014861679s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.281341718s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.001552566s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-f657e2c310 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-f657e2c310 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 2e5x19.p4mmnze7yjs14t9l\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.24:6443 --token 2e5x19.p4mmnze7yjs14t9l \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:40dc01ce3b8e514f95cf9612b79ee136d2ba97f9eee996061d3d41216edc0e37 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? 
Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! 
Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        cluster.go:125: W0416 23:29:27.879504    2376 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.35.1.cilium.base/node_readiness (14.22s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/nginx_deployment (9.22s)\n    --- PASS: kubeadm.v1.35.1.cilium.base/NFS_deployment (19.01s)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n            cluster.go:125: jq: error (at \u003cstdin\u003e:123): Cannot iterate over null (null)\n    --- PASS: kubeadm.v1.35.1.cilium.base/IPSec_encryption (19.80s)\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n            cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n"},{"name":"coreos.ignition.resource.local","result":"PASS","duration":292979003379,"output":""},{"name":"coreos.ignition.ssh.key","result":"PASS","duration":97544938770,"output":""},{"name":"docker.btrfs-storage","result":"PASS","duration":96830576594,"output":""},{"name":"docker.enable-service.sysext","result":"PASS","duration":94678914811,"output":"        cluster.go:152: + systemctl is-enabled docker\n"},{"name":"coreos.locksmith.reboot","result":"PASS","duration":426154137120,"output":""},{"name":"cl.flannel.vxlan","result":"PASS","duration":137806259138,"output":"        flannel.go:121: ping from ci-4459.2.4-n-d6a1c0bed5(10.254.63.0) to 
ci-4459.2.4-n-7ed14217b3(10.254.83.0)\n"},{"name":"sysext.simple","result":"PASS","duration":101268308631,"output":""},{"name":"docker.selinux","result":"PASS","duration":76025887133,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: dbc3eadfebd7: Pulling fs layer\n        cluster.go:125: dbc3eadfebd7: Verifying Checksum\n        cluster.go:125: dbc3eadfebd7: Download complete\n        cluster.go:125: dbc3eadfebd7: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: sh: can't create /opt/hello: Permission denied\n"},{"name":"cl.verity/corruption","result":"PASS","duration":175177901324,"output":""},{"name":"cl.ignition.v2.xfsroot","result":"PASS","duration":97125544313,"output":""},{"name":"cl.verity","result":"PASS","duration":362725384891,"output":"    --- PASS: cl.verity/verify (94.72s)\n            cluster.go:125: Success\n    --- PASS: cl.verity/corruption (175.18s)\n"},{"name":"coreos.ignition.once","result":"PASS","duration":149698036594,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/node_readiness","result":"PASS","duration":7802157002,"output":""},{"name":"kubeadm.v1.34.4.flannel.base/nginx_deployment","result":"FAIL","duration":20957464040,"output":"        cluster.go:145: \"kubectl get deployments -o json | jq '.items | .[] | .status.readyReplicas'\" failed: output , status ssh: handshake failed: read tcp 10.200.1.6:37550-\u003e20.57.153.118:22: read: connection reset by peer\n"},{"name":"kubeadm.v1.34.4.flannel.base/NFS_deployment","result":"FAIL","duration":1022502604,"output":"        kubeadm.go:238: unable to add helm NFS repo: dial tcp 20.57.153.118:22: connect: connection refused\n"},{"name":"cl.update.reboot","result":"FAIL","duration":104963093815,"output":"        cluster.go:145: \"sudo dd if=/dev/disk/by-partlabel/USR-A of=/dev/disk/by-partlabel/USR-B bs=10M status=none\" failed: output , status wait: remote command exited without exit status or exit signal\n"},{"name":"kubeadm.v1.33.8.flannel.base","result":"FAIL","duration":2292024535,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-071f557899\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.translation","result":"FAIL","duration":217382653,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT 
https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-2cdeb2555e\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.base","result":"FAIL","duration":238941371,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-4fa008dcba\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2_1.swap","result":"FAIL","duration":225002139,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-fa0d6048ef\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"FAIL","duration":396145993676,"output":"        cluster.go:125: I0416 23:29:54.556799    2526 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0416 23:30:06.931727    2740 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        
cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-9655a6ec23\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-9655a6ec23\": lookup ci-4459.2.4-n-9655a6ec23 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459.2.4-n-9655a6ec23 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.8]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane 
as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 1.001462121s\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.8:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.503259101s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 4.005282393s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 6.001562869s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-9655a6ec23 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-9655a6ec23 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: psjp6i.zs27pe5gxke203tk\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        
cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.8:6443 --token psjp6i.zs27pe5gxke203tk \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:00f5f68cdc08319022270d5e9222cabe19f6aff5c3985ff5706231b8ba94b446 \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        cluster.go:125: W0416 23:31:48.960846    2320 joinconfiguration.go:112] [config] WARNING: Ignored configuration document with GroupVersionKind kubelet.config.k8s.io/v1beta1, Kind=KubeletConfiguration\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n    --- PASS: kubeadm.v1.34.4.flannel.base/node_readiness (7.80s)\n    --- FAIL: kubeadm.v1.34.4.flannel.base/nginx_deployment (20.96s)\n            cluster.go:145: \"kubectl get deployments -o json | jq '.items | .[] | .status.readyReplicas'\" failed: output , status ssh: handshake failed: read tcp 10.200.1.6:37550-\u003e20.57.153.118:22: read: connection reset by peer\n    --- FAIL: kubeadm.v1.34.4.flannel.base/NFS_deployment (1.02s)\n            kubeadm.go:238: unable to add helm NFS repo: dial tcp 20.57.153.118:22: connect: connection refused\n"},{"name":"cl.internet","result":"FAIL","duration":218437948,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-3507637e59\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.selinux.enforce","result":"FAIL","duration":219797254,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-454f07b4d8\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    
\"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2.noop","result":"FAIL","duration":217797053,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-82df765881\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.35.1.flannel.base","result":"FAIL","duration":2229854951,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Network/publicIPAddresses/ip-328d1dfbb2\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-43a97362ed' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"systemd.sysusers.gshadow","result":"FAIL","duration":115909384460,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-98506a6746): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/7368c462-3793-434c-a675-cc4aed06ee9e\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:30:42.5731242+00:00\",\n  \"endTime\": \"2026-04-16T23:32:31.2464177+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"7368c462-3793-434c-a675-cc4aed06ee9e\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.etcd-member.v2-backup-restore","result":"FAIL","duration":51803494265,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-f76c29f0f4): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/67fdaa99-1279-4591-9d14-8b4c87f4ae9a\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: 
OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:52.0373869+00:00\",\n  \"endTime\": \"2026-04-16T23:32:33.6733889+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"67fdaa99-1279-4591-9d14-8b4c87f4ae9a\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"FAIL","duration":414769963637,"output":"        cluster.go:125: I0416 23:29:56.400452    2604 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0416 23:30:08.009623    2882 version.go:260] remote version is much newer: v1.35.4; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-b3358a4beb\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4459.2.4-n-b3358a4beb\": lookup ci-4459.2.4-n-b3358a4beb on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4459.2.4-n-b3358a4beb kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.6]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping 
apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.678724ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.6:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 3.003075108s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.206281834s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.001084457s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. 
Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-b3358a4beb as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4459.2.4-n-b3358a4beb as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: g9imb3.u39eucul7slnbma5\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.6:6443 --token g9imb3.u39eucul7slnbma5 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:ad19be199ec797663d015d11605b9f92c17340474100dc333d7ff666845ff875 \n        cluster.go:125: namespace/tigera-operator created\n        cluster.go:125: serviceaccount/tigera-operator created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created\n        cluster.go:125: rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created\n        cluster.go:125: deployment.apps/tigera-operator created\n        cluster.go:125: 
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io condition met\n        cluster.go:125: installation.operator.tigera.io/default created\n        cluster.go:125: apiserver.operator.tigera.io/default created\n        cluster.go:125: goldmane.operator.tigera.io/default created\n        cluster.go:125: whisker.operator.tigera.io/default created\n        kubeadm.go:197: unable to setup cluster: unable to create worker node: waiting for machine to become active: GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-43a97362ed/providers/Microsoft.Compute/virtualMachines/ci-4459.2.4-n-65e803fd19\n--------------------------------------------------------------------------------\nRESPONSE 404: 404 Not Found\nERROR CODE: NotFound\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"NotFound\",\n    \"message\": \"The entity was not found in this Azure location.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"FAIL","duration":147299437099,"output":"        kubeadm.go:197: unable to setup cluster: unable to create master node: PollUntilDone(ci-4459.2.4-n-0e28a723a9): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/2a5c6122-6dc8-40de-ae0d-d12ac1278b81\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:18.8497665+00:00\",\n  \"endTime\": \"2026-04-16T23:32:34.0012796+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"2a5c6122-6dc8-40de-ae0d-d12ac1278b81\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"FAIL","duration":86053046948,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: PollUntilDone(ci-4459.2.4-n-3eea0eb3fa): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/440002b4-10be-4fb0-8300-73b5baa4964c\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:20.4778952+00:00\",\n  \"endTime\": \"2026-04-16T23:32:30.0326245+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": 
\"440002b4-10be-4fb0-8300-73b5baa4964c\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.overlay.cleanup","result":"FAIL","duration":50791981799,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-e25e6e6db2): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/7b27add8-0027-430e-ad3d-c8113dc97fc9\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:54.8502984+00:00\",\n  \"endTime\": \"2026-04-16T23:32:32.4237216+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"7b27add8-0027-430e-ad3d-c8113dc97fc9\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.ignition.security.tls","result":"FAIL","duration":218178577620,"output":"        security.go:132: starting client: PollUntilDone(ci-4459.2.4-n-17a9a59635): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/322d12e4-c122-4945-86ee-5bfe87083c56\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:30:52.9669037+00:00\",\n  \"endTime\": \"2026-04-16T23:32:27.2118758+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"322d12e4-c122-4945-86ee-5bfe87083c56\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.etcd-member.discovery","result":"FAIL","duration":113094873657,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-11d31f04cd): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/482f7673-f4e7-48d2-95f5-2b1ef045da7e\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:30:57.189057+00:00\",\n  \"endTime\": \"2026-04-16T23:32:33.8343967+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"482f7673-f4e7-48d2-95f5-2b1ef045da7e\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.btrfsroot","result":"FAIL","duration":81038459408,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-4720243717): GET 
https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/5129cd50-6960-469d-ab21-64d6593ab53b\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:31.2424808+00:00\",\n  \"endTime\": \"2026-04-16T23:32:44.7093047+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"5129cd50-6960-469d-ab21-64d6593ab53b\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"sysext.disable-docker","result":"FAIL","duration":54062496866,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-c333e8aa40): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/7a95a149-2b89-4c13-bef3-d4ab3e164ad6\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:32:01.3632904+00:00\",\n  \"endTime\": \"2026-04-16T23:32:30.3820322+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"7a95a149-2b89-4c13-bef3-d4ab3e164ad6\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.ext4root","result":"FAIL","duration":81310464668,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-8c085ccce8): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/9a9e9e8f-7f1a-4ad0-a533-0926b1ac9115\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:33.266798+00:00\",\n  \"endTime\": \"2026-04-16T23:32:47.4174866+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"9a9e9e8f-7f1a-4ad0-a533-0926b1ac9115\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"FAIL","duration":209397729875,"output":"        kubeadm.go:197: unable to setup cluster: unable to create master node: PollUntilDone(ci-4459.2.4-n-5309b3a352): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/2ed4ce13-2165-48a0-82b1-ed1d0c2b1e50\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:05.7763956+00:00\",\n  \"endTime\": 
\"2026-04-16T23:32:35.1571652+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"2ed4ce13-2165-48a0-82b1-ed1d0c2b1e50\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"FAIL","duration":113015251517,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: PollUntilDone(ci-4459.2.4-n-81e74af908): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/f02ca549-aeab-453f-8350-350fc4c5da47\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:11.9567526+00:00\",\n  \"endTime\": \"2026-04-16T23:32:39.26685+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"f02ca549-aeab-453f-8350-350fc4c5da47\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.toolbox.dnf-install","result":"FAIL","duration":202923533286,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-4bbe1da7b9): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/54e7a583-47a8-4e80-8663-b7ce915048d6\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:30:44.8387268+00:00\",\n  \"endTime\": \"2026-04-16T23:33:43.1920824+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"54e7a583-47a8-4e80-8663-b7ce915048d6\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.basic","result":"FAIL","duration":781683910731,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4459.2.4-n-8ab2567a18): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/westus2/operations/d4227d92-14bc-4f47-8b86-b3a5182140fa\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-16T23:31:53.5378132+00:00\",\n  \"endTime\": \"2026-04-16T23:44:23.1674802+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"d4227d92-14bc-4f47-8b86-b3a5182140fa\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.locksmith.cluster","result":"FAIL","duration":1198990010088,"output":"        locksmith.go:184: [0] ssh unreachable or system 
not ready: context deadline exceeded [1] ssh unreachable or system not ready: context deadline exceeded [2] ssh unreachable or system not ready: context deadline exceeded\n"},{"name":"cl.update.badverity","result":"FAIL","duration":3880233898661,"output":"        update.go:168: ssh unreachable or system not ready: failure checking if machine is running: systemctl is-system-running returned stdout: \"\", stderr: \"\", err: dial tcp 20.115.162.238:22: i/o timeout, systemctl list-jobs returned stdout: \"\", stderr: \"\", err: dial tcp 20.115.162.238:22: i/o timeout\n"},{"name":"coreos.update.badusr","result":"FAIL","duration":5209018574251,"output":"        update.go:168: ssh unreachable or system not ready: failure checking if machine is running: systemctl is-system-running returned stdout: \"\", stderr: \"\", err: dial tcp 20.109.152.145:22: i/o timeout, systemctl list-jobs returned stdout: \"\", stderr: \"\", err: dial tcp 20.109.152.145:22: i/o timeout\n"}],"result":"FAIL","platform":"azure","version":"4459.2.4"}
