{"tests":[{"name":"cl.ignition.v2_1.swap","result":"PASS","duration":83524027561,"output":""},{"name":"cl.etcd-member.etcdctlv3","result":"PASS","duration":83733905770,"output":"        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.1477873,\"caller\":\"snapshot/v3_snapshot.go:68\",\"msg\":\"created temporary db file\",\"path\":\"/tmp/tmp.y2QSnOtuec/snapshot.db.part\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.1487927,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:211\",\"msg\":\"opened snapshot stream; downloading\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.148837,\"caller\":\"snapshot/v3_snapshot.go:76\",\"msg\":\"fetching snapshot\",\"endpoint\":\"127.0.0.1:2379\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.1535382,\"logger\":\"client\",\"caller\":\"v3/maintenance.go:219\",\"msg\":\"completed snapshot read; closing\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.1535761,\"caller\":\"snapshot/v3_snapshot.go:91\",\"msg\":\"fetched snapshot\",\"endpoint\":\"127.0.0.1:2379\",\"size\":\"20 kB\",\"took\":\"now\"}\n        cluster.go:125: {\"level\":\"info\",\"ts\":1777073424.1536088,\"caller\":\"snapshot/v3_snapshot.go:100\",\"msg\":\"saved\",\"path\":\"/tmp/tmp.y2QSnOtuec/snapshot.db\"}\n        cluster.go:125: Deprecated: Use `etcdutl snapshot status` instead.\n"},{"name":"cl.etcd-member.v2-backup-restore","result":"PASS","duration":87569497566,"output":"        cluster.go:125: 2026-04-24T23:30:24Z\tinfo\tetcdutl/backup_command.go:216\tignoring EntryConfChange raft entry\n        cluster.go:125: 2026-04-24T23:30:24Z\tinfo\tetcdutl/backup_command.go:231\tignoring member attribute update on\t{\"entry\": \"Term:2 Index:3 Data:\\\"\\\\010\\\\201\\\\354\\\\233\\\\242\\\\235\\\\270\\\\247\\\\305\\\\231\\\\001\\\\022\\\\003PUT\\\\032\u0026/0/members/6e5328d2c230998a/attributes\\\\\\\"R{\\\\\\\"name\\\\\\\":\\\\\\\"a7ad1e8efb7a41798e3c66fc4e0a1287\\\\\\\",\\\\\\\"clientURLs\\\\\\\":[\\\\\\\"http://10.0.0.19:2379\\\\\\\"]}(\\\\0002\\\\0008\\\\000H\\\\000P\\\\000X\\\\000`\\\\000h\\\\000p\\\\000x\\\\000\\\\200\\\\001\\\\000\\\" \", \"v2Req.Path\": \"/0/members/6e5328d2c230998a/attributes\"}\n        cluster.go:125: 2026-04-24T23:30:24Z\tinfo\tetcdutl/backup_command.go:252\tignoring v3 raft entry\n        cluster.go:125: 2026-04-24T23:30:24Z\tinfo\tmembership/store.go:119\tTrimming membership information from the backend...\n"},{"name":"cl.basic/PortSSH","result":"PASS","duration":269754133,"output":""},{"name":"cl.basic/DbusPerms","result":"PASS","duration":411406277,"output":""},{"name":"cl.basic/Symlink","result":"PASS","duration":276596871,"output":""},{"name":"cl.ignition.kargs","result":"PASS","duration":113082894621,"output":"        cluster.go:152: + cat /proc/cmdline\n"},{"name":"sysext.disable-containerd","result":"PASS","duration":113315496739,"output":""},{"name":"cl.basic/SymlinkFlatcar","result":"PASS","duration":294476098,"output":""},{"name":"coreos.auth.verify","result":"PASS","duration":113363808489,"output":""},{"name":"cl.users.shells","result":"PASS","duration":113412495584,"output":""},{"name":"cl.network.wireguard","result":"PASS","duration":113522845888,"output":"        cluster.go:152: + ip --json address show kv0 | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n"},{"name":"cl.swap_activation","result":"PASS","duration":114047847800,"output":""},{"name":"coreos.ignition.groups","result":"PASS","duration":114094923052,"output":""},{"name":"cl.basic/Useradd","result":"PASS","duration":853167067,"output":""},{"name":"cl.basic/Microcode","result":"PASS","duration":278927383,"output":""},{"name":"cl.ignition.v1.users","result":"PASS","duration":114521323314,"output":""},{"name":"systemd.sysusers.gshadow","result":"PASS","duration":114534224065,"output":""},{"name":"cl.basic/UpdateEngineKeys","result":"PASS","duration":270199846,"output":""},{"name":"cl.basic/ServicesActive","result":"PASS","duration":295294503,"output":""},{"name":"cl.basic/ReadOnly","result":"PASS","duration":261582999,"output":""},{"name":"cl.basic/RandomUUID","result":"PASS","duration":348008907,"output":""},{"name":"cl.basic/MachineID","result":"PASS","duration":266334250,"output":""},{"name":"cl.basic/CloudConfig","result":"PASS","duration":1406409129,"output":""},{"name":"cl.basic/Script","result":"PASS","duration":313665774,"output":""},{"name":"cl.basic","result":"PASS","duration":119877486193,"output":"    --- PASS: cl.basic/PortSSH (0.27s)\n    --- PASS: cl.basic/DbusPerms (0.41s)\n    --- PASS: cl.basic/Symlink (0.28s)\n    --- PASS: cl.basic/SymlinkFlatcar (0.29s)\n    --- PASS: cl.basic/Useradd (0.85s)\n    --- PASS: cl.basic/Microcode (0.28s)\n    --- PASS: cl.basic/UpdateEngineKeys (0.27s)\n    --- PASS: cl.basic/ServicesActive (0.30s)\n    --- PASS: cl.basic/ReadOnly (0.26s)\n    --- PASS: cl.basic/RandomUUID (0.35s)\n    --- PASS: cl.basic/MachineID (0.27s)\n    --- PASS: cl.basic/CloudConfig (1.41s)\n    --- PASS: cl.basic/Script (0.31s)\n"},{"name":"docker.selinux","result":"PASS","duration":122089658774,"output":"        cluster.go:125: Unable to find image 'ghcr.io/flatcar/busybox:latest' locally\n        cluster.go:125: latest: Pulling from flatcar/busybox\n        cluster.go:125: 4bf2067f7735: Pulling fs layer\n        cluster.go:125: 4bf2067f7735: Verifying Checksum\n        cluster.go:125: 4bf2067f7735: Download complete\n        cluster.go:125: 4bf2067f7735: Pull complete\n        cluster.go:125: Digest: sha256:93e8234eb9ca92b9aae20fd73d6c9447ac3d1cc741c6e80c737f821dca582a0e\n        cluster.go:125: Status: Downloaded newer image for ghcr.io/flatcar/busybox:latest\n        cluster.go:125: sh: can't create /opt/hello: Permission denied\n"},{"name":"cl.network.initramfs.second-boot","result":"PASS","duration":130523844149,"output":""},{"name":"cl.flannel.udp","result":"PASS","duration":184930531099,"output":"        cluster.go:125: Timeout occurred while waiting for network connectivity.\n        flannel.go:121: ping from ci-4081.3.6-n-ecbbd39a82(10.254.2.0) to ci-4081.3.6-n-a38527aef1(10.254.52.1)\n"},{"name":"cl.cloudinit.script","result":"PASS","duration":82541089385,"output":""},{"name":"docker.base/docker-info","result":"PASS","duration":2897410345,"output":""},{"name":"cl.ignition.v2.noop","result":"PASS","duration":82986097842,"output":""},{"name":"coreos.tls.fetch-urls","result":"PASS","duration":88026353579,"output":""},{"name":"docker.base/resources","result":"PASS","duration":10460288967,"output":"        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile:\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 2.20MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:ff2d90739ff0e13781f962f954f25fb8f287c75425df5b46e58652df19df98e6 done\n        cluster.go:125: #5 naming to docker.io/library/sleep done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n        cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n"},{"name":"coreos.ignition.security.tls","result":"PASS","duration":226063908445,"output":""},{"name":"cl.osreset.ignition-rerun","result":"PASS","duration":112049240116,"output":""},{"name":"cl.ignition.translation","result":"PASS","duration":113460193385,"output":"        cluster.go:152: + ip --json address show kola | jq -r '.[] | .addr_info | .[] | select( .family == \"inet\") | .local'\n        cluster.go:152: + cat /etc/systemd/network/00-dummy.network\n"},{"name":"cl.etcd-member.discovery","result":"PASS","duration":114523626925,"output":""},{"name":"sysext.simple","result":"PASS","duration":114709755215,"output":""},{"name":"coreos.locksmith.tls","result":"PASS","duration":127421101227,"output":""},{"name":"cl.internet/UpdateEngine","result":"PASS","duration":360570208,"output":""},{"name":"coreos.selinux.enforce","result":"PASS","duration":132073761793,"output":""},{"name":"sysext.disable-docker","result":"PASS","duration":128724329518,"output":""},{"name":"cl.internet/DockerPing","result":"PASS","duration":9347017595,"output":""},{"name":"cl.update.badverity","result":"FAIL","duration":1484504043,"output":"        harness.go:646: Cluster failed starting machines: creating nic: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/networkInterfaces/nic-02176ca18e\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.internet/DockerEcho","result":"PASS","duration":725924062,"output":""},{"name":"kubeadm.v1.33.8.flannel.base","result":"FAIL","duration":2064424124,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-eef5811f9a\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"systemd.journal.user","result":"FAIL","duration":99534219,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-475e0fb30b\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.35.1.cilium.base","result":"FAIL","duration":2063921360,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-c021567e70\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"sysext.custom-docker.sysext","result":"FAIL","duration":186871979,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-77e9fe3f16\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.ignition.once","result":"FAIL","duration":185783669,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-8d5a61bb1d\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.network-openbsd-nc","result":"FAIL","duration":94801092,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-1e7845b6ee\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.toolbox.dnf-install","result":"FAIL","duration":112534487,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-987541d0f6\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.overlay.cleanup","result":"FAIL","duration":72561235,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-b3e7936f3a\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2.xfsroot","result":"FAIL","duration":66238212,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-9770e9bf1e\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.flannel.vxlan","result":"FAIL","duration":290903168,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-0da04e85c4\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.containerd-restart","result":"FAIL","duration":65837458,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-90d1b0a928\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.userns","result":"FAIL","duration":77739541,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-edc1cc55d5\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2.ext4root","result":"FAIL","duration":102806043,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-e4dfc1a968\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.selinux.boolean","result":"FAIL","duration":71622315,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-e9fbc97bb2\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.base/networks-reliably","result":"FAIL","duration":52118167869,"output":"        cluster.go:125: #1 [internal] load build definition from Dockerfile\n        cluster.go:125: #1 transferring dockerfile: 108B done\n        cluster.go:125: #1 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #2 [internal] load .dockerignore\n        cluster.go:125: #2 transferring context: 2B done\n        cluster.go:125: #2 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #3 [internal] load build context\n        cluster.go:125: #3 transferring context: 4.04MB 0.0s done\n        cluster.go:125: #3 DONE 0.1s\n        cluster.go:125: \n        cluster.go:125: #4 [1/1] COPY . /\n        cluster.go:125: #4 DONE 0.0s\n        cluster.go:125: \n        cluster.go:125: #5 exporting to image\n        cluster.go:125: #5 exporting layers 0.1s done\n        cluster.go:125: #5 writing image sha256:69e33aa7989053e019c65e4c6420bc418f23d216ff8d41c38b49dfe1c43bc03d done\n        cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n        cluster.go:125: #5 DONE 0.1s\n        cluster.go:145: \"for i in $(seq 1 100); do\\n\\t\\techo -n \\\"$i: \\\"\\n\\t\\tdocker run --rm ping sh -c 'ping -i 0.2 172.17.0.1 -w 1 \u003e/dev/null \u0026\u0026 echo PASS || echo FAIL'\\n\\tdone\" failed: output 1: PASS\n2: PASS\n3: PASS\n4: PASS\n5: PASS\n6: PASS\n7: PASS\n8: PASS\n9: PASS\n10: PASS\n11: PASS\n12: PASS\n13: PASS\n14: PASS\n15: PASS\n16: PASS\n17: PASS\n18: PASS\n19: PASS\n20: PASS\n21: PASS\n22: PASS\n23: PASS\n24: PASS\n25: PASS\n26: PASS\n27: PASS\n28: PASS\n29: PASS\n30: PASS\n31:, status wait: remote command exited without exit status or exit signal\n"},{"name":"cl.internet/NTPDate","result":"PASS","duration":7070015746,"output":""},{"name":"docker.base/user-no-caps","result":"FAIL","duration":51619800,"output":"        cluster.go:145: \"tmpdir=$(mktemp -d); cd $tmpdir; echo -e \\\"FROM scratch\\\\nCOPY . /\\\" \u003e Dockerfile;\\n\\t        b=$(which capsh sh grep cat ls); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u);\\n\\t        sudo rsync -av --relative --copy-links $b $libs ./;\\n\\t        sudo docker build -t captest .\" failed: output , status ssh: handshake failed: read tcp 10.200.1.4:33802-\u003e20.223.150.222:22: read: connection reset by peer\n"},{"name":"docker.base/ownership","result":"FAIL","duration":51570205,"output":"        cluster.go:145: \"docker run --name ownership ghcr.io/flatcar/nginx stat -c \\\"%u/%g\\\" /etc/shadow\" failed: output , status ssh: handshake failed: read tcp 10.200.1.4:33816-\u003e20.223.150.222:22: read: connection reset by peer\n"},{"name":"kubeadm.v1.34.4.flannel.base","result":"FAIL","duration":2056336289,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-b37d7d1359\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2_1.vfat","result":"FAIL","duration":69085913,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-a2c0ae037a\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2.btrfsroot","result":"FAIL","duration":89778975,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-456882cb2a\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2.users","result":"FAIL","duration":166745633,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-eda7ee5f04\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.update.reboot","result":"PASS","duration":147384478185,"output":""},{"name":"coreos.ignition.resource.remote","result":"FAIL","duration":72705443,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-39aaedc2fa\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v2_1.ext4checkexisting","result":"FAIL","duration":103427360,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-c010603d03\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.internet","result":"PASS","duration":131143990248,"output":"    --- PASS: cl.internet/UpdateEngine (0.36s)\n    --- PASS: cl.internet/DockerPing (9.35s)\n    --- PASS: cl.internet/DockerEcho (0.73s)\n    --- PASS: cl.internet/NTPDate (7.07s)\n"},{"name":"cl.ignition.v1.xfsroot","result":"FAIL","duration":84010937,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-727b7fc6ee\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.calico.base","result":"FAIL","duration":178174143783,"output":"        cluster.go:125: I0424 23:33:14.343846    2388 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        kubeadm.go:197: unable to setup cluster: unable to run master script: wait: remote command exited without exit status or exit signal\n"},{"name":"cl.ignition.symlink","result":"FAIL","duration":64931095,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-04e044f16d\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.base","result":"FAIL","duration":178013264788,"output":"    --- PASS: docker.base/docker-info (2.90s)\n    --- PASS: docker.base/resources (10.46s)\n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile:\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 2.20MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:ff2d90739ff0e13781f962f954f25fb8f287c75425df5b46e58652df19df98e6 done\n            cluster.go:125: #5 naming to docker.io/library/sleep done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:125: WARNING: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.\n            cluster.go:125: WARNING: Your kernel does not support OomKillDisable. OomKillDisable discarded.\n    --- FAIL: docker.base/networks-reliably (52.12s)\n            cluster.go:125: #1 [internal] load build definition from Dockerfile\n            cluster.go:125: #1 transferring dockerfile: 108B done\n            cluster.go:125: #1 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #2 [internal] load .dockerignore\n            cluster.go:125: #2 transferring context: 2B done\n            cluster.go:125: #2 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #3 [internal] load build context\n            cluster.go:125: #3 transferring context: 4.04MB 0.0s done\n            cluster.go:125: #3 DONE 0.1s\n            cluster.go:125: \n            cluster.go:125: #4 [1/1] COPY . /\n            cluster.go:125: #4 DONE 0.0s\n            cluster.go:125: \n            cluster.go:125: #5 exporting to image\n            cluster.go:125: #5 exporting layers 0.1s done\n            cluster.go:125: #5 writing image sha256:69e33aa7989053e019c65e4c6420bc418f23d216ff8d41c38b49dfe1c43bc03d done\n            cluster.go:125: #5 naming to docker.io/library/ping 0.0s done\n            cluster.go:125: #5 DONE 0.1s\n            cluster.go:145: \"for i in $(seq 1 100); do\\n\\t\\techo -n \\\"$i: \\\"\\n\\t\\tdocker run --rm ping sh -c 'ping -i 0.2 172.17.0.1 -w 1 \u003e/dev/null \u0026\u0026 echo PASS || echo FAIL'\\n\\tdone\" failed: output 1: PASS\n    2: PASS\n    3: PASS\n    4: PASS\n    5: PASS\n    6: PASS\n    7: PASS\n    8: PASS\n    9: PASS\n    10: PASS\n    11: PASS\n    12: PASS\n    13: PASS\n    14: PASS\n    15: PASS\n    16: PASS\n    17: PASS\n    18: PASS\n    19: PASS\n    20: PASS\n    21: PASS\n    22: PASS\n    23: PASS\n    24: PASS\n    25: PASS\n    26: PASS\n    27: PASS\n    28: PASS\n    29: PASS\n    30: PASS\n    31:, status wait: remote command exited without exit status or exit signal\n    --- FAIL: docker.base/user-no-caps (0.05s)\n            cluster.go:145: \"tmpdir=$(mktemp -d); cd $tmpdir; echo -e \\\"FROM scratch\\\\nCOPY . /\\\" \u003e Dockerfile;\\n\\t        b=$(which capsh sh grep cat ls); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u);\\n\\t        sudo rsync -av --relative --copy-links $b $libs ./;\\n\\t        sudo docker build -t captest .\" failed: output , status ssh: handshake failed: read tcp 10.200.1.4:33802-\u003e20.223.150.222:22: read: connection reset by peer\n    --- FAIL: docker.base/ownership (0.05s)\n            cluster.go:145: \"docker run --name ownership ghcr.io/flatcar/nginx stat -c \\\"%u/%g\\\" /etc/shadow\" failed: output , status ssh: handshake failed: read tcp 10.200.1.4:33816-\u003e20.223.150.222:22: read: connection reset by peer\n"},{"name":"coreos.ignition.systemd.enable-service","result":"FAIL","duration":85260408,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-06833f7850\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.misc.empty","result":"FAIL","duration":69740479,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-0e4daf0580\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.noop","result":"FAIL","duration":87089806,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-209d00dfde\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.luks","result":"FAIL","duration":72636996,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-f9b4ede94e\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.ignition.resource.local","result":"FAIL","duration":299945839,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-c50d512dda\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.groups","result":"FAIL","duration":96045232,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-3b2dd083f2\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.once","result":"FAIL","duration":90776414,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-0ce7d25342\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.btrfs-storage","result":"FAIL","duration":65905534,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-90edf3f85b\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"bpf.ig/ig","result":"FAIL","duration":322918213,"output":"        ig.go:50: creating node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-62438ccf95\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.ignition.ssh.key","result":"FAIL","duration":63805843,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-b2e2230eaf\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.cloudinit.basic","result":"FAIL","duration":70066871,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-7739bd3878\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.cloudinit.multipart-mime","result":"FAIL","duration":74555348,"output":"        harness.go:646: Cluster failed starting machines: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-b9addca590\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.flannel.cgroupv1.base","result":"FAIL","duration":2093180604,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-5a5369fa66\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.cilium.base","result":"FAIL","duration":2070717581,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-8108c0fb48\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"bpf.ig","result":"FAIL","duration":2323873654,"output":"    --- FAIL: bpf.ig/ig (0.32s)\n            ig.go:50: creating node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-62438ccf95\n    --------------------------------------------------------------------------------\n    RESPONSE 409: 409 Conflict\n    ERROR CODE: ResourceGroupBeingDeleted\n    --------------------------------------------------------------------------------\n    {\n      \"error\": {\n        \"code\": \"ResourceGroupBeingDeleted\",\n        \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n      }\n    }\n    --------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.calico.cgroupv1.base","result":"FAIL","duration":2076980732,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: creating public ip: PUT https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Network/publicIPAddresses/ip-eddf3b77d9\n--------------------------------------------------------------------------------\nRESPONSE 409: 409 Conflict\nERROR CODE: ResourceGroupBeingDeleted\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"ResourceGroupBeingDeleted\",\n    \"message\": \"The resource group 'kola-cluster-image-9fd5883bba' is in deprovisioning state and cannot perform this operation.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.35.1.flannel.base","result":"FAIL","duration":273817636761,"output":"        cluster.go:125: I0424 23:32:19.021434    2461 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.35.4\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.13.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.6-0\n        cluster.go:125: I0424 23:32:32.420102    2671 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.35\n        cluster.go:125: [init] Using Kubernetes version: v1.35.4\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING ContainerRuntimeVersion]: You must update your container runtime to a version that supports the CRI method RuntimeConfig. Falling back to using cgroupDriver from kubelet config will be removed in 1.36. For more information, see https://git.k8s.io/enhancements/keps/sig-node/4033-group-driver-detection-over-cri\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-7db0bcb296\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-7db0bcb296\": lookup ci-4081.3.6-n-7db0bcb296 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: W0424 23:32:32.611497    2671 checks.go:906] detected that the sandbox image \"registry.k8s.io/pause:3.8\" of the container runtime is inconsistent with that used by kubeadm. It is recommended to use \"registry.k8s.io/pause:3.10.1\" as the CRI sandbox image.\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4081.3.6-n-7db0bcb296 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.26]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 500.839287ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.26:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 4.508526281s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 5.187866927s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 7.001485914s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-7db0bcb296 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-7db0bcb296 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: ddpijz.0rvt9z36frgn5q49\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.26:6443 --token ddpijz.0rvt9z36frgn5q49 \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:c3f4a6a907020887014711a34e45e1a30569b2363477ae29b18470eeaff880cd \n        cluster.go:125: namespace/kube-flannel created\n        cluster.go:125: clusterrole.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: clusterrolebinding.rbac.authorization.k8s.io/flannel created\n        cluster.go:125: serviceaccount/flannel created\n        cluster.go:125: configmap/kube-flannel-cfg created\n        cluster.go:125: daemonset.apps/kube-flannel-ds created\n        kubeadm.go:197: unable to setup cluster: unable to create worker node: PollUntilDone(ci-4081.3.6-n-24ffd5f9a9): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/5b3a10c6-e2b1-488c-8897-06b17344c072\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:47.5930289+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.3057873+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"5b3a10c6-e2b1-488c-8897-06b17344c072\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.update.badusr","result":"FAIL","duration":78793699169,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-475e858fd9): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/1ab2daf9-5c2f-4b53-a247-6587c1dd55fe\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:20.6885893+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.7910001+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"1ab2daf9-5c2f-4b53-a247-6587c1dd55fe\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.metadata.azure","result":"FAIL","duration":48853669409,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-6c5b1a9603): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/eae90a70-d05a-454a-a6e0-3cfea1df3d9d\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:51.127317+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.6535924+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"eae90a70-d05a-454a-a6e0-3cfea1df3d9d\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.33.8.cilium.cgroupv1.base","result":"FAIL","duration":276876966735,"output":"        cluster.go:125: I0424 23:32:20.875527    2643 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.33.11\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.0\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.5.24-0\n        cluster.go:125: I0424 23:32:34.874823    2865 version.go:261] remote version is much newer: v1.36.0; falling back to: stable-1.33\n        cluster.go:125: [init] Using Kubernetes version: v1.33.11\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING SystemVerification]: cgroups v1 support is in maintenance mode, please migrate to cgroups v2\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-495a71b429\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-495a71b429\": lookup ci-4081.3.6-n-495a71b429 on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: W0424 23:32:35.067401    2865 checks.go:843] detected that the sandbox image \"registry.k8s.io/pause:3.8\" of the container runtime is inconsistent with that used by kubeadm.It is recommended to use \"registry.k8s.io/pause:3.10\" as the CRI sandbox image.\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4081.3.6-n-495a71b429 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.27]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.937545ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.27:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.54856253s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.314370322s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.001777498s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-495a71b429 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-495a71b429 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: rono8k.usr39dgmdee4j2jm\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.27:6443 --token rono8k.usr39dgmdee4j2jm \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:317b87545802334ee17d022683d7342dd48a78187263c5ff6c91b02792f5ce29 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium             \n        cluster.go:125:                  cilium-operator    \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        kubeadm.go:197: unable to setup cluster: unable to create worker node: PollUntilDone(ci-4081.3.6-n-845761880b): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/b23e45a9-8b16-4414-a86e-7f9b8efbba6f\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:50.9058504+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.4293582+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"b23e45a9-8b16-4414-a86e-7f9b8efbba6f\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.35.1.calico.base","result":"FAIL","duration":50364730431,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: PollUntilDone(ci-4081.3.6-n-685414ff41): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/87ecbc40-db87-4b64-a439-c602ecfc1f19\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:52.6621443+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.7910001+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"87ecbc40-db87-4b64-a439-c602ecfc1f19\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.network.iptables","result":"FAIL","duration":78927816576,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-e2f6f89649): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/b40fd4d3-318d-490a-b60d-537956a35956\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:26.1686291+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.5436583+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"b40fd4d3-318d-490a-b60d-537956a35956\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.34.4.cilium.base","result":"FAIL","duration":280677528919,"output":"        cluster.go:125: I0424 23:32:21.613174    2529 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-apiserver:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-controller-manager:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-scheduler:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/kube-proxy:v1.34.7\n        cluster.go:125: [config/images] Pulled registry.k8s.io/coredns/coredns:v1.12.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/pause:3.10.1\n        cluster.go:125: [config/images] Pulled registry.k8s.io/etcd:3.6.5-0\n        cluster.go:125: I0424 23:32:34.820768    2747 version.go:260] remote version is much newer: v1.36.0; falling back to: stable-1.34\n        cluster.go:125: [init] Using Kubernetes version: v1.34.7\n        cluster.go:125: [preflight] Running pre-flight checks\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-1c86d6ea5a\" could not be reached\n        cluster.go:125: \t[WARNING Hostname]: hostname \"ci-4081.3.6-n-1c86d6ea5a\": lookup ci-4081.3.6-n-1c86d6ea5a on 168.63.129.16:53: no such host\n        cluster.go:125: \t[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'\n        cluster.go:125: [preflight] Pulling images required for setting up a Kubernetes cluster\n        cluster.go:125: [preflight] This might take a minute or two, depending on the speed of your internet connection\n        cluster.go:125: [preflight] You can also perform this action beforehand using 'kubeadm config images pull'\n        cluster.go:125: W0424 23:32:35.362973    2747 checks.go:827] detected that the sandbox image \"registry.k8s.io/pause:3.8\" of the container runtime is inconsistent with that used by kubeadm. It is recommended to use \"registry.k8s.io/pause:3.10.1\" as the CRI sandbox image.\n        cluster.go:125: [certs] Using certificateDir folder \"/etc/kubernetes/pki\"\n        cluster.go:125: [certs] Generating \"ca\" certificate and key\n        cluster.go:125: [certs] Generating \"apiserver\" certificate and key\n        cluster.go:125: [certs] apiserver serving cert is signed for DNS names [ci-4081.3.6-n-1c86d6ea5a kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.28]\n        cluster.go:125: [certs] Generating \"apiserver-kubelet-client\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-ca\" certificate and key\n        cluster.go:125: [certs] Generating \"front-proxy-client\" certificate and key\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/ca certificate authority generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/server certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/peer certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation\n        cluster.go:125: [certs] External etcd mode: Skipping apiserver-etcd-client certificate generation\n        cluster.go:125: [certs] Generating \"sa\" key and public key\n        cluster.go:125: [kubeconfig] Using kubeconfig folder \"/etc/kubernetes\"\n        cluster.go:125: [kubeconfig] Writing \"admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"super-admin.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"kubelet.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"controller-manager.conf\" kubeconfig file\n        cluster.go:125: [kubeconfig] Writing \"scheduler.conf\" kubeconfig file\n        cluster.go:125: [control-plane] Using manifest folder \"/etc/kubernetes/manifests\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-apiserver\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-controller-manager\"\n        cluster.go:125: [control-plane] Creating static Pod manifest for \"kube-scheduler\"\n        cluster.go:125: [kubelet-start] Writing kubelet environment file with flags to file \"/var/lib/kubelet/kubeadm-flags.env\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/instance-config.yaml\"\n        cluster.go:125: [patches] Applied patch of type \"application/strategic-merge-patch+json\" to target \"kubeletconfiguration\"\n        cluster.go:125: [kubelet-start] Writing kubelet configuration to file \"/var/lib/kubelet/config.yaml\"\n        cluster.go:125: [kubelet-start] Starting the kubelet\n        cluster.go:125: [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory \"/etc/kubernetes/manifests\"\n        cluster.go:125: [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s\n        cluster.go:125: [kubelet-check] The kubelet is healthy after 501.344917ms\n        cluster.go:125: [control-plane-check] Waiting for healthy control plane components. This can take up to 30m0s\n        cluster.go:125: [control-plane-check] Checking kube-apiserver at https://10.0.0.28:6443/livez\n        cluster.go:125: [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz\n        cluster.go:125: [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez\n        cluster.go:125: [control-plane-check] kube-controller-manager is healthy after 2.511250582s\n        cluster.go:125: [control-plane-check] kube-scheduler is healthy after 3.269972539s\n        cluster.go:125: [control-plane-check] kube-apiserver is healthy after 5.002378282s\n        cluster.go:125: [upload-config] Storing the configuration used in ConfigMap \"kubeadm-config\" in the \"kube-system\" Namespace\n        cluster.go:125: [kubelet] Creating a ConfigMap \"kubelet-config\" in namespace kube-system with the configuration for the kubelets in the cluster\n        cluster.go:125: [upload-certs] Skipping phase. Please see --upload-certs\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-1c86d6ea5a as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]\n        cluster.go:125: [mark-control-plane] Marking the node ci-4081.3.6-n-1c86d6ea5a as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]\n        cluster.go:125: [bootstrap-token] Using token: 5ry9t4.0ira480x1n5p1k5p\n        cluster.go:125: [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n        cluster.go:125: [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n        cluster.go:125: [bootstrap-token] Creating the \"cluster-info\" ConfigMap in the \"kube-public\" namespace\n        cluster.go:125: [kubelet-finalize] Updating \"/etc/kubernetes/kubelet.conf\" to point to a rotatable kubelet client certificate and key\n        cluster.go:125: [addons] Applied essential addon: CoreDNS\n        cluster.go:125: [addons] Applied essential addon: kube-proxy\n        cluster.go:125: \n        cluster.go:125: Your Kubernetes control-plane has initialized successfully!\n        cluster.go:125: \n        cluster.go:125: To start using your cluster, you need to run the following as a regular user:\n        cluster.go:125: \n        cluster.go:125:   mkdir -p $HOME/.kube\n        cluster.go:125:   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n        cluster.go:125:   sudo chown $(id -u):$(id -g) $HOME/.kube/config\n        cluster.go:125: \n        cluster.go:125: Alternatively, if you are the root user, you can run:\n        cluster.go:125: \n        cluster.go:125:   export KUBECONFIG=/etc/kubernetes/admin.conf\n        cluster.go:125: \n        cluster.go:125: You should now deploy a pod network to the cluster.\n        cluster.go:125: Run \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\n        cluster.go:125:   https://kubernetes.io/docs/concepts/cluster-administration/addons/\n        cluster.go:125: \n        cluster.go:125: Then you can join any number of worker nodes by running the following on each as root:\n        cluster.go:125: \n        cluster.go:125: kubeadm join 10.0.0.28:6443 --token 5ry9t4.0ira480x1n5p1k5p \\\n        cluster.go:125: \t--discovery-token-ca-cert-hash sha256:818930c77557909277c09b3a750ff8ce53866ee001c751c2c199cca7dc91ac86 \n        cluster.go:125: i  Using Cilium version 1.12.5\n        cluster.go:125: ? Auto-detected cluster name: kubernetes\n        cluster.go:125: ? Auto-detected datapath mode: tunnel\n        cluster.go:125: ? Auto-detected kube-proxy has been installed\n        cluster.go:125: i  helm template --namespace kube-system cilium cilium/cilium --version 1.12.5 --set cluster.id=0,cluster.name=kubernetes,encryption.nodeEncryption=false,extraConfig.cluster-pool-ipv4-cidr=192.168.0.0/17,extraConfig.enable-endpoint-routes=true,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=vxlan\n        cluster.go:125: i  Storing helm values file in kube-system/cilium-cli-helm-values Secret\n        cluster.go:125: ? Created CA in secret cilium-ca\n        cluster.go:125: ? Generating certificates for Hubble...\n        cluster.go:125: ? Creating Service accounts...\n        cluster.go:125: ? Creating Cluster roles...\n        cluster.go:125: ? Creating ConfigMap for Cilium version 1.12.5...\n        cluster.go:125: i  Manual overwrite in ConfigMap: enable-endpoint-routes=true\n        cluster.go:125: i  Manual overwrite in ConfigMap: cluster-pool-ipv4-cidr=192.168.0.0/17\n        cluster.go:125: ? Creating Agent DaemonSet...\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/mount-cgroup]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/clean-cilium-state]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: level=warning msg=\"spec.template.metadata.annotations[container.apparmor.security.beta.kubernetes.io/cilium-agent]: deprecated since v1.30; use the \\\"appArmorProfile\\\" field instead\" subsys=klog\n        cluster.go:125: ? Creating Operator Deployment...\n        cluster.go:125: ? Waiting for Cilium to be installed and ready...\n        cluster.go:125: ? Cilium was successfully installed! Run 'cilium status' to view installation health\n        cluster.go:125: \u001b[33m    /??\\\n        cluster.go:125: \u001b[36m /??\u001b[33m\\__/\u001b[32m??\\\u001b[0m    Cilium:         \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[36m \\__\u001b[31m/??\\\u001b[32m__/\u001b[0m    Operator:       \u001b[32mOK\u001b[0m\n        cluster.go:125: \u001b[32m /??\u001b[31m\\__/\u001b[35m??\\\u001b[0m    Hubble:         \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[32m \\__\u001b[34m/??\\\u001b[35m__/\u001b[0m    ClusterMesh:    \u001b[36mdisabled\u001b[0m\n        cluster.go:125: \u001b[34m    \\__/\n        cluster.go:125: \u001b[0m\n        cluster.go:125: Deployment       cilium-operator    \n        cluster.go:125: DaemonSet        cilium             \n        cluster.go:125: Containers:      cilium-operator    \n        cluster.go:125:                  cilium             \n        cluster.go:125: Cluster Pods:    0/0 managed by Cilium\n        kubeadm.go:197: unable to setup cluster: unable to create worker node: PollUntilDone(ci-4081.3.6-n-afe3eba3dd): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/01772464-e2df-4916-b768-4ad6e1b7e5a3\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:54.1479419+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.7910001+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"01772464-e2df-4916-b768-4ad6e1b7e5a3\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.ext4root","result":"FAIL","duration":47945401650,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-f96be78ed2): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/2dd4bf5a-d803-4457-b3fe-99aa07cb4d44\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:59.3080908+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.7910001+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"2dd4bf5a-d803-4457-b3fe-99aa07cb4d44\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.ignition.v1.btrfsroot","result":"FAIL","duration":99478882730,"output":"        harness.go:646: Cluster failed starting machines: waiting for machine to become active: GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Compute/virtualMachines/ci-4081.3.6-n-d2cc968ba4\n--------------------------------------------------------------------------------\nRESPONSE 404: 404 Not Found\nERROR CODE: NotFound\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"NotFound\",\n    \"message\": \"The entity was not found in this Azure location.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.enable-service.sysext","result":"FAIL","duration":88262484298,"output":"        harness.go:646: Cluster failed starting machines: waiting for machine to become active: GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/resourceGroups/kola-cluster-image-9fd5883bba/providers/Microsoft.Compute/virtualMachines/ci-4081.3.6-n-0f610b0e3e\n--------------------------------------------------------------------------------\nRESPONSE 404: 404 Not Found\nERROR CODE: NotFound\n--------------------------------------------------------------------------------\n{\n  \"error\": {\n    \"code\": \"NotFound\",\n    \"message\": \"The entity was not found in this Azure location.\"\n  }\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.verity","result":"FAIL","duration":48701060462,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-ea108ec1f8): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/f51612f3-7fbe-483e-a6fe-8cae135552f9\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:33:05.7564893+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.7910001+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"f51612f3-7fbe-483e-a6fe-8cae135552f9\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"cl.locksmith.cluster","result":"FAIL","duration":65062478937,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-de543dddb3): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/dd14dbbe-2cbf-4df2-b14f-8ec2fa38ab5b\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:51.5539245+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.5436583+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"dd14dbbe-2cbf-4df2-b14f-8ec2fa38ab5b\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"kubeadm.v1.34.4.calico.base","result":"FAIL","duration":50246170588,"output":"        kubeadm.go:197: unable to setup cluster: unable to create etcd node: PollUntilDone(ci-4081.3.6-n-923ad2bf3e): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/815ff40b-77a5-46a8-8d58-092f881707a3\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:33:09.9287572+00:00\",\n  \"endTime\": \"2026-04-24T23:33:33.3057873+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"815ff40b-77a5-46a8-8d58-092f881707a3\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"docker.lib-coreos-dockerd-compat","result":"FAIL","duration":77840207678,"output":"        harness.go:646: Cluster failed starting machines: PollUntilDone(ci-4081.3.6-n-42506c0512): GET https://management.azure.com/subscriptions/0e46bd28-a80f-4d3a-8200-d9eb8d80cb2e/providers/Microsoft.Compute/locations/northeurope/operations/0ea5c857-2561-4bc0-ac20-02a79968476f\n--------------------------------------------------------------------------------\nRESPONSE 200: 200 OK\nERROR CODE: OperationPreempted\n--------------------------------------------------------------------------------\n{\n  \"startTime\": \"2026-04-24T23:32:52.702866+00:00\",\n  \"endTime\": \"2026-04-24T23:33:40.3628852+00:00\",\n  \"status\": \"Canceled\",\n  \"error\": {\n    \"code\": \"OperationPreempted\",\n    \"message\": \"Operation execution has been preempted by a more recent operation.\"\n  },\n  \"name\": \"0ea5c857-2561-4bc0-ac20-02a79968476f\"\n}\n--------------------------------------------------------------------------------\n"},{"name":"coreos.locksmith.reboot","result":"FAIL","duration":1196634105594,"output":"        locksmith.go:141: failed to check rebooted machine: ssh unreachable or system not ready: context deadline exceeded\n"}],"result":"FAIL","platform":"azure","version":"4081.3.6"}
