From 172ea4483babb3057a02a1cf178c3531132d37be Mon Sep 17 00:00:00 2001
From: gitea_admin
Date: Wed, 13 Aug 2025 12:32:44 +0000
Subject: [PATCH] Delete __documents/operation_payloads.txt

---
 __documents/operation_payloads.txt | 1144 ----------------------------
 1 file changed, 1144 deletions(-)
 delete mode 100644 __documents/operation_payloads.txt

diff --git a/__documents/operation_payloads.txt b/__documents/operation_payloads.txt
deleted file mode 100644
index 7042299..0000000
--- a/__documents/operation_payloads.txt
+++ /dev/null
@@ -1,1144 +0,0 @@
-airflow-worker-0.airflow-worker.intelliarc.svc.cluster.local
-*** Found local files:
-*** * /opt/airflow/logs/dag_id=1a694fc12661eefb618c55c6afcbc3ae61c4ea32/run_id=scheduled__2025-08-03T10:00:00+00:00/task_id=main/attempt=1.log
-[2025-08-04, 10:00:01 UTC] {local_task_job_runner.py:120} ▶ Pre task execution logs
-[2025-08-04, 10:00:01 UTC] {spark_kubernetes.py:282} INFO - Creating sparkApplication.
-[2025-08-04, 10:00:01 UTC] {custom_object_launcher.py:301} WARNING - Spark job submitted but not yet started. job_id: main-x2zblobw
-[2025-08-04, 10:00:14 UTC] {baseoperator.py:400} WARNING - SparkKubernetesOperator.execute cannot be called outside TaskInstance!
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Running driver with proxy user. Cluster manager: Kubernetes
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:09 WARN [ main] [o.a.h.u.NativeCodeLoader ] Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Running Spark version 3.5.2
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] OS info Linux, 6.1.141-165.249.amzn2023.x86_64, amd64
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Java version 17.0.12
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] ==============================================================
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] No custom resources configured for spark.driver.
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] ============================================================== -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Submitted application: exp360cust -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 4, script: , vendor: , memory -> name: memory, amount: 4096, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0) -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Limiting resource is cpus at 4 tasks per executor -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Added ResourceProfile id: 0 -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Changing view acls to: spark -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Changing modify acls to: spark -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Changing view acls groups to: -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Changing modify acls groups to: -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] SecurityManager: authentication disabled; ui acls disabled; users with view permissions: spark; groups with view permissions: EMPTY; users with modify permissions: spark; groups with modify permissions: EMPTY -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Successfully started service 'sparkDriver' on port 7078. 
-[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Registering MapOutputTracker -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Registering BlockManagerMaster -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] BlockManagerMasterEndpoint up -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Registering BlockManagerMasterHeartbeat -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Created local directory at /var/data/spark-77925c74-10d6-43b7-877b-9e0a03bd8e58/blockmgr-c194a627-2ff9-4502-9667-ae78b210216c -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] MemoryStore started with capacity 2.2 GiB -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Registering OutputCommitCoordinator -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.s.j.u.l.Log ] Logging initialized @7866ms to org.sparkproject.jetty.util.log.Slf4jLog -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Start Jetty 0.0.0.0:4040 for SparkUI -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.s.j.s.Server ] jetty-9.4.54.v20240208; built: 2024-02-08T19:42:39.027Z; git: cef3fbd6d736a21e7d541a5db490381d95a2047d; jvm 17.0.12+7 -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.s.j.s.Server ] Started @7983ms -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.s.j.s.AbstractConnector ] Started ServerConnector@d3a9bc8{HTTP/1.1, (http/1.1)}{0.0.0.0:4040} -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Successfully started service 'SparkUI' on port 4040. -[2025-08-04, 10:00:15 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@97b8f67{/,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:15 INFO [ Thread-4] [o.a.s.i.Logging ] Auto-configuring K8S client using current context from users K8S config file -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ecutor-snapshots-subscribers-0] [o.a.s.i.Logging ] Going to request 2 executors from Kubernetes for ResourceProfile Id: 0, target: 2, known: 0, sharedSlotFromPendingPods: 2147483647. 
-[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ecutor-snapshots-subscribers-0] [o.a.s.i.Logging ] Found 0 reusable PVCs from 0 PVCs -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ecutor-snapshots-subscribers-0] [o.a.s.i.Logging ] Decommissioning not enabled, skipping shutdown script -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.i.Logging ] Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079. -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.n.n.NettyBlockTransferService ] Server created on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc 10.1.103.46:7079 -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.i.Logging ] Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.i.Logging ] Registering BlockManager BlockManagerId(driver, main-x2zblobw-da7e479874862258-driver-svc.dev2.svc, 7079, None) -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Registering block manager main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 with 2.2 GiB RAM, BlockManagerId(driver, main-x2zblobw-da7e479874862258-driver-svc.dev2.svc, 7079, None) -[2025-08-04, 10:00:16 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.i.Logging ] Registered BlockManager BlockManagerId(driver, main-x2zblobw-da7e479874862258-driver-svc.dev2.svc, 7079, None) -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:16 INFO [ Thread-4] [o.a.s.i.Logging ] Initialized BlockManager: BlockManagerId(driver, main-x2zblobw-da7e479874862258-driver-svc.dev2.svc, 7079, None) -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ecutor-snapshots-subscribers-0] [o.a.s.i.Logging ] Decommissioning not enabled, skipping shutdown script -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.a.s.i.Logging ] Logging events to file:/sparkfiles/spark-events/spark-2157c1fc2dae4114a9e062bfdda9d4a3.inprogress -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Stopped o.s.j.s.ServletContextHandler@97b8f67{/,null,STOPPED,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@7b285c46{/jobs,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6a6bffa8{/jobs/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started 
o.s.j.s.ServletContextHandler@6caad36b{/jobs/job,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@488cc376{/jobs/job/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6412e100{/stages,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6792ecd7{/stages/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@5dbf5a83{/stages/stage,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@8fb31a2{/stages/stage/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@2939b291{/stages/pool,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@55d1e9d0{/stages/pool/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@131b215f{/storage,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@78ef17cb{/storage/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@50c41955{/storage/rdd,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@25e35795{/storage/rdd/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@31722dbe{/environment,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@397a316b{/environment/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@3c3f6a8e{/executors,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6e07d17{/executors/json,null,AVAILABLE,@Spark} -[2025-08-04, 
10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@580e9785{/executors/threadDump,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@7c1dc916{/executors/threadDump/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@58868594{/executors/heapHistogram,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@33f7c210{/executors/heapHistogram/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6fa89c71{/static,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@69c47270{/,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@2b18a571{/api,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@4d43609e{/jobs/job/kill,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:17 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@155fcbd2{/stages/stage/kill,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:17 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@5f641198{/metrics/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:46 INFO [ Thread-4] [o.a.s.i.Logging ] SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns) -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.a.s.i.Logging ] Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir. -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.a.s.i.Logging ] Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'. 
-[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@6edb299b{/SQL,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@3b849f4e{/SQL/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@31263b96{/SQL/execution,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:47 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@734eb365{/SQL/execution/json,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:48 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:47 INFO [ Thread-4] [o.s.j.s.h.ContextHandler ] Started o.s.j.s.ServletContextHandler@5cf923cf{/static/sql,null,AVAILABLE,@Spark} -[2025-08-04, 10:00:48 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:48 WARN [ Thread-4] [o.a.h.m.i.MetricsConfig ] Cannot locate configuration: tried hadoop-metrics2-s3a-file-system.properties,hadoop-metrics2.properties -[2025-08-04, 10:00:48 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:48 INFO [ Thread-4] [o.a.h.m.i.MetricsSystemImpl ] Scheduled Metric snapshot period at 10 second(s). -[2025-08-04, 10:00:48 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:48 INFO [ Thread-4] [o.a.h.m.i.MetricsSystemImpl ] s3a-file-system metrics system started -[2025-08-04, 10:00:49 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:48 INFO [ Thread-4] [o.a.i.CatalogUtil ] Loading custom FileIO implementation: org.apache.iceberg.hadoop.HadoopFileIO -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:49 INFO [ Thread-4] [o.a.i.BaseMetastoreCatalog ] Table loaded by catalog: dremio.failedpayments -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:50 INFO [ Thread-4] [o.a.i.BaseMetastoreCatalog ] Table loaded by catalog: dremio.payments -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'amount', 'gateway', 'payment_method'] -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:50 INFO [ Thread-4] [o.a.i.BaseMetastoreCatalog ] Table loaded by catalog: dremio.successpaymentmetrics -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'amount', 'gateway', 'payment_method'] -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'amount', 'gateway', 'payment_method'] -[2025-08-04, 10:00:50 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_method', 'payment_date', 'method_count', 'rank_method'] -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'payment_method', 'failure_reason', 'gateway'] -2025-08-04T10:00:51.284107165Z -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.BaseMetastoreCatalog ] Table loaded by catalog: dremio.failedpaymentmetrics -[2025-08-04, 
10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#165 -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.failedpaymentmetrics snapshot 4230869966404603177 created at 2025-08-04T09:53:13.359+00:00 with filter true -2025-08-04T10:00:51.533439372Z -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#185 -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.failedpayments snapshot 8729442881185065522 created at 2025-07-29T06:38:24.650+00:00 with filter true -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.failedpayments -2025-08-04T10:00:51.710151439Z -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 3 partition(s) for table dremio.failedpayments -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.failedpayments -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL -2025-08-04T10:00:51.716372435Z -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#7),(cast(payment_date#7 as date) >= coalesce(scalar-subquery#163 [], scalar-subquery#164 [])) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: gateway#5, payment_method#6, payment_date#7, failure_reason#9 -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.failedpayments snapshot 8729442881185065522 created at 2025-07-29T06:38:24.650+00:00 with filter payment_date IS NOT NULL -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.failedpayments -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] 
[o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 3 partition(s) for table dremio.failedpayments -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_0 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_0_piece0 stored as bytes in memory (estimated size 31.5 KiB, free 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_0_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 0 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_1 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_1_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_1_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:51 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 1 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:51 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_2 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_2_piece0 stored as bytes in memory (estimated size 31.5 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_2_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 2 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_1_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_3 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] 
[o.a.s.i.Logging ] Block broadcast_3_piece0 stored as bytes in memory (estimated size 31.5 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_3_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 3 from broadcast at SparkBatch.java:85 -2025-08-04T10:00:52.323939171Z -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'payment_method', 'failure_reason', 'gateway'] -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed filters: -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Filters evaluated on data source side: -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Filters evaluated on Spark side: -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#252, failure_reason#253, gateway#254, failure_count#255, _file#262 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.failedpaymentmetrics snapshot 4230869966404603177 created at 2025-08-04T09:53:13.359+00:00 with filter true -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting 0 bytes advisory partition size for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting UnspecifiedDistribution as write distribution for table dremio.failedpaymentmetrics -2025-08-04T10:00:52.406523073Z -2025-08-04T10:00:52.409947017Z -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting [] as write ordering for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, gateway IS NOT NULL, failure_reason IS NOT NULL -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#344),isnotnull(gateway#346),isnotnull(failure_reason#345) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] 
[o.a.s.i.Logging ] -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#344, failure_reason#345, gateway#346, _file#348 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.failedpaymentmetrics snapshot 4230869966404603177 created at 2025-08-04T09:53:13.359+00:00 with filter ((payment_date IS NOT NULL AND gateway IS NOT NULL) AND failure_reason IS NOT NULL) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.failedpaymentmetrics -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_4 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_4_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_4_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 4 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_5 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_5_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_5_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 5 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_6 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_6_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_6_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 
(size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 6 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 147.502459 ms -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ subquery-2] [o.a.s.i.Logging ] Code generated in 147.535767 ms -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ subquery-1] [o.a.s.i.Logging ] Code generated in 148.606794 ms -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 6 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 0 -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ subquery-1] [o.a.s.i.Logging ] Code generated in 17.71674 ms -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 0 ($anonfun$withThreadLocalCaptured$1 at :0) with 3 output partitions -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 0 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:00:52 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 0 (MapPartitionsRDD[6] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:52 INFO [ subquery-1] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_7 stored as values in memory (estimated size 18.9 KiB, free 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_7_piece0 stored as bytes in memory (estimated size 8.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_7_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_6_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:53 UTC] 
{pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 7 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_4_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 3 missing tasks from ShuffleMapStage 0 (MapPartitionsRDD[6] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2)) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 0.0 with 3 tasks resource profile 0 -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 7 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 1 -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 1 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 2 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 1) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 2 (MapPartitionsRDD[10] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_5_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_8 stored as values in memory (estimated size 13.0 KiB, free 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_8_piece0 stored as bytes in memory (estimated size 6.1 KiB, free 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_8_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] 
[o.a.s.i.Logging ] Created broadcast 8 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:00:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 2 (MapPartitionsRDD[10] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:01:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:00:53 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 2.0 with 1 tasks resource profile 0 -[2025-08-04, 10:01:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:01:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:01:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:01:23 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:01:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:01:38 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:02:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:01:53 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:02:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:02:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:02:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:02:23 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:02:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:02:38 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:03:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:02:53 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:03:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:03:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:03:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:03:23 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:03:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:03:38 
WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:04:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:03:53 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:04:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:04:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:04:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:04:23 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:04:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:04:38 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:05:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:04:53 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:05:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:05:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:05:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:05:23 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:05:53 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:05:38 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:06:08 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:05:53 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:06:20 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:08 WARN [ task-starvation-timer] [o.a.s.i.Logging ] Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:20 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] No executor found for 10.1.105.41:46626 -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.1.105.41:46638) with ID 2, ResourceProfileId 0 -[2025-08-04, 10:06:22 UTC] 
{pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Registering block manager 10.1.105.41:40655 with 2.2 GiB RAM, BlockManagerId(2, 10.1.105.41, 40655, None) -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 0.0 (TID 0) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15127 bytes) -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 0.0 (TID 1) (10.1.105.41, executor 2, partition 1, PROCESS_LOCAL, 15127 bytes) -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 0.0 (TID 2) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 14617 bytes) -[2025-08-04, 10:06:22 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 2.0 (TID 3) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 9018 bytes) -[2025-08-04, 10:06:23 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:22 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_8_piece0 in memory on 10.1.105.41:40655 (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:24 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:23 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_7_piece0 in memory on 10.1.105.41:40655 (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:24 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:24 INFO [ dispatcher-event-loop-5] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 1 to 10.1.105.41:46638 -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:24 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] No executor found for 10.1.125.71:47652 -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.1.125.71:47656) with ID 1, ResourceProfileId 0 -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Registering block manager 10.1.125.71:45293 with 2.2 GiB RAM, BlockManagerId(1, 10.1.125.71, 45293, None) -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 0.0 in stage 2.0 (TID 3) in 3043 ms on 10.1.105.41 (executor 2) (1/1) -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Removed TaskSet 2.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_2_piece0 in memory on 10.1.105.41:40655 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ 
dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 2 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 332.368 s -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 1 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:25 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 2: Stage finished -[2025-08-04, 10:06:27 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:25 INFO [ subquery-1] [o.a.s.i.Logging ] Job 1 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 332.478605 s -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:27 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 2.0 in stage 0.0 (TID 2) in 5442 ms on 10.1.105.41 (executor 2) (1/3) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 0.0 in stage 0.0 (TID 0) in 5642 ms on 10.1.105.41 (executor 2) (2/3) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 1.0 in stage 0.0 (TID 1) in 5629 ms on 10.1.105.41 (executor 2) (3/3) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 0.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 0 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 335.053 s -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ subquery-2] [o.a.s.i.Logging ] Code generated in 10.309916 ms -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ subquery-2] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 2 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 4 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 
INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 3) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 4 (MapPartitionsRDD[13] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_9 stored as values in memory (estimated size 13.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_9_piece0 stored as bytes in memory (estimated size 6.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_9_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 9 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 4 (MapPartitionsRDD[13] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_8_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 4.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 4.0 (TID 4) (10.1.105.41, executor 2, partition 0, NODE_LOCAL, 9018 bytes) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_8_piece0 on 10.1.105.41:40655 in memory (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_7_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_7_piece0 on 10.1.105.41:40655 in memory (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added 
broadcast_9_piece0 in memory on 10.1.105.41:40655 (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-event-loop-4] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 0 to 10.1.105.41:46638 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 0.0 in stage 4.0 (TID 4) in 82 ms on 10.1.105.41 (executor 2) (1/1) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Removed TaskSet 4.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 4 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.102 s -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 2 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 4: Stage finished -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ subquery-2] [o.a.s.i.Logging ] Job 2 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 0.109687 s -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ subquery-0] [o.a.s.i.Logging ] Code generated in 27.883139 ms -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 3 ($anonfun$withThreadLocalCaptured$1 at :0) with 3 output partitions -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 5 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 5 (AdaptiveSparkPlan isFinalPlan=false -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] +- Project [cast(payment_date#7 as date) AS payment_date#36, payment_method#6, failure_reason#9, gateway#5] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] +- Filter (isnotnull(payment_date#7) AND (cast(payment_date#7 as date) >= coalesce(Subquery subquery#163, [id=#71], Subquery subquery#164, [id=#72]))) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : :- Subquery subquery#163, [id=#71] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- AdaptiveSparkPlan isFinalPlan=false -[2025-08-04, 10:06:28 UTC] 
{pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- HashAggregate(keys=[], functions=[max(cast(payment_date#165 as date))], output=[max(payment_date)#170]) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- Exchange SinglePartition, ENSURE_REQUIREMENTS, [plan_id=58] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- HashAggregate(keys=[], functions=[partial_max(cast(payment_date#165 as date))], output=[max#208]) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- Project [payment_date#165] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- BatchScan dremio.failedpaymentmetrics[payment_date#165] dremio.failedpaymentmetrics (branch=null) [filters=, groupedBy=] RuntimeFilters: [] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : +- Subquery subquery#164, [id=#72] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : +- AdaptiveSparkPlan is... MapPartitionsRDD[18] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_10 stored as values in memory (estimated size 21.1 KiB, free 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_10_piece0 stored as bytes in memory (estimated size 8.4 KiB, free 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_9_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_10_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 10 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 3 missing tasks from ResultStage 5 (AdaptiveSparkPlan isFinalPlan=false -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] +- Project [cast(payment_date#7 as date) AS payment_date#36, payment_method#6, failure_reason#9, gateway#5] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] +- Filter (isnotnull(payment_date#7) AND (cast(payment_date#7 as date) >= coalesce(Subquery subquery#163, [id=#71], Subquery subquery#164, [id=#72]))) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : :- Subquery subquery#163, [id=#71] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- AdaptiveSparkPlan isFinalPlan=false -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- HashAggregate(keys=[], functions=[max(cast(payment_date#165 as date))], output=[max(payment_date)#170]) 
-[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- Exchange SinglePartition, ENSURE_REQUIREMENTS, [plan_id=58] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- HashAggregate(keys=[], functions=[partial_max(cast(payment_date#165 as date))], output=[max#208]) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- Project [payment_date#165] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : : +- BatchScan dremio.failedpaymentmetrics[payment_date#165] dremio.failedpaymentmetrics (branch=null) [filters=, groupedBy=] RuntimeFilters: [] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : +- Subquery subquery#164, [id=#72] -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] : +- AdaptiveSparkPlan is... MapPartitionsRDD[18] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2)) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 5.0 with 3 tasks resource profile 0 -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_9_piece0 on 10.1.105.41:40655 in memory (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 5.0 (TID 5) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15614 bytes) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 5.0 (TID 6) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15614 bytes) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 5.0 (TID 7) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 15104 bytes) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_10_piece0 in memory on 10.1.105.41:40655 (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_3_piece0 in memory on 10.1.105.41:40655 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added rdd_18_2 in memory on 10.1.105.41:40655 (size: 1128.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 2.0 in stage 5.0 (TID 7) in 392 ms on 10.1.105.41 (executor 2) (1/3) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_10_piece0 in memory on 10.1.125.71:45293 (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:28 UTC] {pod_manager.py:472} INFO - 
[spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added rdd_18_0 in memory on 10.1.105.41:40655 (size: 1088.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:29 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:28 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 0.0 in stage 5.0 (TID 5) in 556 ms on 10.1.105.41 (executor 2) (2/3) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:29 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_3_piece0 in memory on 10.1.125.71:45293 (size: 31.5 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added rdd_18_1 in memory on 10.1.125.71:45293 (size: 1352.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 1.0 in stage 5.0 (TID 6) in 4304 ms on 10.1.125.71 (executor 1) (3/3) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 5.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 5 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 4.329 s -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 3 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 5: Stage finished -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_11 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_10_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_11_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_11_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Created broadcast 11 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_10_piece0 on 10.1.105.41:40655 in memory (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - 
[spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_10_piece0 on 10.1.125.71:45293 in memory (size: 8.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Code generated in 48.122507 ms -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Predicate isnotnull(payment_date#36) generates partition filter: ((payment_date.count#467 - payment_date.nullCount#466) > 0) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Predicate isnotnull(gateway#39) generates partition filter: ((gateway.count#482 - gateway.nullCount#481) > 0) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ subquery-0] [o.a.s.i.Logging ] Predicate isnotnull(failure_reason#38) generates partition filter: ((failure_reason.count#477 - failure_reason.nullCount#476) > 0) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 23 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 2 -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 4 ($anonfun$withThreadLocalCaptured$1 at :0) with 3 output partitions -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 6 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 6 (MapPartitionsRDD[23] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_12 stored as values in memory (estimated size 50.9 KiB, free 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_12_piece0 stored as bytes in memory (estimated size 21.1 KiB, free 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_11_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_12_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 21.1 KiB, free: 
2.2 GiB) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 12 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 3 missing tasks from ShuffleMapStage 6 (MapPartitionsRDD[23] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2)) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 6.0 with 3 tasks resource profile 0 -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 6.0 (TID 8) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 6.0 (TID 9) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 6.0 (TID 10) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 15093 bytes) -[2025-08-04, 10:06:32 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_12_piece0 in memory on 10.1.105.41:40655 (size: 21.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:33 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:32 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_12_piece0 in memory on 10.1.125.71:45293 (size: 21.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:33 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:33 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 2.0 in stage 6.0 (TID 10) in 969 ms on 10.1.105.41 (executor 2) (1/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:33 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 0.0 in stage 6.0 (TID 8) in 971 ms on 10.1.105.41 (executor 2) (2/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 1.0 in stage 6.0 (TID 9) in 1309 ms on 10.1.125.71 (executor 1) (3/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Removed TaskSet 6.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 6 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 1.324 s -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set() 
-[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_13 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_12_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 21.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_13_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_13_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_12_piece0 on 10.1.125.71:45293 in memory (size: 21.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_12_piece0 on 10.1.105.41:40655 in memory (size: 21.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Created broadcast 13 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] For shuffle(2), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Code generated in 28.537961 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 5 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 8 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 7) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 
10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 8 (MapPartitionsRDD[26] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_14 stored as values in memory (estimated size 57.8 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_14_piece0 stored as bytes in memory (estimated size 23.9 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_14_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 23.9 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 14 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 8 (MapPartitionsRDD[26] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 8.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 8.0 (TID 11) (10.1.125.71, executor 1, partition 0, NODE_LOCAL, 9018 bytes) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_14_piece0 in memory on 10.1.125.71:45293 (size: 23.9 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-event-loop-1] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 2 to 10.1.125.71:47656 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_13_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 0.0 in stage 8.0 (TID 11) in 281 ms on 10.1.125.71 (executor 1) (1/1) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 8.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 8 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.290 s -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 
2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 5 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 8: Stage finished -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Job 5 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 0.298573 s -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Code generated in 10.656507 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_14_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 23.9 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_14_piece0 on 10.1.125.71:45293 in memory (size: 23.9 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Block broadcast_15_piece0 stored as bytes in memory (estimated size 567.0 B, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_15_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 567.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ broadcast-exchange-0] [o.a.s.i.Logging ] Created broadcast 15 from $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_16 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Block broadcast_16_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_16_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Created broadcast 16 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ subquery-0] [o.a.s.i.Logging ] Code generated in 54.385134 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_17 stored as values in memory (estimated size 40.0 B, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_17_piece0 stored as 
bytes in memory (estimated size 86.0 B, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_16_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_17_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 86.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 17 from sql at :0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.i.s.s.SparkCopyOnWriteScan ] 0 of 0 task(s) for table dremio.failedpaymentmetrics matched runtime file filter with 0 location(s) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_18 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_18_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_18_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 18 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 42.984457 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 37 (sql at :0) as input to shuffle 3 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 6 (sql at :0) with 3 output partitions -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 9 (sql at :0) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 9 (MapPartitionsRDD[37] at sql at :0), which has no missing parents -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block 
broadcast_19 stored as values in memory (estimated size 50.9 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_19_piece0 stored as bytes in memory (estimated size 21.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_18_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_19_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 19 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 3 missing tasks from ShuffleMapStage 9 (MapPartitionsRDD[37] at sql at :0) (first 15 tasks are for partitions Vector(0, 1, 2)) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 9.0 with 3 tasks resource profile 0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 9.0 (TID 12) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 9.0 (TID 13) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 9.0 (TID 14) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 15093 bytes) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_19_piece0 in memory on 10.1.125.71:45293 (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_19_piece0 in memory on 10.1.105.41:40655 (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 0.0 in stage 9.0 (TID 12) in 153 ms on 10.1.105.41 (executor 2) (1/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 2.0 in stage 9.0 (TID 14) in 154 ms on 10.1.105.41 (executor 2) (2/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 1.0 in stage 
9.0 (TID 13) in 183 ms on 10.1.125.71 (executor 1) (3/3) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Removed TaskSet 9.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 9 (sql at :0) finished in 0.199 s -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] For shuffle(3), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 6.32557 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but current version of codegened fast hashmap does not support this aggregate. 
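The "advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576" lines above come from Spark's adaptive query execution coalescing small shuffle partitions. The job's own configuration is not captured in this log; the sketch below is only an illustration, in PySpark, of the stock Spark 3.5 settings that correspond to those numbers (64 MiB advisory target, 1 MiB minimum partition size).

# Illustrative sketch, not the job's actual code: the AQE settings behind the
# "advisory target size" / "minimum partition size" log lines above.
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .appName("payment-metrics-job")  # hypothetical application name
    .config("spark.sql.adaptive.enabled", "true")
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
    .config("spark.sql.adaptive.advisoryPartitionSizeInBytes", "64m")        # 67108864 bytes in the log
    .config("spark.sql.adaptive.coalescePartitions.minPartitionSize", "1m")  # 1048576 bytes in the log
    .getOrCreate()
)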
-[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 13.510309 ms -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_20 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_20_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_19_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_20_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_19_piece0 on 10.1.105.41:40655 in memory (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_19_piece0 on 10.1.125.71:45293 in memory (size: 21.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 20 from broadcast at SparkWrite.java:193 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Start processing data source write support: IcebergBatchWrite(table=dremio.failedpaymentmetrics, format=PARQUET). The input RDD has 1 partitions. 
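At this point the driver starts an IcebergBatchWrite into dremio.failedpaymentmetrics (Parquet), which it commits a few lines below as a BaseOverwriteFiles snapshot adding 14 records. The application code that triggers this write is not part of this log; the PySpark sketch below only illustrates the kind of write that produces such a commit. The table names and the total_failed_payments column are taken from this log, while the source filter and aggregation are assumptions.

# Illustrative sketch only -- not the job's actual code.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()

failed_daily = (
    spark.table("dremio.payments")                        # source table scanned later in this log
    .where(F.col("payment_status") == "FAILED")           # hypothetical status column
    .groupBy(F.to_date("payment_date").alias("payment_date"))
    .agg(F.count("*").alias("total_failed_payments"))     # column name printed by the job below
)

# DataFrameWriterV2 against the Iceberg catalog; overwritePartitions() replaces only
# the partitions touched by the new rows, which appears in the log as an
# overwrite-style commit (an append() call would produce a plain append commit).
failed_daily.writeTo("dremio.failedpaymentmetrics").overwritePartitions()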
-[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ Thread-4] [o.a.s.i.Logging ] Starting job: sql at :0 -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 7 (sql at :0) with 1 output partitions -[2025-08-04, 10:06:34 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 11 (sql at :0) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 10) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:34 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 11 (MapPartitionsRDD[41] at sql at :0), which has no missing parents -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_21 stored as values in memory (estimated size 66.5 KiB, free 2.2 GiB) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_21_piece0 stored as bytes in memory (estimated size 27.4 KiB, free 2.2 GiB) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_21_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 27.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 21 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 11 (MapPartitionsRDD[41] at sql at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 11.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 11.0 (TID 15) (10.1.125.71, executor 1, partition 0, NODE_LOCAL, 9018 bytes) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_21_piece0 in memory on 10.1.125.71:45293 (size: 27.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:35 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dispatcher-event-loop-7] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 3 to 10.1.125.71:47656 -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:35 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging 
] Added broadcast_20_piece0 in memory on 10.1.125.71:45293 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 0.0 in stage 11.0 (TID 15) in 1538 ms on 10.1.125.71 (executor 1) (1/1) -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 11.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 11 (sql at :0) finished in 1.547 s -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 7 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 11: Stage finished -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ Thread-4] [o.a.s.i.Logging ] Job 7 finished: sql at :0, took 1.555123 s -[2025-08-04, 10:06:36 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ Thread-4] [o.a.s.i.Logging ] Data source write support IcebergBatchWrite(table=dremio.failedpaymentmetrics, format=PARQUET) is committing. -[2025-08-04, 10:06:37 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:36 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Committing overwrite of 0 data files with 1 new data files, scanSnapshotId: 4230869966404603177, conflictDetectionFilter: true to table dremio.failedpaymentmetrics -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:37 INFO [ Thread-4] [o.a.i.h.HadoopTableOperations ] Committed a new metadata file s3://experience-360/failedpaymentmetrics/metadata/v141.metadata.json -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.SnapshotProducer ] Committed snapshot 7097650574085158880 (BaseOverwriteFiles) -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.m.LoggingMetricsReporter ] Received metrics report: CommitReport{tableName=dremio.failedpaymentmetrics, snapshotId=7097650574085158880, sequenceNumber=141, operation=append, commitMetrics=CommitMetricsResult{totalDuration=TimerResult{timeUnit=NANOSECONDS, totalDuration=PT1.597596144S, count=1}, attempts=CounterResult{unit=COUNT, value=1}, addedDataFiles=CounterResult{unit=COUNT, value=1}, removedDataFiles=null, totalDataFiles=CounterResult{unit=COUNT, value=1}, addedDeleteFiles=null, addedEqualityDeleteFiles=null, addedPositionalDeleteFiles=null, addedDVs=null, removedDeleteFiles=null, removedEqualityDeleteFiles=null, removedPositionalDeleteFiles=null, removedDVs=null, totalDeleteFiles=CounterResult{unit=COUNT, value=0}, addedRecords=CounterResult{unit=COUNT, value=14}, removedRecords=null, totalRecords=CounterResult{unit=COUNT, value=14}, addedFilesSizeInBytes=CounterResult{unit=BYTES, value=1822}, removedFilesSizeInBytes=null, totalFilesSizeInBytes=null, addedPositionalDeletes=null, removedPositionalDeletes=null, 
totalPositionalDeletes=CounterResult{unit=COUNT, value=0}, addedEqualityDeletes=null, removedEqualityDeletes=null, totalEqualityDeletes=CounterResult{unit=COUNT, value=0}}, metadata={engine-version=3.5.2, app-id=spark-2157c1fc2dae4114a9e062bfdda9d4a3, engine-name=spark, iceberg-version=Apache Iceberg 1.8.1 (commit 9ce0fcf0af7becf25ad9fc996c3bad2afdcfd33d)}} -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Committed in 1775 ms -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.s.i.Logging ] Data source write support IcebergBatchWrite(table=dremio.failedpaymentmetrics, format=PARQUET) committed. -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'total_payments', 'total_value_processed'] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'most_used_payment_method'] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'high_valued_payments'] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] ['payment_date', 'total_failed_payments'] -2025-08-04T10:06:38.643702000Z -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.BaseMetastoreCatalog ] Table loaded by catalog: dremio.successpaymentmetrics -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#74 -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -2025-08-04T10:06:38.700111184Z -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#95 -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:38.915768922Z -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:38 UTC] 
{pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#515 -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -2025-08-04T10:06:38.921406637Z -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#527 -[2025-08-04, 10:06:38 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:38 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_20_piece0 on 10.1.125.71:45293 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_20_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_21_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 27.4 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_21_piece0 on 10.1.125.71:45293 in memory (size: 27.4 KiB, free: 2.2 GiB) -2025-08-04T10:06:39.050135276Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#541 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter 
true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -2025-08-04T10:06:39.055468101Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#553 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:39.222120787Z -2025-08-04T10:06:39.222761044Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.payments -2025-08-04T10:06:39.224819177Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, gateway IS NOT NULL, gateway = 'CCS' -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#50),isnotnull(gateway#49),(gateway#49 = CCS),(cast(payment_date#50 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, gateway IS NOT NULL, gateway = 'CCS' -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#511),isnotnull(gateway#510),(gateway#510 = CCS),(cast(payment_date#511 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -2025-08-04T10:06:39.227088102Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, amount IS NOT NULL, gateway IS NOT NULL, amount > 500.00, gateway = 'CCS' -2025-08-04T10:06:39.228808981Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: 
isnotnull(payment_date#537),isnotnull(amount#533),isnotnull(gateway#536),(amount#533 > 500.00),(gateway#536 = CCS),(cast(payment_date#537 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: amount#46, gateway#49, payment_date#50 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -2025-08-04T10:06:39.230207634Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: gateway#510, payment_date#511, payment_method#512 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: amount#533, gateway#536, payment_date#537 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter ((payment_date IS NOT NULL AND gateway IS NOT NULL) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter ((payment_date IS NOT NULL AND gateway IS NOT NULL) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter ((((payment_date IS NOT NULL AND amount IS NOT NULL) AND gateway IS NOT NULL) AND amount > (hash-27bc26af)) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:39.630425076Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - 
[spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#74 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -2025-08-04T10:06:39.636819965Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#95 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:39.767730541Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#515 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -2025-08-04T10:06:39.774707325Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#527 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 
10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:39.897665163Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#541 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -2025-08-04T10:06:39.902682410Z -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#553 -[2025-08-04, 10:06:39 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter true -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:39 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -2025-08-04T10:06:40.076947134Z -2025-08-04T10:06:40.078400728Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.successpaymentmetrics -2025-08-04T10:06:40.079411459Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed filters: -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Filters evaluated on data source side: -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Filters evaluated on Spark side: -2025-08-04T10:06:40.080247714Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#567, total_payments#568, total_value_processed#569, total_failed_payments#570, most_used_payment_method#571, high_valued_payments#572, _file#585 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing 
operators to dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, gateway IS NOT NULL, gateway = 'CCS' -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#50),isnotnull(gateway#49),(gateway#49 = CCS),(cast(payment_date#50 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.payments -2025-08-04T10:06:40.082862202Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, gateway IS NOT NULL, gateway = 'CCS' -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#511),isnotnull(gateway#510),(gateway#510 = CCS),(cast(payment_date#511 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL, amount IS NOT NULL, gateway IS NOT NULL, amount > 500.00, gateway = 'CCS' -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#537),isnotnull(amount#533),isnotnull(gateway#536),(amount#533 > 500.00),(gateway#536 = CCS),(cast(payment_date#537 as date) >= coalesce(scalar-subquery#72 [], scalar-subquery#73 [])) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -2025-08-04T10:06:40.084718241Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: amount#46, gateway#49, payment_date#50 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -2025-08-04T10:06:40.086312834Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: gateway#510, payment_date#511, payment_method#512 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: amount#533, gateway#536, payment_date#537 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter true -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 
2025-08-04T09:52:09.903+00:00 with filter ((payment_date IS NOT NULL AND gateway IS NOT NULL) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter ((payment_date IS NOT NULL AND gateway IS NOT NULL) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.payments snapshot 4255712500230507056 created at 2025-08-04T09:52:09.903+00:00 with filter ((((payment_date IS NOT NULL AND amount IS NOT NULL) AND gateway IS NOT NULL) AND amount > (hash-27bc26af)) AND gateway = (hash-5975a816)) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting 0 bytes advisory partition size for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting UnspecifiedDistribution as write distribution for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Requesting [] as write ordering for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for 
table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -2025-08-04T10:06:40.573272310Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -2025-08-04T10:06:40.575259259Z -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushing operators to dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Pushed Filters: payment_date IS NOT NULL -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Post-Scan Filters: isnotnull(payment_date#715) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] Output: payment_date#715, _file#721 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.SnapshotScan ] Scanning table dremio.successpaymentmetrics snapshot 8867521827718512735 created at 2025-08-04T09:28:24.650+00:00 with filter payment_date IS NOT NULL -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.BaseDistributedDataScan ] Planning file tasks locally for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 8 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table 
dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 0 partition(s) for table dremio.successpaymentmetrics -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.i.s.s.SparkPartitioningAwareScan ] Reporting UnknownPartitioning with 13 partition(s) for table dremio.payments -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_22 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_22_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_22_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 22 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_23 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_23_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_23_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 23 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_24 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_24_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_24_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 24 from broadcast at 
SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_25 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_25_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_25_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 25 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_26 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_26_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_26_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 26 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_27 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_27_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_27_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 27 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_28 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_28_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_28_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, 
free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 28 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_29 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_29_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_29_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 29 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_30 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_30_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_30_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 30 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_31 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_29_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_31_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_31_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 31 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_25_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 
in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_32 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_26_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_32_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_32_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 32 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_22_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_33 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_33_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_33_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 33 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_34 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_34_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_34_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 34 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] 
[o.a.s.i.Logging ] Block broadcast_35 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_35_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_35_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 35 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_36 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_36_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_36_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 36 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 7.530956 ms -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ subquery-3] [o.a.s.i.Logging ] Code generated in 7.49701 ms -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 51 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 4 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 8 ($anonfun$withThreadLocalCaptured$1 at :0) with 13 output partitions -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 12 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 12 (MapPartitionsRDD[51] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ 
dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_37 stored as values in memory (estimated size 18.9 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_37_piece0 stored as bytes in memory (estimated size 8.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_37_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_31_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 37 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ subquery-4] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 13 missing tasks from ShuffleMapStage 12 (MapPartitionsRDD[51] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 12.0 with 13 tasks resource profile 0 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 52 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 5 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 12.0 (TID 16) (10.1.125.71, executor 1, partition 0, PROCESS_LOCAL, 14950 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_36_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 12.0 (TID 17) (10.1.105.41, executor 2, partition 1, PROCESS_LOCAL, 14575 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 12.0 (TID 18) (10.1.125.71, executor 1, partition 2, PROCESS_LOCAL, 14575 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 3.0 in stage 12.0 (TID 19) (10.1.105.41, executor 2, partition 3, PROCESS_LOCAL, 14539 bytes) -[2025-08-04, 10:06:40 UTC] 
{pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 9 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 14 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 13) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 4.0 in stage 12.0 (TID 20) (10.1.125.71, executor 1, partition 4, PROCESS_LOCAL, 14575 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 5.0 in stage 12.0 (TID 21) (10.1.105.41, executor 2, partition 5, PROCESS_LOCAL, 14575 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 6.0 in stage 12.0 (TID 22) (10.1.125.71, executor 1, partition 6, PROCESS_LOCAL, 14575 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 7.0 in stage 12.0 (TID 23) (10.1.105.41, executor 2, partition 7, PROCESS_LOCAL, 14566 bytes) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 14 (MapPartitionsRDD[55] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_38 stored as values in memory (estimated size 13.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_37_piece0 in memory on 10.1.125.71:45293 (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_38_piece0 stored as bytes in memory (estimated size 6.1 KiB, free 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_37_piece0 in memory on 10.1.105.41:40655 (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_38_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ 
dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_33_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 38 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:40 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 14 (MapPartitionsRDD[55] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:40 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 14.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_32_piece0 in memory on 10.1.105.41:40655 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_32_piece0 in memory on 10.1.125.71:45293 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 8.0 in stage 12.0 (TID 24) (10.1.105.41, executor 2, partition 8, PROCESS_LOCAL, 14959 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 5.0 in stage 12.0 (TID 21) in 433 ms on 10.1.105.41 (executor 2) (1/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 9.0 in stage 12.0 (TID 25) (10.1.105.41, executor 2, partition 9, PROCESS_LOCAL, 15148 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 7.0 in stage 12.0 (TID 23) in 434 ms on 10.1.105.41 (executor 2) (2/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 10.0 in stage 12.0 (TID 26) (10.1.105.41, executor 2, partition 10, PROCESS_LOCAL, 15148 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 3.0 in stage 12.0 (TID 19) in 451 ms on 10.1.105.41 (executor 2) (3/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 11.0 in stage 12.0 (TID 27) (10.1.105.41, executor 2, partition 11, PROCESS_LOCAL, 15148 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 1.0 in stage 12.0 (TID 17) in 468 ms on 10.1.105.41 (executor 2) (4/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO 
[patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 12.0 in stage 12.0 (TID 28) (10.1.125.71, executor 1, partition 12, PROCESS_LOCAL, 14248 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 0.0 in stage 12.0 (TID 16) in 494 ms on 10.1.125.71 (executor 1) (5/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 14.0 (TID 29) (10.1.125.71, executor 1, partition 0, PROCESS_LOCAL, 9018 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 6.0 in stage 12.0 (TID 22) in 495 ms on 10.1.125.71 (executor 1) (6/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 2.0 in stage 12.0 (TID 18) in 501 ms on 10.1.125.71 (executor 1) (7/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_38_piece0 in memory on 10.1.125.71:45293 (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-event-loop-5] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 5 to 10.1.125.71:47656 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 4.0 in stage 12.0 (TID 20) in 535 ms on 10.1.125.71 (executor 1) (8/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 0.0 in stage 14.0 (TID 29) in 45 ms on 10.1.125.71 (executor 1) (1/1) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Removed TaskSet 14.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 14 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.532 s -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 9 is finished. 
Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 14: Stage finished -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ subquery-4] [o.a.s.i.Logging ] Job 9 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 0.545089 s -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 12.0 in stage 12.0 (TID 28) in 188 ms on 10.1.125.71 (executor 1) (9/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 10.0 in stage 12.0 (TID 26) in 373 ms on 10.1.105.41 (executor 2) (10/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 11.0 in stage 12.0 (TID 27) in 367 ms on 10.1.105.41 (executor 2) (11/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 9.0 in stage 12.0 (TID 25) in 408 ms on 10.1.105.41 (executor 2) (12/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 8.0 in stage 12.0 (TID 24) in 417 ms on 10.1.105.41 (executor 2) (13/13) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Removed TaskSet 12.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 12 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.865 s -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ subquery-5] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 10 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 16 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 
2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 15) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 16 (MapPartitionsRDD[58] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_39 stored as values in memory (estimated size 13.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_39_piece0 stored as bytes in memory (estimated size 6.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_39_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_38_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 39 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 16 (MapPartitionsRDD[58] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 16.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 16.0 (TID 30) (10.1.125.71, executor 1, partition 0, NODE_LOCAL, 9018 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_38_piece0 on 10.1.125.71:45293 in memory (size: 6.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_37_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_37_piece0 on 10.1.105.41:40655 in memory (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ 
dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_37_piece0 on 10.1.125.71:45293 in memory (size: 8.0 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_39_piece0 in memory on 10.1.125.71:45293 (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-event-loop-0] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 4 to 10.1.125.71:47656 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 0.0 in stage 16.0 (TID 30) in 33 ms on 10.1.125.71 (executor 1) (1/1) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Removed TaskSet 16.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 16 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.045 s -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 10 is finished. Cancelling potential speculative or zombie tasks for this job -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 16: Stage finished -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ subquery-5] [o.a.s.i.Logging ] Job 10 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 0.049194 s -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ subquery-3] [o.a.s.i.Logging ] Code generated in 28.439949 ms -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 62 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 6 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 11 ($anonfun$withThreadLocalCaptured$1 at :0) with 8 output partitions -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 17 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 17 (MapPartitionsRDD[62] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - 
[spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_40 stored as values in memory (estimated size 67.7 KiB, free 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_39_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_40_piece0 stored as bytes in memory (estimated size 28.1 KiB, free 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_40_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_39_piece0 on 10.1.125.71:45293 in memory (size: 6.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 40 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 8 missing tasks from ShuffleMapStage 17 (MapPartitionsRDD[62] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7)) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 17.0 with 8 tasks resource profile 0 -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 17.0 (TID 31) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15543 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 17.0 (TID 32) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15552 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 17.0 (TID 33) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 3.0 in stage 17.0 (TID 34) (10.1.125.71, executor 1, partition 3, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 4.0 in stage 17.0 (TID 35) (10.1.105.41, executor 2, partition 4, PROCESS_LOCAL, 15322 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 5.0 in stage 17.0 (TID 36) 
(10.1.125.71, executor 1, partition 5, PROCESS_LOCAL, 15552 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 6.0 in stage 17.0 (TID 37) (10.1.105.41, executor 2, partition 6, PROCESS_LOCAL, 15964 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 7.0 in stage 17.0 (TID 38) (10.1.125.71, executor 1, partition 7, PROCESS_LOCAL, 15024 bytes) -[2025-08-04, 10:06:41 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_40_piece0 in memory on 10.1.125.71:45293 (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:41 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_40_piece0 in memory on 10.1.105.41:40655 (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ subquery-3] [o.a.s.i.Logging ] Code generated in 47.745466 ms -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 66 ($anonfun$withThreadLocalCaptured$1 at :0) as input to shuffle 7 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 12 ($anonfun$withThreadLocalCaptured$1 at :0) with 8 output partitions -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 18 ($anonfun$withThreadLocalCaptured$1 at :0) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 18 (MapPartitionsRDD[66] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_41 stored as values in memory (estimated size 71.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_42 stored as values in memory (estimated size 40.0 B, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_41_piece0 stored as bytes in memory (estimated size 29.6 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_41_piece0 in memory on 
main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_42_piece0 stored as bytes in memory (estimated size 86.0 B, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 41 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_42_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 86.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 42 from sql at :0 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.i.s.s.SparkCopyOnWriteScan ] 0 of 0 task(s) for table dremio.successpaymentmetrics matched runtime file filter with 0 location(s) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_43 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 8 missing tasks from ShuffleMapStage 18 (MapPartitionsRDD[66] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7)) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 18.0 with 8 tasks resource profile 0 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_43_piece0 stored as bytes in memory (estimated size 31.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_43_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 43 from broadcast at SparkBatch.java:85 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but current version of codegened fast hashmap does not support this aggregate. 
-[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 19.434293 ms -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 73 (sql at :0) as input to shuffle 8 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 13 (sql at :0) with 8 output partitions -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 19 (sql at :0) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 19 (MapPartitionsRDD[73] at sql at :0), which has no missing parents -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_44 stored as values in memory (estimated size 69.2 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_43_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_44_piece0 stored as bytes in memory (estimated size 28.7 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_44_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 28.7 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 44 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 8 missing tasks from ShuffleMapStage 19 (MapPartitionsRDD[73] at sql at :0) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7)) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 19.0 with 8 tasks resource profile 0 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 27.969885 ms -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_27_piece0 in memory 
on 10.1.125.71:45293 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 18.283696 ms -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Predicate isnotnull(gateway#39) generates partition filter: ((gateway.count#967 - gateway.nullCount#966) > 0) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ Thread-4] [o.a.s.i.Logging ] Predicate (gateway#39 = CCS) generates partition filter: ((gateway.lowerBound#965 <= CCS) AND (CCS <= gateway.upperBound#964)) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 81 (sql at :0) as input to shuffle 9 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 14 (sql at :0) with 3 output partitions -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 20 (sql at :0) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 20 (MapPartitionsRDD[81] at sql at :0), which has no missing parents -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_45 stored as values in memory (estimated size 51.5 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_45_piece0 stored as bytes in memory (estimated size 21.8 KiB, free 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_45_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 45 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 3 missing tasks from ShuffleMapStage 20 (MapPartitionsRDD[81] at sql at :0) (first 15 tasks are for partitions Vector(0, 1, 2)) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 20.0 with 3 tasks resource profile 0 -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 
10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_17_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 86.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_15_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 567.0 B, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 18.0 (TID 39) (10.1.125.71, executor 1, partition 0, PROCESS_LOCAL, 15543 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 7.0 in stage 17.0 (TID 38) in 436 ms on 10.1.125.71 (executor 1) (1/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_41_piece0 in memory on 10.1.125.71:45293 (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_27_piece0 in memory on 10.1.105.41:40655 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_35_piece0 in memory on 10.1.125.71:45293 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 18.0 (TID 40) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 1.0 in stage 17.0 (TID 32) in 639 ms on 10.1.125.71 (executor 1) (2/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 18.0 (TID 41) (10.1.125.71, executor 1, partition 2, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 5.0 in stage 17.0 (TID 36) in 683 ms on 10.1.125.71 (executor 1) (3/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 3.0 in stage 18.0 (TID 42) (10.1.125.71, executor 1, partition 3, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 3.0 in stage 17.0 (TID 34) in 692 ms on 10.1.125.71 (executor 1) (4/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 4.0 in stage 18.0 (TID 43) (10.1.105.41, executor 2, partition 4, PROCESS_LOCAL, 15322 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - 
[spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 6.0 in stage 17.0 (TID 37) in 827 ms on 10.1.105.41 (executor 2) (5/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 5.0 in stage 18.0 (TID 44) (10.1.105.41, executor 2, partition 5, PROCESS_LOCAL, 15755 bytes) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 2.0 in stage 17.0 (TID 33) in 839 ms on 10.1.105.41 (executor 2) (6/8) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_41_piece0 in memory on 10.1.105.41:40655 (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:42 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 6.0 in stage 18.0 (TID 45) (10.1.105.41, executor 2, partition 6, PROCESS_LOCAL, 15964 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 0.0 in stage 17.0 (TID 31) in 849 ms on 10.1.105.41 (executor 2) (7/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 7.0 in stage 18.0 (TID 46) (10.1.125.71, executor 1, partition 7, PROCESS_LOCAL, 15024 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 0.0 in stage 18.0 (TID 39) in 503 ms on 10.1.125.71 (executor 1) (1/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 19.0 (TID 47) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15543 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 4.0 in stage 17.0 (TID 35) in 945 ms on 10.1.105.41 (executor 2) (8/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 17.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 17 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.965 s -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set(ShuffleMapStage 19, ShuffleMapStage 20, ShuffleMapStage 18) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 
2025-08-04 10:06:42 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_44_piece0 in memory on 10.1.105.41:40655 (size: 28.7 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_35_piece0 in memory on 10.1.105.41:40655 (size: 31.2 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 19.0 (TID 48) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 1.0 in stage 18.0 (TID 40) in 383 ms on 10.1.125.71 (executor 1) (2/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:42 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_44_piece0 in memory on 10.1.125.71:45293 (size: 28.7 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 19.0 (TID 49) (10.1.125.71, executor 1, partition 2, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 2.0 in stage 18.0 (TID 41) in 382 ms on 10.1.125.71 (executor 1) (3/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_34_piece0 in memory on 10.1.105.41:40655 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 3.0 in stage 19.0 (TID 50) (10.1.125.71, executor 1, partition 3, PROCESS_LOCAL, 15331 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 7.0 in stage 18.0 (TID 46) in 205 ms on 10.1.125.71 (executor 1) (4/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_34_piece0 in memory on 10.1.125.71:45293 (size: 31.3 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 4.0 in stage 19.0 (TID 51) (10.1.125.71, executor 1, partition 4, PROCESS_LOCAL, 15322 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 3.0 in stage 18.0 (TID 42) in 480 ms on 10.1.125.71 (executor 1) (5/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 5.0 in stage 19.0 (TID 52) (10.1.105.41, 
executor 2, partition 5, PROCESS_LOCAL, 15755 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 6.0 in stage 18.0 (TID 45) in 486 ms on 10.1.105.41 (executor 2) (6/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 6.0 in stage 19.0 (TID 53) (10.1.105.41, executor 2, partition 6, PROCESS_LOCAL, 15964 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 5.0 in stage 18.0 (TID 44) in 523 ms on 10.1.105.41 (executor 2) (7/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 7.0 in stage 19.0 (TID 54) (10.1.105.41, executor 2, partition 7, PROCESS_LOCAL, 15024 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 4.0 in stage 18.0 (TID 43) in 569 ms on 10.1.105.41 (executor 2) (8/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Removed TaskSet 18.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 18 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 1.341 s -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set(ShuffleMapStage 19, ShuffleMapStage 20) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ Thread-4] [o.a.s.i.Logging ] For shuffle(7), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576 -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 20.0 (TID 55) (10.1.105.41, executor 2, partition 0, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 0.0 in stage 19.0 (TID 47) in 478 ms on 10.1.105.41 (executor 2) (1/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ Thread-4] [o.a.s.i.Logging ] spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but current version of codegened fast hashmap does not support this aggregate. 
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_45_piece0 in memory on 10.1.105.41:40655 (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 16.59187 ms -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Registering RDD 85 (sql at :0) as input to shuffle 10 -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got map stage job 15 (sql at :0) with 1 output partitions -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ShuffleMapStage 22 (sql at :0) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 21) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ShuffleMapStage 22 (MapPartitionsRDD[85] at sql at :0), which has no missing parents -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_46 stored as values in memory (estimated size 73.8 KiB, free 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_41_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_46_piece0 stored as bytes in memory (estimated size 30.7 KiB, free 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_46_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 30.7 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 46 from broadcast at DAGScheduler.scala:1585 -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_41_piece0 on 10.1.125.71:45293 in memory (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_41_piece0 on 10.1.105.41:40655 in memory (size: 29.6 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ 
dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ShuffleMapStage 22 (MapPartitionsRDD[85] at sql at :0) (first 15 tasks are for partitions Vector(0)) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 22.0 with 1 tasks resource profile 0 -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_40_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_40_piece0 on 10.1.125.71:45293 in memory (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_40_piece0 on 10.1.105.41:40655 in memory (size: 28.1 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 2.0 in stage 20.0 (TID 56) (10.1.105.41, executor 2, partition 2, PROCESS_LOCAL, 15093 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 0.0 in stage 20.0 (TID 55) in 74 ms on 10.1.105.41 (executor 2) (1/3) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 22.0 (TID 57) (10.1.105.41, executor 2, partition 0, NODE_LOCAL, 9007 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 2.0 in stage 20.0 (TID 56) in 27 ms on 10.1.105.41 (executor 2) (2/3) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_46_piece0 in memory on 10.1.105.41:40655 (size: 30.7 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 1.0 in stage 20.0 (TID 58) (10.1.125.71, executor 1, partition 1, PROCESS_LOCAL, 15603 bytes) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 1.0 in stage 19.0 (TID 48) in 521 ms on 10.1.125.71 (executor 1) (2/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 2.0 in stage 19.0 (TID 49) in 477 ms on 10.1.125.71 (executor 1) (3/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_45_piece0 in memory on 10.1.125.71:45293 (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-1] [o.a.s.i.Logging ] 
Finished task 7.0 in stage 19.0 (TID 54) in 198 ms on 10.1.105.41 (executor 2) (4/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 1.0 in stage 20.0 (TID 58) in 70 ms on 10.1.125.71 (executor 1) (3/3) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Removed TaskSet 20.0, whose tasks have all completed, from pool -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 20 (sql at :0) finished in 1.350 s -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set(ShuffleMapStage 19, ShuffleMapStage 22) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set() -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 3.0 in stage 19.0 (TID 50) in 482 ms on 10.1.125.71 (executor 1) (5/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 4.0 in stage 19.0 (TID 51) in 460 ms on 10.1.125.71 (executor 1) (6/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 6.0 in stage 19.0 (TID 53) in 369 ms on 10.1.105.41 (executor 2) (7/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-event-loop-3] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 7 to 10.1.105.41:46638 -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_45_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_45_piece0 on 10.1.105.41:40655 in memory (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_45_piece0 on 10.1.125.71:45293 in memory (size: 21.8 KiB, free: 2.2 GiB) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] [o.a.s.i.Logging ] Finished task 5.0 in stage 19.0 (TID 52) in 456 ms on 10.1.105.41 (executor 2) (8/8) -[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-0] 
[o.a.s.i.Logging ] Removed TaskSet 19.0, whose tasks have all completed, from pool
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 19 (sql at :0) finished in 1.655 s
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set(ShuffleMapStage 22)
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set()
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set()
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Finished task 0.0 in stage 22.0 (TID 57) in 462 ms on 10.1.105.41 (executor 2) (1/1)
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ task-result-getter-2] [o.a.s.i.Logging ] Removed TaskSet 22.0, whose tasks have all completed, from pool
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ShuffleMapStage 22 (sql at :0) finished in 0.527 s
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] looking for newly runnable stages
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] running: Set()
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] waiting: Set()
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] failed: Set()
-[2025-08-04, 10:06:43 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ Thread-4] [o.a.s.i.Logging ] For shuffle(10), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Code generated in 8.421513 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:43 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Code generated in 8.462148 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Starting job: $anonfun$withThreadLocalCaptured$1 at :0
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 16 ($anonfun$withThreadLocalCaptured$1 at :0) with 1 output partitions
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 25 ($anonfun$withThreadLocalCaptured$1 at :0)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 24)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List()
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 25 (MapPartitionsRDD[91] at $anonfun$withThreadLocalCaptured$1 at :0), which has no missing parents
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_47 stored as values in memory (estimated size 77.9 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_47_piece0 stored as bytes in memory (estimated size 32.2 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_46_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 30.7 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_47_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 32.2 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 47 from broadcast at DAGScheduler.scala:1585
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_46_piece0 on 10.1.105.41:40655 in memory (size: 30.7 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 25 (MapPartitionsRDD[91] at $anonfun$withThreadLocalCaptured$1 at :0) (first 15 tasks are for partitions Vector(0))
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 25.0 with 1 tasks resource profile 0
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 25.0 (TID 59) (10.1.105.41, executor 2, partition 0, NODE_LOCAL, 9018 bytes)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_44_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 28.7 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_44_piece0 on 10.1.125.71:45293 in memory (size: 28.7 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_44_piece0 on 10.1.105.41:40655 in memory (size: 28.7 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_47_piece0 in memory on 10.1.105.41:40655 (size: 32.2 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-event-loop-2] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 10 to 10.1.105.41:46638
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Finished task 0.0 in stage 25.0 (TID 59) in 321 ms on 10.1.105.41 (executor 2) (1/1)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ task-result-getter-3] [o.a.s.i.Logging ] Removed TaskSet 25.0, whose tasks have all completed, from pool
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 25 ($anonfun$withThreadLocalCaptured$1 at :0) finished in 0.338 s
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 16 is finished. Cancelling potential speculative or zombie tasks for this job
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 25: Stage finished
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Job 16 finished: $anonfun$withThreadLocalCaptured$1 at :0, took 0.344906 s
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Code generated in 4.560286 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_47_piece0 on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 in memory (size: 32.2 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Removed broadcast_47_piece0 on 10.1.105.41:40655 in memory (size: 32.2 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Block broadcast_48_piece0 stored as bytes in memory (estimated size 384.0 B, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_48_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 384.0 B, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ broadcast-exchange-1] [o.a.s.i.Logging ] Created broadcast 48 from $anonfun$withThreadLocalCaptured$1 at :0
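The "For shuffle(10), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576" line comes from Spark's adaptive query execution (AQE) coalescing shuffle partitions. The job's own configuration is not captured in this log, so the following is only a minimal sketch of the session settings those numbers map to (a 64 MiB advisory partition size and a 1 MiB minimum); the application name is an assumption.

    from pyspark.sql import SparkSession

    # Sketch only: AQE partition-coalescing settings matching the "advisory target size" /
    # "minimum partition size" values reported by the driver. Everything else about the
    # real job's configuration is unknown.
    spark = (
        SparkSession.builder
        .appName("successpaymentmetrics-job")  # assumed name, not taken from the log
        .config("spark.sql.adaptive.enabled", "true")
        .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
        .config("spark.sql.adaptive.advisoryPartitionSizeInBytes", "67108864")        # 64 MiB
        .config("spark.sql.adaptive.coalescePartitions.minPartitionSize", "1048576")  # 1 MiB
        .getOrCreate()
    )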
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] For shuffle(9, 8), advisory target size: 67108864, actual target size 1048576, minimum partition size: 1048576
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 5.679677 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 20.913584 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but current version of codegened fast hashmap does not support this aggregate.
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 10.956142 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but current version of codegened fast hashmap does not support this aggregate.
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Code generated in 15.348835 ms
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_49 stored as values in memory (estimated size 32.0 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Block broadcast_49_piece0 stored as bytes in memory (estimated size 31.3 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_49_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 31.3 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Created broadcast 49 from broadcast at SparkWrite.java:193
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Start processing data source write support: IcebergBatchWrite(table=dremio.successpaymentmetrics, format=PARQUET). The input RDD has 1 partitions.
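The IcebergBatchWrite(table=dremio.successpaymentmetrics, format=PARQUET) entry marks the start of an Iceberg table write from Spark. The query text itself is not visible in this log (the call site is reported only as "sql at :0"), so the sketch below is a hypothetical reconstruction of the kind of overwrite that would drive this write path: the source table name is invented, only the target table name comes from the log, and the Iceberg catalog is assumed to be configured on the session.

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()  # Iceberg catalog settings assumed to be in place

    # Hypothetical upstream result; the real query is not captured in the log.
    metrics_df = spark.table("stg.success_payment_metrics")

    # An overwrite through the DataFrameWriterV2 API triggers an IcebergBatchWrite followed by
    # the "Committing overwrite ..." / "Committed snapshot ..." sequence seen further down.
    metrics_df.writeTo("dremio.successpaymentmetrics").overwritePartitions()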
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ Thread-4] [o.a.s.i.Logging ] Starting job: sql at :0
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Got job 17 (sql at :0) with 1 output partitions
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Final stage: ResultStage 28 (sql at :0)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Parents of final stage: List(ShuffleMapStage 27, ShuffleMapStage 26)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Missing parents: List()
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting ResultStage 28 (MapPartitionsRDD[99] at sql at :0), which has no missing parents
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_50 stored as values in memory (estimated size 163.0 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Block broadcast_50_piece0 stored as bytes in memory (estimated size 63.6 KiB, free 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_50_piece0 in memory on main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:7079 (size: 63.6 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Created broadcast 50 from broadcast at DAGScheduler.scala:1585
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Submitting 1 missing tasks from ResultStage 28 (MapPartitionsRDD[99] at sql at :0) (first 15 tasks are for partitions Vector(0))
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Adding task set 28.0 with 1 tasks resource profile 0
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Starting task 0.0 in stage 28.0 (TID 60) (10.1.125.71, executor 1, partition 0, NODE_LOCAL, 9300 bytes)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_50_piece0 in memory on 10.1.125.71:45293 (size: 63.6 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:44 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-event-loop-1] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 9 to 10.1.125.71:47656
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:44 INFO [ dispatcher-event-loop-7] [o.a.s.i.Logging ] Asked to send map output locations for shuffle 8 to 10.1.125.71:47656
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_48_piece0 in memory on 10.1.125.71:45293 (size: 384.0 B, free: 2.2 GiB)
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ dispatcher-BlockManagerMaster] [o.a.s.i.Logging ] Added broadcast_49_piece0 in memory on 10.1.125.71:45293 (size: 31.3 KiB, free: 2.2 GiB)
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Finished task 0.0 in stage 28.0 (TID 60) in 671 ms on 10.1.125.71 (executor 1) (1/1)
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ task-result-getter-1] [o.a.s.i.Logging ] Removed TaskSet 28.0, whose tasks have all completed, from pool
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] ResultStage 28 (sql at :0) finished in 0.686 s
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Job 17 is finished. Cancelling potential speculative or zombie tasks for this job
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ dag-scheduler-event-loop] [o.a.s.i.Logging ] Killing all running tasks in stage 28: Stage finished
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ Thread-4] [o.a.s.i.Logging ] Job 17 finished: sql at :0, took 0.693099 s
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ Thread-4] [o.a.s.i.Logging ] Data source write support IcebergBatchWrite(table=dremio.successpaymentmetrics, format=PARQUET) is committing.
-[2025-08-04, 10:06:45 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Committing overwrite of 0 data files with 1 new data files, scanSnapshotId: 8867521827718512735, conflictDetectionFilter: true to table dremio.successpaymentmetrics
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:45 INFO [ Thread-4] [o.a.i.h.HadoopTableOperations ] Committed a new metadata file s3://experience-360/successpaymentmetrics/metadata/v31.metadata.json
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ Thread-4] [o.a.i.SnapshotProducer ] Committed snapshot 2976153908932583450 (BaseOverwriteFiles)
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ Thread-4] [o.a.i.m.LoggingMetricsReporter ] Received metrics report: CommitReport{tableName=dremio.successpaymentmetrics, snapshotId=2976153908932583450, sequenceNumber=31, operation=append, commitMetrics=CommitMetricsResult{totalDuration=TimerResult{timeUnit=NANOSECONDS, totalDuration=PT1.038228797S, count=1}, attempts=CounterResult{unit=COUNT, value=1}, addedDataFiles=CounterResult{unit=COUNT, value=1}, removedDataFiles=null, totalDataFiles=CounterResult{unit=COUNT, value=1}, addedDeleteFiles=null, addedEqualityDeleteFiles=null, addedPositionalDeleteFiles=null, addedDVs=null, removedDeleteFiles=null, removedEqualityDeleteFiles=null, removedPositionalDeleteFiles=null, removedDVs=null, totalDeleteFiles=CounterResult{unit=COUNT, value=0}, addedRecords=CounterResult{unit=COUNT, value=18}, removedRecords=null, totalRecords=CounterResult{unit=COUNT, value=18}, addedFilesSizeInBytes=CounterResult{unit=BYTES, value=2308}, removedFilesSizeInBytes=null, totalFilesSizeInBytes=CounterResult{unit=BYTES, value=2308}, addedPositionalDeletes=null, removedPositionalDeletes=null, totalPositionalDeletes=CounterResult{unit=COUNT, value=0}, addedEqualityDeletes=null, removedEqualityDeletes=null, totalEqualityDeletes=CounterResult{unit=COUNT, value=0}}, metadata={engine-version=3.5.2, app-id=spark-2157c1fc2dae4114a9e062bfdda9d4a3, engine-name=spark, iceberg-version=Apache Iceberg 1.8.1 (commit 9ce0fcf0af7becf25ad9fc996c3bad2afdcfd33d)}}
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ Thread-4] [o.a.i.s.s.SparkWrite ] Committed in 1188 ms
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ Thread-4] [o.a.s.i.Logging ] Data source write support IcebergBatchWrite(table=dremio.successpaymentmetrics, format=PARQUET) committed.
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] INFO:py4j.clientserver:Closing down clientserver connection
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] SparkContext is stopping with exitCode 0.
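The CommitReport above records snapshot 2976153908932583450 with 18 added records (2308 bytes) and metadata file v31.metadata.json, and the py4j line confirms the driver is a PySpark process. If the commit needs to be verified after the run, Iceberg's standard metadata tables expose the same information; a possible check, assuming the same catalog setup as the job:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()  # same Iceberg catalog assumptions as the job

    # Cross-check the snapshot id, operation and record counts reported in the CommitReport.
    spark.sql("""
        SELECT committed_at, snapshot_id, operation, summary
        FROM dremio.successpaymentmetrics.snapshots
        ORDER BY committed_at DESC
    """).show(truncate=False)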
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.s.j.s.AbstractConnector ] Stopped Spark@d3a9bc8{HTTP/1.1, (http/1.1)}{0.0.0.0:4040}
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] Stopped Spark web UI at http://main-x2zblobw-da7e479874862258-driver-svc.dev2.svc:4040
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] Shutting down all executors
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [patcher-CoarseGrainedScheduler] [o.a.s.i.Logging ] Asking each executor to shut down
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 WARN [ -723978205-pool-20-thread-2] [o.a.s.i.Logging ] Kubernetes client has been closed.
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ dispatcher-event-loop-0] [o.a.s.i.Logging ] MapOutputTrackerMasterEndpoint stopped!
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] MemoryStore cleared
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] BlockManager stopped
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] BlockManagerMaster stopped
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ dispatcher-event-loop-2] [o.a.s.i.Logging ] OutputCommitCoordinator stopped!
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ main] [o.a.s.i.Logging ] Successfully stopped SparkContext
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.s.i.Logging ] Shutdown hook called
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.s.i.Logging ] Deleting directory /tmp/spark-87940d90-cce1-4d90-b080-dfd9d5d83e87
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.s.i.Logging ] Deleting directory /var/data/spark-77925c74-10d6-43b7-877b-9e0a03bd8e58/spark-adf0003f-64b6-446c-bb61-6ff613671662/pyspark-bf5aa326-fb34-4475-9a56-f176a8828c60
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.s.i.Logging ] Deleting directory /var/data/spark-77925c74-10d6-43b7-877b-9e0a03bd8e58/spark-adf0003f-64b6-446c-bb61-6ff613671662
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.h.m.i.MetricsSystemImpl ] Stopping s3a-file-system metrics system...
-[2025-08-04, 10:06:46 UTC] {pod_manager.py:472} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.h.m.i.MetricsSystemImpl ] s3a-file-system metrics system stopped.
-[2025-08-04, 10:06:47 UTC] {pod_manager.py:490} INFO - [spark-kubernetes-driver] 2025-08-04 10:06:46 INFO [ shutdown-hook-0] [o.a.h.m.i.MetricsSystemImpl ] s3a-file-system metrics system shutdown complete.
-[2025-08-04, 10:06:47 UTC] {pod_manager.py:623} INFO - Pod main-x2zblobw-driver has phase Running
-[2025-08-04, 10:06:50 UTC] {spark_kubernetes.py:256} INFO - Deleting spark job: main-x2zblobw
\ No newline at end of file
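The last two lines are the Airflow side of the run: pod_manager.py reports the driver pod phase and spark_kubernetes.py deletes the SparkApplication once the task has finished, which is the operator's normal cleanup of the custom resource. For context, a hypothetical sketch of the operator definition such a run implies; the task name "main" is inferred from the job id main-x2zblobw and the namespace from the driver service (dev2), while the DAG id, schedule, manifest file and connection id are assumptions.

    import pendulum
    from airflow import DAG
    from airflow.providers.cncf.kubernetes.operators.spark_kubernetes import SparkKubernetesOperator

    with DAG(
        dag_id="successpaymentmetrics_pipeline",          # assumed
        schedule="0 10 * * *",                            # assumed
        start_date=pendulum.datetime(2025, 1, 1, tz="UTC"),
        catchup=False,
    ) as dag:
        main = SparkKubernetesOperator(
            task_id="main",                               # inferred from the job id "main-x2zblobw"
            namespace="dev2",                             # inferred from the driver service DNS name
            application_file="spark_application.yaml",    # assumed SparkApplication manifest
            kubernetes_conn_id="kubernetes_default",      # assumed connection
        )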