digraph G {
  0 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
  subgraph cluster1 {
    isCluster="true";
    label="WholeStageCodegen (2)\n \nduration: 13 ms";
    2 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build: 13 ms<br>number of output rows: 1"];
  }
  3 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 200<br>local merged chunks fetched: 0<br>shuffle write time total (min, med, max (stageId: taskId))<br>31 ms (0 ms, 0 ms, 0 ms (stage 119294.0: task 21068505))<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 200<br>local bytes read: 4.1 KiB<br>fetch wait time: 0 ms<br>remote bytes read: 9.9 KiB<br>merged fetch fallback count: 0<br>local blocks read: 59<br>remote merged chunks fetched: 0<br>remote blocks read: 141<br>data size total (min, med, max (stageId: taskId))<br>3.1 KiB (16.0 B, 16.0 B, 16.0 B (stage 119294.0: task 21068379))<br>local merged bytes read: 0.0 B<br>number of partitions: 1<br>remote reqs duration: 19 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written total (min, med, max (stageId: taskId))<br>14.1 KiB (72.0 B, 72.0 B, 72.0 B (stage 119294.0: task 21068379))"];
  subgraph cluster4 {
    isCluster="true";
    label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n0 ms (0 ms, 0 ms, 0 ms (stage 119294.0: task 21068379))";
    5 [labelType="html" label="<b>HashAggregate</b><br><br>time in aggregation build total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 119294.0: task 21068379))<br>number of output rows: 200"];
    6 [labelType="html" label="<br><b>Project</b><br><br>"];
    7 [labelType="html" label="<b>Filter</b><br><br>number of output rows: 0"];
  }
  8 [labelType="html" label="<b>InMemoryTableScan</b><br><br>number of output rows: 0"];
  9 [labelType="html" label="<br><b>AdaptiveSparkPlan</b><br><br>"];
  subgraph cluster10 {
    isCluster="true";
    label="WholeStageCodegen (2)\n \nduration: total (min, med, max (stageId: taskId))\n0 ms (0 ms, 0 ms, 0 ms (stage 119291.0: task 21068180))";
    11 [labelType="html" label="<br><b>SerializeFromObject</b><br><br>"];
  }
  12 [labelType="html" label="<br><b>MapGroups</b><br><br>"];
  subgraph cluster13 {
    isCluster="true";
    label="WholeStageCodegen (1)\n \nduration: total (min, med, max (stageId: taskId))\n0 ms (0 ms, 0 ms, 0 ms (stage 119291.0: task 21068180))";
    14 [labelType="html" label="<b>Sort</b><br><br>sort time total (min, med, max (stageId: taskId))<br>0 ms (0 ms, 0 ms, 0 ms (stage 119291.0: task 21068180))<br>peak memory total (min, med, max (stageId: taskId))<br>12.5 MiB (64.0 KiB, 64.0 KiB, 64.0 KiB (stage 119291.0: task 21068180))<br>spill size total (min, med, max (stageId: taskId))<br>0.0 B (0.0 B, 0.0 B, 0.0 B (stage 119291.0: task 21068180))"];
  }
  15 [labelType="html" label="<b>Exchange</b><br><br>shuffle records written: 0<br>local merged chunks fetched: 0<br>shuffle write time: 0 ms<br>remote merged bytes read: 0.0 B<br>local merged blocks fetched: 0<br>corrupt merged block chunks: 0<br>remote merged reqs duration: 0 ms<br>remote merged blocks fetched: 0<br>records read: 0<br>local bytes read: 0.0 B<br>fetch wait time: 0 ms<br>remote bytes read: 0.0 B<br>merged fetch fallback count: 0<br>local blocks read: 0<br>remote merged chunks fetched: 0<br>remote blocks read: 0<br>data size: 0.0 B<br>local merged bytes read: 0.0 B<br>number of partitions: 200<br>remote reqs duration: 0 ms<br>remote bytes read to disk: 0.0 B<br>shuffle bytes written: 0.0 B"];
  16 [labelType="html" label="<br><b>AppendColumnsWithObject</b><br><br>"];
  17 [labelType="html" label="<b>Scan</b><br><br>number of output rows: 0"];
  2->0;
  3->2;
  5->3;
  6->5;
  7->6;
  8->7;
  9->8;
  11->9;
  12->11;
  14->12;
  15->14;
  16->15;
  17->16;
}
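
The DOT source above is the graph the Spark UI draws on the SQL / DataFrame tab for this query; the labelType="html" attributes and the <b>/<br> markup inside the labels are consumed by the UI's dagre-d3 renderer rather than by Graphviz itself. As a minimal sketch (assuming the digraph has been saved verbatim to plan.dot and the Graphviz dot binary is on PATH; neither the file nor the binary is part of this dump), the graph can still be rendered offline, although plain Graphviz will print the HTML tags literally:

import scala.sys.process._

// Hypothetical offline rendering of the UI's DOT dump with Graphviz.
// Assumes `dot` is installed and the digraph above was saved as plan.dot.
object RenderPlanDag extends App {
  val exit = Seq("dot", "-Tsvg", "plan.dot", "-o", "plan.svg").!
  if (exit != 0) sys.error(s"dot exited with code $exit")
}
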
AdaptiveSparkPlan isFinalPlan=true
+- WholeStageCodegen (2)
   +- HashAggregate(keys=[], functions=[count(1)])
      +- Exchange SinglePartition, ENSURE_REQUIREMENTS, [plan_id=2967036]
         +- WholeStageCodegen (1)
            +- HashAggregate(keys=[], functions=[partial_count(1)])
               +- Project
                  +- Filter isDir#3280421: boolean
                     +- InMemoryTableScan [isDir#3280421], [isDir#3280421]
                        +- AdaptiveSparkPlan isFinalPlan=true
                           +- WholeStageCodegen (2)
                              +- SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280419, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280420L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280421, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280422L]
                                 +- MapGroups org.apache.spark.sql.KeyValueGroupedDataset$$Lambda$6934/0x0000000801d21280@ecda8fc, value#3280413.toString, newInstance(class org.apache.spark.sql.delta.SerializableFileStatus), [value#3280413], [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L], obj#3280418: org.apache.spark.sql.delta.SerializableFileStatus
                                    +- WholeStageCodegen (1)
                                       +- Sort [value#3280413 ASC NULLS FIRST], false, 0
                                          +- Exchange hashpartitioning(value#3280413, 200), ENSURE_REQUIREMENTS, [plan_id=2966955]
                                             +- AppendColumnsWithObject org.apache.spark.sql.delta.commands.VacuumCommand$$$Lambda$6931/0x0000000801d1d990@4e6d2c38, [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280403, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280404L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280405, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280406L], [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false, true) AS value#3280413]
                                                +- Scan[obj#3280402]
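
The tree above is the shape Spark plans for Delta's VACUUM file listing (the lambdas belong to org.apache.spark.sql.delta.commands.VacuumCommand): a typed groupByKey/mapGroups deduplication over file-status objects (the inner AdaptiveSparkPlan, planned as AppendColumnsWithObject, Exchange, Sort, MapGroups, SerializeFromObject), cached as an InMemoryRelation, and then a count of directory entries on top of it (Filter, Project, and the partial_count/count HashAggregate pair around a single-partition Exchange). A minimal sketch that reproduces the same plan shape, using a hypothetical FileStatusRow case class as a stand-in for org.apache.spark.sql.delta.SerializableFileStatus:

import org.apache.spark.sql.{Dataset, SparkSession}

// Hypothetical stand-in for org.apache.spark.sql.delta.SerializableFileStatus;
// the real class carries the same four fields.
case class FileStatusRow(path: String, length: Long, isDir: Boolean, modificationTime: Long)

object PlanShapeSketch extends App {
  val spark = SparkSession.builder().master("local[*]").appName("plan-shape").getOrCreate()
  import spark.implicits._

  val files: Dataset[FileStatusRow] = Seq.empty[FileStatusRow].toDS()

  // groupByKey on a typed Dataset plans as AppendColumnsWithObject +
  // Exchange(hashpartitioning) + Sort; mapGroups adds MapGroups +
  // SerializeFromObject -- together, the inner AdaptiveSparkPlan above.
  val deduped = files
    .groupByKey(_.path)
    .mapGroups((_, group) => group.next())

  deduped.cache() // becomes the InMemoryRelation / InMemoryTableScan

  // The outer plan: a Filter on isDir, an empty Project, then the
  // partial_count / Exchange SinglePartition / count HashAggregate pair.
  val dirCount = deduped.where($"isDir").count()
  println(s"directories: $dirCount")

  spark.stop()
}

In the dump itself the scan produced no rows (number of output rows: 0 on both Scan and InMemoryTableScan), so every shuffle and sort metric in the inner plan is zero and only the final count(1) aggregate emits its single result row.
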
== Physical Plan ==
AdaptiveSparkPlan (26)
+- == Final Plan ==
   * HashAggregate (20)
   +- ShuffleQueryStage (19), Statistics(sizeInBytes=3.1 KiB, rowCount=200)
      +- Exchange (18)
         +- * HashAggregate (17)
            +- * Project (16)
               +- * Filter (15)
                  +- TableCacheQueryStage (14), Statistics(sizeInBytes=0.0 B, rowCount=0)
                     +- InMemoryTableScan (1)
                        +- InMemoryRelation (2)
                           +- AdaptiveSparkPlan (13)
                              +- == Final Plan ==
                                 * SerializeFromObject (9)
                                 +- MapGroups (8)
                                    +- * Sort (7)
                                       +- ShuffleQueryStage (6), Statistics(sizeInBytes=0.0 B, rowCount=0)
                                          +- Exchange (5)
                                             +- AppendColumnsWithObject (4)
                                                +- Scan (3)
                              +- == Initial Plan ==
                                 SerializeFromObject (12)
                                 +- MapGroups (11)
                                    +- Sort (10)
                                       +- Exchange (5)
                                          +- AppendColumnsWithObject (4)
                                             +- Scan (3)
+- == Initial Plan ==
   HashAggregate (25)
   +- Exchange (24)
      +- HashAggregate (23)
         +- Project (22)
            +- Filter (21)
               +- InMemoryTableScan (1)
                  +- InMemoryRelation (2)
                     +- AdaptiveSparkPlan (13)
                        +- == Final Plan ==
                           * SerializeFromObject (9)
                           +- MapGroups (8)
                              +- * Sort (7)
                                 +- ShuffleQueryStage (6), Statistics(sizeInBytes=0.0 B, rowCount=0)
                                    +- Exchange (5)
                                       +- AppendColumnsWithObject (4)
                                          +- Scan (3)
                        +- == Initial Plan ==
                           SerializeFromObject (12)
                           +- MapGroups (11)
                              +- Sort (10)
                                 +- Exchange (5)
                                    +- AppendColumnsWithObject (4)
                                       +- Scan (3)

(1) InMemoryTableScan
Output [1]: [isDir#3280421]
Arguments: [isDir#3280421], [isDir#3280421]

(2) InMemoryRelation
Arguments: [path#3280419, length#3280420L, isDir#3280421, modificationTime#3280422L], CachedRDDBuilder(org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer@685dbe62,StorageLevel(disk, memory, deserialized, 1 replicas),AdaptiveSparkPlan isFinalPlan=true
+- == Final Plan ==
   *(2) SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280419, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280420L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280421, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280422L]
   +- MapGroups org.apache.spark.sql.KeyValueGroupedDataset$$Lambda$6934/0x0000000801d21280@ecda8fc, value#3280413.toString, newInstance(class org.apache.spark.sql.delta.SerializableFileStatus), [value#3280413], [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L], obj#3280418: org.apache.spark.sql.delta.SerializableFileStatus
      +- *(1) Sort [value#3280413 ASC NULLS FIRST], false, 0
         +- ShuffleQueryStage 0
            +- Exchange hashpartitioning(value#3280413, 200), ENSURE_REQUIREMENTS, [plan_id=2966955]
               +- AppendColumnsWithObject org.apache.spark.sql.delta.commands.VacuumCommand$$$Lambda$6931/0x0000000801d1d990@4e6d2c38, [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280403, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280404L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280405, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280406L], [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false, true) AS value#3280413]
                  +- Scan[obj#3280402]
+- == Initial Plan ==
   SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280419, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280420L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280421, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280422L]
   +- MapGroups org.apache.spark.sql.KeyValueGroupedDataset$$Lambda$6934/0x0000000801d21280@ecda8fc, value#3280413.toString, newInstance(class org.apache.spark.sql.delta.SerializableFileStatus), [value#3280413], [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L], obj#3280418: org.apache.spark.sql.delta.SerializableFileStatus
      +- Sort [value#3280413 ASC NULLS FIRST], false, 0
         +- Exchange hashpartitioning(value#3280413, 200), ENSURE_REQUIREMENTS, [plan_id=2966955]
            +- AppendColumnsWithObject org.apache.spark.sql.delta.commands.VacuumCommand$$$Lambda$6931/0x0000000801d1d990@4e6d2c38, [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280403, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280404L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280405, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280406L], [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false, true) AS value#3280413]
               +- Scan[obj#3280402]
,None)

(3) Scan
Output [1]: [obj#3280402]
Arguments: obj#3280402: org.apache.spark.sql.delta.SerializableFileStatus, MapPartitionsRDD[201872] at $anonfun$recordDeltaOperationInternal$1 at DatabricksLogging.scala:128

(4) AppendColumnsWithObject
Input [1]: [obj#3280402]
Arguments: org.apache.spark.sql.delta.commands.VacuumCommand$$$Lambda$6931/0x0000000801d1d990@4e6d2c38, [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280403, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280404L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280405, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280406L], [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false, true) AS value#3280413]

(5) Exchange
Input [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: hashpartitioning(value#3280413, 200), ENSURE_REQUIREMENTS, [plan_id=2966955]

(6) ShuffleQueryStage
Output [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: 0

(7) Sort [codegen id : 1]
Input [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: [value#3280413 ASC NULLS FIRST], false, 0

(8) MapGroups
Input [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: org.apache.spark.sql.KeyValueGroupedDataset$$Lambda$6934/0x0000000801d21280@ecda8fc, value#3280413.toString, newInstance(class org.apache.spark.sql.delta.SerializableFileStatus), [value#3280413], [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L], obj#3280418: org.apache.spark.sql.delta.SerializableFileStatus

(9) SerializeFromObject [codegen id : 2]
Input [1]: [obj#3280418]
Arguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280419, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280420L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280421, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280422L]

(10) Sort
Input [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: [value#3280413 ASC NULLS FIRST], false, 0

(11) MapGroups
Input [5]: [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L, value#3280413]
Arguments: org.apache.spark.sql.KeyValueGroupedDataset$$Lambda$6934/0x0000000801d21280@ecda8fc, value#3280413.toString, newInstance(class org.apache.spark.sql.delta.SerializableFileStatus), [value#3280413], [path#3280403, length#3280404L, isDir#3280405, modificationTime#3280406L], obj#3280418: org.apache.spark.sql.delta.SerializableFileStatus

(12) SerializeFromObject
Input [1]: [obj#3280418]
Arguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).path, true, false, true) AS path#3280419, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).length AS length#3280420L, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).isDir AS isDir#3280421, knownnotnull(assertnotnull(input[0, org.apache.spark.sql.delta.SerializableFileStatus, true])).modificationTime AS modificationTime#3280422L]

(13) AdaptiveSparkPlan
Output [4]: [path#3280419, length#3280420L, isDir#3280421, modificationTime#3280422L]
Arguments: isFinalPlan=true

(14) TableCacheQueryStage
Output [1]: [isDir#3280421]
Arguments: 0

(15) Filter [codegen id : 1]
Input [1]: [isDir#3280421]
Condition : isDir#3280421

(16) Project [codegen id : 1]
Output: []
Input [1]: [isDir#3280421]

(17) HashAggregate [codegen id : 1]
Input: []
Keys: []
Functions [1]: [partial_count(1)]
Aggregate Attributes [1]: [count#3280522L]
Results [1]: [count#3280523L]

(18) Exchange
Input [1]: [count#3280523L]
Arguments: SinglePartition, ENSURE_REQUIREMENTS, [plan_id=2967036]

(19) ShuffleQueryStage
Output [1]: [count#3280523L]
Arguments: 1

(20) HashAggregate [codegen id : 2]
Input [1]: [count#3280523L]
Keys: []
Functions [1]: [count(1)]
Aggregate Attributes [1]: [count(1)#3280459L]
Results [1]: [count(1)#3280459L AS count#3280460L]

(21) Filter
Input [1]: [isDir#3280421]
Condition : isDir#3280421

(22) Project
Output: []
Input [1]: [isDir#3280421]

(23) HashAggregate
Input: []
Keys: []
Functions [1]: [partial_count(1)]
Aggregate Attributes [1]: [count#3280522L]
Results [1]: [count#3280523L]

(24) Exchange
Input [1]: [count#3280523L]
Arguments: SinglePartition, ENSURE_REQUIREMENTS, [plan_id=2966974]

(25) HashAggregate
Input [1]: [count#3280523L]
Keys: []
Functions [1]: [count(1)]
Aggregate Attributes [1]: [count(1)#3280459L]
Results [1]: [count(1)#3280459L AS count#3280460L]

(26) AdaptiveSparkPlan
Output [1]: [count#3280460L]
Arguments: isFinalPlan=true
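
A plan in this formatted style comes from EXPLAIN FORMATTED (or Dataset.explain("formatted")); the Spark UI shows the same text in the query's Details section. A short sketch, reusing the hypothetical deduped Dataset from the earlier example; note that calling explain before the query has actually run prints the AdaptiveSparkPlan nodes with isFinalPlan=false, whereas this dump was captured after execution:

import org.apache.spark.sql.functions.col

// Prints a numbered operator tree followed by one "(n) Operator" detail
// block per node; with AQE enabled the tree also carries the
// == Final Plan == / == Initial Plan == sections seen above.
deduped.where(col("isDir")).groupBy().count().explain("formatted")

// Equivalent SQL-side form, assuming the data is exposed as a (hypothetical) view:
// spark.sql("EXPLAIN FORMATTED SELECT count(1) FROM deduped_view WHERE isDir").show(false)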