| Stage Id | Description | Submitted | Duration | Tasks: Succeeded/Total | Input | Output | Shuffle Read | Shuffle Write |
|---|---|---|---|---|---|---|---|---|
| 18 | Spark Connect - session_id: "61ec31aa-ec60-4847-8384-fe1f77644a71" (full request below) | 2026/05/11 04:44:54 | 0.8 s | 8/8 | 327.8 KiB | | | |

The Description cell for stage 18 contains the full Spark Connect request:

```text
session_id: "61ec31aa-ec60-4847-8384-fe1f77644a71"
user_context {
  user_id: "iqran"
}
plan {
  command {
    sql_command {
      input {
        common {
          plan_id: 23
        }
        sql {
          query: "\nCREATE TABLE bronze.test.test \nUSING iceberg \nAS\nWITH base_data AS (\n -- Generates 1,000 initial rows\n SELECT \n id as raw_id,\n uuid() as session_id,\n CAST(rand() * 10000 AS INT) as category_id,\n CASE WHEN rand() > 0.5 THEN \'ACTIVE\' ELSE \'INACTIVE\' END as status,\n current_timestamp() as ts\n FROM range(1000)\n),\nexploded_data AS (\n -- Cross join to explode 1,000 rows to 1,000,000 rows\n -- Increase the range(1000) to 10000 if you want 10M rows\n SELECT a.* \n FROM base_data a\n CROSS JOIN (SELECT id FROM range(1000)) b\n)\nSELECT \n raw_id,\n session_id,\n -- Complex hashing to stress CPU/Executors\n sha2(concat(CAST(raw_id AS STRING), session_id), 256) as record_hash,\n sha2(reverse(session_id), 512) as security_token,\n -- Analytical transformations\n category_id,\n status,\n ts,\n -- Add some junk data to increase file size (S3 stress)\n repeat(sha2(CAST(raw_id AS STRING), 256), 5) as padding_data\nFROM exploded_data;\n"
        }
      }
    }
  }
}
client_type: "_SPARK_CONNECT_PYTHON spark/4.1.1 os/darwin python/3.10.18"
request_options {
  result_chunking_options {
    allow_arrow_batch_chunking: true
  }
}
request_options {
  reattach_options {
    reattachable: true
  }
}
operation_id: "02e1d862-d2a1-47eb-8234-735f3dc89c10"
client_observed_server_side_session_id: "6d58c63b-fd77-4e70-ac9c-f6d1a7db4baf"
```
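The `client_type` field shows this request came from a PySpark Spark Connect client (Python 3.10, Spark 4.1.1). Below is a minimal sketch of how such a CTAS could be submitted over Spark Connect; the connect endpoint `sc://localhost:15002` is an assumption, and the sketch presumes the `bronze` Iceberg catalog is already configured on the server side.

```python
# Minimal sketch: submitting the captured CTAS over Spark Connect.
# Assumptions: a Spark Connect server is reachable at sc://localhost:15002
# and the "bronze" Iceberg catalog is configured on the server.
from pyspark.sql import SparkSession

spark = SparkSession.builder.remote("sc://localhost:15002").getOrCreate()

ctas_query = """
CREATE TABLE bronze.test.test
USING iceberg
AS
WITH base_data AS (
  -- Generates 1,000 initial rows
  SELECT
    id AS raw_id,
    uuid() AS session_id,
    CAST(rand() * 10000 AS INT) AS category_id,
    CASE WHEN rand() > 0.5 THEN 'ACTIVE' ELSE 'INACTIVE' END AS status,
    current_timestamp() AS ts
  FROM range(1000)
),
exploded_data AS (
  -- Cross join to explode 1,000 rows to 1,000,000 rows
  SELECT a.*
  FROM base_data a
  CROSS JOIN (SELECT id FROM range(1000)) b
)
SELECT
  raw_id,
  session_id,
  -- Hashing to stress CPU/executors
  sha2(concat(CAST(raw_id AS STRING), session_id), 256) AS record_hash,
  sha2(reverse(session_id), 512) AS security_token,
  category_id,
  status,
  ts,
  -- Padding to increase file size (S3 stress)
  repeat(sha2(CAST(raw_id AS STRING), 256), 5) AS padding_data
FROM exploded_data
"""

# spark.sql() on a Spark Connect session ships the statement to the server as
# a SqlCommand, which is what appears in the stage description above.
spark.sql(ctas_query)
```

Per the comments embedded in the query itself, the CROSS JOIN fans 1,000 base rows out to 1,000,000 output rows, and the sha2/repeat columns add CPU work and file-size padding, which is what makes this single CTAS stage a deliberate load test.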