-- Complete Snowflake Warehouse Metrics Analysis Query
-- Ready to use - adjust the 30-day lookback (the DATEADD filter repeated in each CTE) as needed
CREATE OR REPLACE TABLE FINOPS_WAREHOUSE_METRICS AS
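-- warehouse_base: core per-warehouse volumes - queries, users, credits, runtime, and data scanned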
WITH warehouse_base AS (
SELECT
warehouse_name,
warehouse_id,
COUNT(*) as total_queries,
COUNT(DISTINCT user_name) as unique_users,
SUM(COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)) as total_credits,
AVG(COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)) as avg_credits_per_query,
COUNT(DISTINCT DATE(start_time)) as active_days,
AVG(CASE WHEN execution_status = 'SUCCESS'
THEN DATEDIFF('second', start_time, end_time) END) as avg_execution_time_sec,
SUM(COALESCE(bytes_scanned, 0)) / (1024*1024*1024) as total_gb_scanned,
SUM(COALESCE(rows_produced, 0)) as total_rows_produced
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
AND warehouse_name IS NOT NULL
AND warehouse_name != ''
GROUP BY warehouse_name, warehouse_id
),
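-- performance_buckets: runtime distribution per warehouse (EXECUTION_TIME is in milliseconds)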
performance_buckets AS (
SELECT
warehouse_name,
SUM(CASE WHEN COALESCE(execution_time, 0) BETWEEN 0 AND 1000 THEN 1 ELSE 0 END) as queries_0_to_1_sec,
SUM(CASE WHEN COALESCE(execution_time, 0) BETWEEN 1001 AND 10000 THEN 1 ELSE 0 END) as queries_1_to_10_sec,
SUM(CASE WHEN COALESCE(execution_time, 0) BETWEEN 10001 AND 30000 THEN 1 ELSE 0 END) as queries_10_to_30_sec,
SUM(CASE WHEN COALESCE(execution_time, 0) BETWEEN 30001 AND 60000 THEN 1 ELSE 0 END) as queries_30_to_60_sec,
SUM(CASE WHEN COALESCE(execution_time, 0) BETWEEN 60001 AND 300000 THEN 1 ELSE 0 END) as queries_1_to_5_min,
SUM(CASE WHEN COALESCE(execution_time, 0) > 300000 THEN 1 ELSE 0 END) as queries_5_min_plus
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
AND warehouse_name IS NOT NULL
AND warehouse_name != ''
GROUP BY warehouse_name
),
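-- bad_practices: anti-pattern counts (heuristics over query text, partition pruning, spilling, and failures)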
bad_practices AS (
SELECT
warehouse_name,
SUM(CASE WHEN UPPER(COALESCE(query_text, '')) LIKE '%SELECT *%'
AND COALESCE(bytes_scanned, 0) > 1073741824 THEN 1 ELSE 0 END) as select_star_on_large_tables,
SUM(CASE WHEN COALESCE(partitions_scanned, 0) > COALESCE(partitions_total, 1) * 0.8
AND COALESCE(partitions_total, 0) > 10 THEN 1 ELSE 0 END) as unpartitioned_scan_queries,
SUM(CASE WHEN UPPER(COALESCE(query_text, '')) LIKE '%CROSS JOIN%'
OR UPPER(COALESCE(query_text, '')) LIKE '%CARTESIAN%' THEN 1 ELSE 0 END) as cartesian_join_queries,
SUM(CASE WHEN COALESCE(rows_produced, 0) = 0
AND COALESCE(execution_time, 0) > 5000 THEN 1 ELSE 0 END) as zero_result_expensive_queries,
SUM(CASE WHEN execution_status IN ('FAIL', 'CANCELLED') THEN 1 ELSE 0 END) as failed_cancelled_queries,
SUM(CASE WHEN COALESCE(compilation_time, 0) > 10000 THEN 1 ELSE 0 END) as high_compile_time_queries,
SUM(CASE WHEN COALESCE(bytes_spilled_to_local_storage, 0) > 0 THEN 1 ELSE 0 END) as spilled_to_local_queries,
SUM(CASE WHEN COALESCE(bytes_spilled_to_remote_storage, 0) > 0 THEN 1 ELSE 0 END) as spilled_to_remote_queries,
SUM(CASE WHEN UPPER(COALESCE(query_text, '')) NOT LIKE '%WHERE%'
AND UPPER(COALESCE(query_text, '')) LIKE '%SELECT%'
AND COALESCE(bytes_scanned, 0) > 1073741824 THEN 1 ELSE 0 END) as missing_where_clause_queries
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
AND warehouse_name IS NOT NULL
AND warehouse_name != ''
GROUP BY warehouse_name
),
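-- cost_efficiency: weekend and off-hours credit usage, queue wait times, and zero-credit query counts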
cost_efficiency AS (
SELECT
warehouse_name,
SUM(CASE WHEN DAYNAME(start_time) IN ('Sat', 'Sun')
THEN COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)
ELSE 0 END) as weekend_credits,
SUM(CASE WHEN HOUR(start_time) BETWEEN 22 AND 23 OR HOUR(start_time) BETWEEN 0 AND 6
THEN COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)
ELSE 0 END) as off_hours_credits,
AVG(COALESCE(queued_overload_time, 0) + COALESCE(queued_provisioning_time, 0) + COALESCE(queued_repair_time, 0)) as avg_queue_wait_time_ms,
SUM(CASE WHEN (COALESCE(queued_overload_time, 0) + COALESCE(queued_provisioning_time, 0) + COALESCE(queued_repair_time, 0)) > 30000
THEN 1 ELSE 0 END) as high_queue_time_queries,
COUNT(CASE WHEN (COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)) = 0
THEN query_id END) as zero_credit_queries
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
AND warehouse_name IS NOT NULL
AND warehouse_name != ''
GROUP BY warehouse_name
),
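-- recommendations: simple threshold-based performance and cost suggestions per warehouse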
recommendations AS (
SELECT
warehouse_name,
CASE
WHEN AVG(COALESCE(queued_overload_time, 0) + COALESCE(queued_provisioning_time, 0) + COALESCE(queued_repair_time, 0)) > 10000
THEN 'Consider increasing warehouse size or using multi-cluster'
WHEN SUM(CASE WHEN COALESCE(execution_time, 0) > 300000 THEN 1 ELSE 0 END) > 50
THEN 'Review long-running queries for optimization'
WHEN SUM(CASE WHEN COALESCE(bytes_spilled_to_remote_storage, 0) > 0 THEN 1 ELSE 0 END) > 20
THEN 'Increase warehouse size to reduce spilling'
ELSE 'Performance looks good'
END as performance_recommendation,
CASE
WHEN SUM(CASE WHEN DAYNAME(start_time) IN ('Sat', 'Sun')
THEN COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)
ELSE 0 END) >
SUM(COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)) * 0.3
THEN 'High weekend usage - consider auto-suspend'
WHEN AVG(COALESCE(credits_used_cloud_services, 0) + COALESCE(credits_used_compute, 0)) < 0.1
THEN 'Consider using smaller warehouse size'
ELSE 'Cost efficiency looks reasonable'
END as cost_recommendation
FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
AND warehouse_name IS NOT NULL
AND warehouse_name != ''
GROUP BY warehouse_name
)
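-- Final result: one row per warehouse combining all metric sets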
SELECT
COALESCE(wb.warehouse_id, HASH(wb.warehouse_name)) as warehouse_id,
wb.warehouse_name,
wb.total_queries,
wb.unique_users,
ROUND(wb.total_credits, 2) as total_credits,
ROUND(wb.avg_credits_per_query, 4) as avg_credits_per_query,
wb.active_days,
ROUND(wb.avg_execution_time_sec, 2) as avg_execution_time_sec,
ROUND(wb.total_gb_scanned, 2) as total_gb_scanned,
wb.total_rows_produced,
COALESCE(pb.queries_0_to_1_sec, 0) as queries_0_to_1_sec,
COALESCE(pb.queries_1_to_10_sec, 0) as queries_1_to_10_sec,
COALESCE(pb.queries_10_to_30_sec, 0) as queries_10_to_30_sec,
COALESCE(pb.queries_30_to_60_sec, 0) as queries_30_to_60_sec,
COALESCE(pb.queries_1_to_5_min, 0) as queries_1_to_5_min,
COALESCE(pb.queries_5_min_plus, 0) as queries_5_min_plus,
COALESCE(bp.select_star_on_large_tables, 0) as select_star_on_large_tables,
COALESCE(bp.unpartitioned_scan_queries, 0) as unpartitioned_scan_queries,
COALESCE(bp.cartesian_join_queries, 0) as cartesian_join_queries,
COALESCE(bp.zero_result_expensive_queries, 0) as zero_result_expensive_queries,
COALESCE(bp.failed_cancelled_queries, 0) as failed_cancelled_queries,
COALESCE(bp.high_compile_time_queries, 0) as high_compile_time_queries,
COALESCE(bp.spilled_to_local_queries, 0) as spilled_to_local_queries,
COALESCE(bp.spilled_to_remote_queries, 0) as spilled_to_remote_queries,
COALESCE(bp.missing_where_clause_queries, 0) as missing_where_clause_queries,
ROUND(COALESCE(ce.weekend_credits, 0), 2) as weekend_credits,
ROUND(COALESCE(ce.off_hours_credits, 0), 2) as off_hours_credits,
ROUND(COALESCE(ce.avg_queue_wait_time_ms, 0), 2) as avg_queue_wait_time_ms,
COALESCE(ce.high_queue_time_queries, 0) as high_queue_time_queries,
COALESCE(ce.zero_credit_queries, 0) as zero_credit_queries,
COALESCE(r.performance_recommendation, 'No recommendation') as performance_recommendation,
COALESCE(r.cost_recommendation, 'No recommendation') as cost_recommendation,
CURRENT_TIMESTAMP() as last_updated
FROM warehouse_base wb
LEFT JOIN performance_buckets pb ON wb.warehouse_name = pb.warehouse_name
LEFT JOIN bad_practices bp ON wb.warehouse_name = bp.warehouse_name
LEFT JOIN cost_efficiency ce ON wb.warehouse_name = ce.warehouse_name
LEFT JOIN recommendations r ON wb.warehouse_name = r.warehouse_name
ORDER BY wb.total_credits DESC;
-- Optional: View the results
SELECT * FROM FINOPS_WAREHOUSE_METRICS LIMIT 10;
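Once the table is built, a quick follow-up query can highlight the warehouses most likely to need attention. This is a minimal sketch against the table created above; the spill and queue-count thresholds are illustrative, not part of the original query:

-- Optional: Warehouses showing remote spilling or heavy queueing, largest spenders first
SELECT
warehouse_name,
total_credits,
spilled_to_remote_queries,
high_queue_time_queries,
performance_recommendation,
cost_recommendation
FROM FINOPS_WAREHOUSE_METRICS
WHERE spilled_to_remote_queries > 0
OR high_queue_time_queries > 10
ORDER BY total_credits DESC;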