use master
go

-- Create a stub first so the ALTER below always succeeds (re-running the script
-- preserves existing permissions instead of DROP/CREATE losing them).
if object_id('dbo.sp_estimate_data_compression_savings_all') is null
    exec ('create procedure dbo.sp_estimate_data_compression_savings_all as select ''dummy placeholder''');
go

-- Estimates ROW/PAGE compression savings for every qualifying table/index/partition
-- in the current database by looping sys.dm-catalog size data through
-- sp_estimate_data_compression_savings, then reports current vs. estimated sizes.
alter procedure dbo.sp_estimate_data_compression_savings_all
    @opts             int           = null,   -- Options bitmask that drives execution - set to 0 (zero) for no options...
                                              --   1 bit - If set, nonclustered indexes are included in the tests (by default, only heaps/clusters are included)
                                              --   2 bit - If set, we return information sorted by compression savings (MB) vs. object names...
                                              -- REQUIRED: null prints usage and exits.
    @data_compression nvarchar(10)  = 'ALL',  -- Type of compression to evaluate. Can be NONE, ROW, PAGE, or ALL (if ALL, we will check
                                              -- all compression types that the object currently isn't set in)
    @minimum_mbs      int           = 100,    -- Limit testing to objects/indexes that exceed this number of MegaBytes in used space...
    @object_name      nvarchar(300) = null    -- Object to check - specify null/default to check all objects matching other parameters...
/*
use AdventureWorks--2008;
exec dbo.sp_estimate_data_compression_savings_all
    @opts = 3
    ,@data_compression = default
    ,@minimum_mbs = 20
    ,@object_name = default
*/
as
set nocount on;
-- Read-uncommitted is acceptable here: we only read catalog metadata for estimates.
set transaction isolation level read uncommitted;

declare @return_value          int = 0,
        @sql                   nvarchar(max) = N'',
        @index_current         int,            -- current row id in #est_data_sizes being processed
        @index_max             int,            -- last row id in #est_data_sizes
        @schema_name           nvarchar(300),  -- per-row loop variables fed to sp_estimate_data_compression_savings
        @index_id              int,
        @partition_number      int,
        @data_compression_test nvarchar(10),   -- compression type under test for the current outer pass
        @data_compression_exec nvarchar(10);   -- compression type actually estimated for the current row (or NOEXEC)

-- Normalize input...
select @object_name      = case when len(@object_name) > 0 then @object_name else null end,
       @data_compression = upper(@data_compression),
       @opts             = case when @opts >= 0 then @opts else null end,
       @minimum_mbs      = case when @minimum_mbs > 0 then @minimum_mbs else 0 end;

-- Validate input...
if @opts is null
    goto USAGE;

-- BUGFIX: also reject an explicit NULL - "NULL not in (...)" evaluates UNKNOWN and
-- previously slipped past this check, corrupting every later comparison.
if @data_compression is null or @data_compression not in ('NONE','ROW','PAGE','ALL')
begin
    raiserror('Invalid @data_compression value of [%s] specified. 
Specify a valid value and try again.',16,1,@data_compression);
    select @return_value = -1;
    goto USAGE;
end

-- Meta-data initialization...
-- #est_data_sizes: one row per structure (object/index/partition) that qualifies for testing.
if object_id('tempdb..#est_data_sizes') > 0
    drop table #est_data_sizes;
create table #est_data_sizes (
    id                      int identity(1,1) not null primary key,
    objectName              nvarchar(300),
    schemaName              nvarchar(300),
    indexName               nvarchar(300),
    partitionNumber         int,
    indexId                 int,
    usedPages               bigint,
    dataPages               bigint,
    totalSpaceMB            bigint,
    usedSpaceMB             bigint,
    dataSpaceMB             bigint,
    partitionRowCnt         bigint,
    totalRowCnt             bigint,
    currentCompressionState nvarchar(10));

-- #est_data_comp_all_data: accumulated output of sp_estimate_data_compression_savings
-- (sizes are in KB, per that proc's result set), tagged with the compression type tested.
if object_id('tempdb..#est_data_comp_all_data') > 0
    drop table #est_data_comp_all_data;
create table #est_data_comp_all_data (
    id                                             int identity(1,1) not null primary key,
    objectName                                     nvarchar(300),
    schemaName                                     nvarchar(300),
    indexId                                        int,
    partitionNumber                                int,
    compressionState                               nvarchar(10),
    size_with_current_compression_setting          bigint,
    size_with_requested_compression_setting        bigint,
    sample_size_with_current_compression_setting   bigint,
    sample_size_with_requested_compression_setting bigint);

-- Build the SQL text that will get the current data for structures we want to report/estimate compression on...
select @sql = N'
with indexSums as (
    -- Total row count per object/index across all partitions.
    select object_name(p2.object_id) as oName,
           p2.object_id as object_id,
           p2.index_id as index_id,
           sum(rows) as totalRowCnt
    from sys.partitions p2
    ' + case when @opts & 1 = 0 then 'where p2.index_id <= 1 ' else '' end + '
    group by p2.object_id, p2.index_id
)
select object_name(i.object_id) as objectName,
       schema_name(o.schema_id) as schemaName,
       coalesce(i.name,''HEAP'') as indexName,
       i.index_id as indexId,
       p.partition_number as partitionNumber,
       sum(a.used_pages) as usedPages,
       sum(a.data_pages) as dataPages,
       (sum(a.used_pages) * 8) / 1024 as usedSpaceMB,
       (sum(a.data_pages) * 8) / 1024 as dataSpaceMB,
       max(p.rows) as partitionRowCnt,
       max(isum.totalRowCnt) as totalRowCnt,
       max(p.data_compression_desc) as dataCompressionDesc
from sys.indexes i
join indexSums isum on i.object_id = isum.object_id and i.index_id = isum.index_id
join sys.partitions p on i.object_id = p.object_id and i.index_id = p.index_id
join sys.allocation_units a on p.partition_id = a.container_id
join sys.objects o on i.object_id = o.object_id
where o.type not in(''S'',''IT'')
' + case when @opts & 1 = 0 then 'and i.index_id <= 1 ' else '' end
    -- BUGFIX: @object_name was accepted and documented but never applied as a filter
    -- (it was only reused as a loop variable below). Quotes are doubled to keep the
    -- concatenated literal injection-safe.
  + case when @object_name is not null
         then 'and o.name = N''' + replace(@object_name, '''', '''''') + ''' '
         else '' end + '
group by i.object_id, o.schema_id, i.index_id, i.name, p.partition_number
having ((sum(a.used_pages) * 8) / 1024) >= ' + cast(@minimum_mbs as nvarchar(50)) + ';';

-- BUGFIX: INSERT...EXEC maps result-set columns positionally and requires an exact
-- count match. The dynamic SELECT returns 12 columns, but the original column list
-- named only 8 (usedPages, dataPages, usedSpaceMB, dataSpaceMB were missing), which
-- fails at runtime with a column-count mismatch error.
insert #est_data_sizes
       (objectName, schemaName, indexName, indexId, partitionNumber,
        usedPages, dataPages, usedSpaceMB, dataSpaceMB,
        partitionRowCnt, totalRowCnt, currentCompressionState)
exec (@sql);

-- Process data: one outer pass per compression type to test...
while 1 = 1
begin
    -- Processing thresholds, and the compression type for this pass.
    -- ALL is a two-pass state machine: null -> ROW (first pass), ROW -> PAGE (second
    -- and final pass). A single explicit type runs exactly one pass.
    select @index_current = min(id),
           @index_max = max(id),
           @data_compression_test =
               case when @data_compression = 'ALL' then
                         case when @data_compression_test is null  then 'ROW'
                              when @data_compression_test <> 'ROW' then 'ROW'
                              when @data_compression_test = 'ROW'  then 'PAGE'
                         end
                    else @data_compression
               end
    from #est_data_sizes;

    -- Execute the compression estimations...
    while @index_current <= @index_max
    begin
        -- Get parameters for this structure...
        select @object_name = d.objectName,
               @schema_name = d.schemaName,
               @index_id = d.indexId,
               @partition_number = d.partitionNumber,
               -- Pick what to estimate:
               --   If we are doing a specific type and the structure is already in that
               --   state, there is nothing to do (NOEXEC).
               --   If we are doing ALL and the structure is already in this pass's state,
               --   estimate NONE instead (i.e. the cost of decompressing).
               @data_compression_exec =
                   case when @data_compression_test = d.currentCompressionState
                        then case when @data_compression = 'ALL' then 'NONE' else 'NOEXEC' end
                        else @data_compression_test
                   end
        from #est_data_sizes d
        where d.id = @index_current;

        -- Get the data for this loop if necessary...
        if (@@rowcount > 0) and (@data_compression_exec <> 'NOEXEC')
        begin
            insert #est_data_comp_all_data
                   (objectName, schemaName, indexId, partitionNumber,
                    size_with_current_compression_setting,
                    size_with_requested_compression_setting,
                    sample_size_with_current_compression_setting,
                    sample_size_with_requested_compression_setting)
            exec dbo.sp_estimate_data_compression_savings
                 @schema_name = @schema_name,
                 @object_name = @object_name,
                 @index_id = @index_id,
                 @partition_number = @partition_number,
                 @data_compression = @data_compression_exec;

            -- Tag the row just captured with the compression state it was estimated for.
            -- NOTE(review): this assumes the system proc returns exactly one row when all
            -- four parameters are supplied; SCOPE_IDENTITY() only tags the last insert.
            update #est_data_comp_all_data
            set compressionState = @data_compression_exec
            where id = scope_identity();
        end

        -- Increment...
        select @index_current += 1;
    end -- while @index_current <= @index_max

    -- Looping complete when we've processed the desired compression type or we've
    -- processed all types (we do PAGE last)...
    if (@data_compression <> 'ALL') or (@data_compression_test = 'PAGE')
        break;
end -- while 1=1

-- Report... (sizes from the estimate proc are KB, hence / 1024 for MB)
select cs.schemaName + '.' + cs.objectName as name,
       cs.indexName + ' ('
           + case when cs.indexId = 1 then 'CL'
                  when cs.indexId > 1 then 'NC'
                  else 'HP'
             end + ')' as indexName,
       cs.partitionNumber as partitionNumber,
       cs.currentCompressionState as currentCompression,
       es.compressionState as estimatedCompression,
       (es.size_with_current_compression_setting / 1024) as currentUsedSpaceMB,
       (es.size_with_requested_compression_setting / 1024) as estimatedUsedSpaceMB,
       ((es.size_with_current_compression_setting - es.size_with_requested_compression_setting) / 1024) as estimatedSavingsMB,
       -- BUGFIX: guard both divisions against zero-size estimates (empty structures
       -- can report 0 KB); NULLIF yields NULL instead of a divide-by-zero error.
       cast(cast(es.size_with_current_compression_setting as numeric(20,2))
            / nullif(cast(es.size_with_requested_compression_setting as numeric(20,2)), 0)
            as numeric(20,2)) as estimatedCompressionRatio,
       cast((((es.size_with_current_compression_setting - es.size_with_requested_compression_setting)
            / nullif(cast(es.size_with_current_compression_setting as numeric(20,2)), 0)) * 100)
            as numeric(20,2)) as estimatedSavingsPercent,
       cs.partitionRowCnt as currentPartitionRowCnt,
       cs.totalRowCnt as currentTotalRowCnt
from #est_data_sizes cs
left join #est_data_comp_all_data es
       on cs.schemaName = es.schemaName
      and cs.objectName = es.objectName
      and cs.indexId = es.indexId
      and cs.partitionNumber = es.partitionNumber
-- Bit 2 sorts by estimated savings first; otherwise the leading key is a constant
-- and the name/partition keys take over.
order by case when @opts & 2 = 2
              then (es.size_with_current_compression_setting - es.size_with_requested_compression_setting)
              else 1
         end desc,
         cs.objectName, cs.schemaName, cs.indexName, cs.partitionNumber,
         case when es.compressionState = 'PAGE' then 1
              when es.compressionState = 'NONE' then 2
              else 3
         end;

-- Cleanup...
if object_id('tempdb..#est_data_sizes') > 0
    drop table #est_data_sizes;
if object_id('tempdb..#est_data_comp_all_data') > 0
    drop table #est_data_comp_all_data;

-- All done...
return @return_value;

USAGE:
-- BUGFIX in the sample comments below: the first two samples had the @opts bit-1
-- semantics inverted (0 = heaps/clusters only; 1 = nonclustered included).
select @sql = N'
USAGE:
exec dbo.sp_estimate_data_compression_savings_all
    @opts = _int_
    ,@data_compression = _nvarchar_
    ,@minimum_mbs = _int_
    ,@object_name = _nvarchar_

PARAMETERS:
@opts, int
 - Options that drive execution for the proc - set to 0 (zero) for no options...
 - 1 bit - If set, nonclustered indexes are included in the tests (by default, only heaps/clusters are included)
 - 2 bit - If set, we return information sorted by compression savings (MB) vs. object names...
 - REQUIRED (No Default)
@data_compression, nvarchar(10)
 - Type of compression to evaluate. Can be NONE, ROW, PAGE, or ALL (if ALL, we will check
 - all compression types that the object currently isn''t set in)
 - DEFAULT = ''ALL''
@minimum_mbs, int
 - Limit testing to objects/indexes that exceed this number of MegaBytes in used space
 - DEFAULT = 100
@object_name, nvarchar(300)
 - Object to check - specify null/default to check all objects matching other paramters

SAMPLES:
-- Check all objects, only heaps/clustered indexes, all compression types for structures with over 100mb in current used space...
exec dbo.sp_estimate_data_compression_savings_all
    @opts = 0
    ,@data_compression = default
    ,@minimum_mbs = default
    ,@object_name = default;
-- Check all objects, all indexes (nonclustered included), all compression types for structures with over 100mb in current used space...
exec dbo.sp_estimate_data_compression_savings_all
    @opts = 1
    ,@data_compression = default
    ,@minimum_mbs = default
    ,@object_name = default;
-- Check all objects, all indexes (nonclustered included), PAGE compression type only for structures with over 253mb in current used space...
exec dbo.sp_estimate_data_compression_savings_all
    @opts = 1
    ,@data_compression = N''PAGE''
    ,@minimum_mbs = 253
    ,@object_name = default;
-- Check object(s) named ''testObject'', all indexes (nonclustered included), PAGE compression type only for structures with over 253mb in current used space...
exec dbo.sp_estimate_data_compression_savings_all
    @opts = 1
    ,@data_compression = N''PAGE''
    ,@minimum_mbs = 253
    ,@object_name = ''testObject'';
';
print @sql;
return @return_value;
go