
Merge branch 'master' into patch-1

Pedro Lopes committed 6 years ago (commit 4a246f23df)
83 changed files with 1685 additions and 137 deletions
  1. AdaptiveIndexDefrag/CHANGELOG.txt (+4 -1)
  2. AdaptiveIndexDefrag/README.md (+2 -0)
  3. AdaptiveIndexDefrag/usp_AdaptiveIndexDefrag.sql (+34 -23)
  4. Always-On/FailoverDetection/FailoverDetector.zip (BIN)
  5. Always-On/FailoverDetection/README.md (+4 -0)
  6. BPCheck/Changelog.txt (+6 -1)
  7. BPCheck/Check_BP_Servers.sql (+87 -89)
  8. MaintenanceSolution/2_usp_AdaptiveIndexDefrag.sql (+34 -23)
  9. SQLOps_Ext/view_CompressionGains_singleDS.sql (+143 -0)
  10. Sessions/PASS2018/README.md (+1 -0)
  11. Sessions/PASS2018/pre-con-modernizing-you-sql-server/auto_tuning.zip (BIN)
  12. Sessions/PASS2018/pre-con-modernizing-you-sql-server/containers.zip (BIN)
  13. Sessions/PASS2018/pre-con-modernizing-you-sql-server/memory_grants.zip (BIN)
  14. Sessions/PASS2018/pre-con-modernizing-you-sql-server/query_store.zip (BIN)
  15. Sessions/PASS2018/pre-con-modernizing-you-sql-server/tempdb_stress.zip (BIN)
  16. Sessions/PASS2018/pre-con-modernizing-you-sql-server/utilities.zip (BIN)
  17. Sessions/PASS2018/sql-server-modernization-done-the-right-way/SQL Server Migrations Done the Right Way.pdf (BIN)
  18. Sessions/PASS2018/tempdb-the-good-the-bad-and-the-ugly/TempDB - The Good, The Bad and The Ugly.pdf (BIN)
  19. Sessions/SQLDay-2017/Enhancements-that-will-make-your-SQL-database-engine-roar---SP1.pdf (BIN)
  20. Sessions/SQLDay-2017/Enhancements-that-will-make-your-SQL-database-engine-roar-Demos.zip (BIN)
  21. Sessions/SQLDay-2017/Gems-to-help-you-troubleshoot-query-performance-Demos.zip (BIN)
  22. Sessions/SQLDay-2017/Gems-to-help-you-troubleshoot-query-performance.pdf (BIN)
  23. Sessions/SQLIntersection-2018/Gems to Help You Troubleshoot Query Performance v2.pdf (BIN)
  24. Sessions/SQLIntersection-2018/Practical guidance to make your tier-1 SQL Server roar.pdf (BIN)
  25. Sessions/SQLIntersection-2018/SQL-Server-Upgrades-Done-the-Right-Way.pdf (BIN)
  26. Sessions/SQLIntersection-2018/automatic-tuning/README.md (+32 -0)
  27. Sessions/SQLIntersection-2018/automatic-tuning/at-demo.zip (BIN)
  28. Sessions/SQLIntersection-2018/live_query_troubleshooting/live_query_troubleshooting.zip (BIN)
  29. Sessions/SQLIntersection-2018/new_xevents/ExtendedEvents.sql (+130 -0)
  30. Sessions/SQLIntersection-2018/query-tuning-assistant/README.md (+23 -0)
  31. Sessions/SQLIntersection-2018/query-tuning-assistant/qta-demo.zip (BIN)
  32. Sessions/Winter-Ready-2019/Lab-AutoTuning.md (+60 -0)
  33. Sessions/Winter-Ready-2019/Lab-Containers.md (+328 -0)
  34. Sessions/Winter-Ready-2019/Lab-IQP.md (+504 -0)
  35. Sessions/Winter-Ready-2019/Lab-Kubernetes.md (+72 -0)
  36. Sessions/Winter-Ready-2019/Lab-Memory-OptimizedTempDB.md (+98 -0)
  37. Sessions/Winter-Ready-2019/Lab-PMEM.md (+6 -0)
  38. Sessions/Winter-Ready-2019/Lab-QTA.md (+117 -0)
  39. Sessions/Winter-Ready-2019/Labs/Lab-AutoTuning.zip (BIN)
  40. Sessions/Winter-Ready-2019/Labs/Lab-Containers.zip (BIN)
  41. Sessions/Winter-Ready-2019/Labs/Lab-Memory-OptimizedTempDB.zip (BIN)
  42. Sessions/Winter-Ready-2019/Labs/Lab-QTA.zip (BIN)
  43. Sessions/Winter-Ready-2019/media/ACD_Properties.png (BIN)
  44. Sessions/Winter-Ready-2019/media/ActualPlan.png (BIN)
  45. Sessions/Winter-Ready-2019/media/ApproxCount.png (BIN)
  46. Sessions/Winter-Ready-2019/media/ApproxCount_Warm.png (BIN)
  47. Sessions/Winter-Ready-2019/media/BMRS_Batch.png (BIN)
  48. Sessions/Winter-Ready-2019/media/BMRS_Row.png (BIN)
  49. Sessions/Winter-Ready-2019/media/CD_Properties.png (BIN)
  50. Sessions/Winter-Ready-2019/media/Container-DockerCommands.png (BIN)
  51. Sessions/Winter-Ready-2019/media/Container-DockerCompose.png (BIN)
  52. Sessions/Winter-Ready-2019/media/Container-DockerComposeUp.png (BIN)
  53. Sessions/Winter-Ready-2019/media/Container-Dockerfile.png (BIN)
  54. Sessions/Winter-Ready-2019/media/Container-ExecSQLCMD.png (BIN)
  55. Sessions/Winter-Ready-2019/media/Container-GettingStartedOpsStudio.png (BIN)
  56. Sessions/Winter-Ready-2019/media/Container-GettingStartedResults.png (BIN)
  57. Sessions/Winter-Ready-2019/media/Container-RestoredDB.png (BIN)
  58. Sessions/Winter-Ready-2019/media/MGF_Properties_FirstExec.png (BIN)
  59. Sessions/Winter-Ready-2019/media/MGF_Spill.png (BIN)
  60. Sessions/Winter-Ready-2019/media/StartupParametersNoFlag.png (BIN)
  61. Sessions/Winter-Ready-2019/media/StartupParametersWithFlag.png (BIN)
  62. Sessions/Winter-Ready-2019/media/TV_Legacy.png (BIN)
  63. Sessions/Winter-Ready-2019/media/TV_New.png (BIN)
  64. Sessions/Winter-Ready-2019/media/UDF_Inlined.png (BIN)
  65. Sessions/Winter-Ready-2019/media/UDF_NotInlined.png (BIN)
  66. Sessions/Winter-Ready-2019/media/iqpfeaturefamily.png (BIN)
  67. Sessions/Winter-Ready-2019/media/new_query.png (BIN)
  68. Sessions/Winter-Ready-2019/media/objectexplorerquerystore_sql17.png (BIN)
  69. Sessions/Winter-Ready-2019/media/qta-new-session-settings.png (BIN)
  70. Sessions/Winter-Ready-2019/media/qta-new-session-setup.png (BIN)
  71. Sessions/Winter-Ready-2019/media/qta-new-session-tuning.png (BIN)
  72. Sessions/Winter-Ready-2019/media/qta-session-management.png (BIN)
  73. Sessions/Winter-Ready-2019/media/qta-step2-substep1.png (BIN)
  74. Sessions/Winter-Ready-2019/media/qta-step2-substep2-prompt.png (BIN)
  75. Sessions/Winter-Ready-2019/media/qta-step2-substep2.png (BIN)
  76. Sessions/Winter-Ready-2019/media/qta-step2-substep3.png (BIN)
  77. Sessions/Winter-Ready-2019/media/qta-step3.png (BIN)
  78. Sessions/Winter-Ready-2019/media/qta-step4.png (BIN)
  79. Sessions/Winter-Ready-2019/media/qta-step5-rollback.png (BIN)
  80. Sessions/Winter-Ready-2019/media/qta-step5.png (BIN)
  81. Sessions/Winter-Ready-2019/media/qta-usage.png (BIN)
  82. Sessions/Winter-Ready-2019/media/query-store-force-plan.png (BIN)
  83. Sessions/Winter-Ready-2019/media/query-store-usage-5.png (BIN)

+ 4 - 1
AdaptiveIndexDefrag/CHANGELOG.txt

@@ -134,4 +134,7 @@ v1.6.6.4 - 6/25/2018 - Tested with Azure SQL Managed Instance;
 						Added extra debug output.
 v1.6.6.5 - 9/23/2018 - Fixed issue where table that is compressed would become uncompressed (by d-moloney);
 						Extended row mode counter info data type in debug mode (by d-moloney);
-						Fixed issue with @statsThreshold and large tables (by AndrewG2)
+						Fixed issue with @statsThreshold and large tables (by AndrewG2).
+v1.6.6.6 - 10/28/2018 - Extended 2nd row mode counter info data type in debug mode (by CodyFitzpatrick);
+						Fixed compression data missing in working table (by ravseer).
+						

+ 2 - 0
AdaptiveIndexDefrag/README.md

@@ -11,6 +11,8 @@ Yes, but it is used as a part of a full maintenance solution that also handles d
 **On what version of SQL can I use it?**
 This procedure can be used from SQL Server 2005 SP2 onwards, because of the DMVs and DMFs involved.
 
+NOTE: no longer guaranteed to work with SQL Server 2005. Use at your own discretion.
+
 **How to deploy it?**
 Starting with v1.3.7, in whichever database context you choose to create usp_AdaptiveIndexDefrag and its supporting objects, open the attached script and either keep the @deploymode variable at the top in upgrade mode (preserving all historic data), or change it for new deployments or to overwrite old versions and objects (disregarding historic data).
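
As a quick pre-flight check of the SQL Server 2005 SP2+ requirement noted above, something along these lines can be run before deploying; a minimal sketch (major version 9 corresponds to SQL Server 2005, and the SP2 build-level check is omitted for brevity):

    -- Sketch only: verify the instance major version before deploying usp_AdaptiveIndexDefrag.
    DECLARE @prodver NVARCHAR(128);
    SELECT @prodver = CONVERT(NVARCHAR(128), SERVERPROPERTY('ProductVersion'));
    IF CONVERT(INT, PARSENAME(@prodver, 4)) < 9
        RAISERROR('usp_AdaptiveIndexDefrag requires SQL Server 2005 SP2 or later.', 16, 1);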
 

+ 34 - 23
AdaptiveIndexDefrag/usp_AdaptiveIndexDefrag.sql

@@ -679,7 +679,10 @@ v1.6.6.4 - 6/25/2018 - Tested with Azure SQL Managed Instance;
 						Added extra debug output.
 v1.6.6.5 - 9/23/2018 - Fixed issue where table that is compressed would become uncompressed (by d-moloney);
 						Extended row mode counter info data type in debug mode (by d-moloney);
-						Fixed issue with @statsThreshold and large tables (by AndrewG2)
+						Fixed issue with @statsThreshold and large tables (by AndrewG2).
+v1.6.6.6 - 10/28/2018 - Extended 2nd row mode counter info data type in debug mode (by CodyFitzpatrick);
+						Fixed compression data missing in working table (by ravseer).
+
 IMPORTANT:
 Execute in the database context of where you created the log and working tables.			
 										
@@ -1240,7 +1243,7 @@ BEGIN SET @hasIXsOUT = 1 END ELSE BEGIN SET @hasIXsOUT = 0 END'
 				, @currCompression NVARCHAR(60)
 
 		/* Initialize variables */	
-		SELECT @AID_dbID = DB_ID(), @startDateTime = GETDATE(), @endDateTime = DATEADD(minute, @timeLimit, GETDATE()), @operationFlag = NULL, @ver = '1.6.6.5';
+		SELECT @AID_dbID = DB_ID(), @startDateTime = GETDATE(), @endDateTime = DATEADD(minute, @timeLimit, GETDATE()), @operationFlag = NULL, @ver = '1.6.6.6';
 	
 		/* Create temporary tables */	
 		IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblIndexDefragDatabaseList'))
@@ -1712,9 +1715,6 @@ OPTION (MAXDOP 2)'
 						WHERE objectID = @objectID AND indexID = @indexID AND partitionNumber = @partitionNumber
 					END
 				END;
-				
-				IF @debugMode = 1
-				RAISERROR('    Looking up additional index information...', 0, 42) WITH NOWAIT;
 
 				/* Look up index status for various purposes */	
 				SELECT @updateSQL = N'UPDATE ids		
@@ -1727,27 +1727,38 @@ WHERE o.object_id = ids.objectID AND i.index_id = ids.indexID AND i.type > 0
 AND o.object_id NOT IN (SELECT sit.object_id FROM [' + DB_NAME(@dbID) + '].sys.internal_tables AS sit)
 AND ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
 
+				IF @debugMode = 1
+				BEGIN
+					RAISERROR('    Looking up additional index information (pass 1)...', 0, 42) WITH NOWAIT;
+					--PRINT @updateSQL
+				END
+				
 				EXECUTE sp_executesql @updateSQL;
 				
-				IF @scanMode = 'LIMITED'
+				IF @sqlmajorver = 9
 				BEGIN
-					IF @sqlmajorver = 9
-					BEGIN
-						SELECT @updateSQL = N'UPDATE ids
-	SET [record_count] = [rows], [compression_type] = N''''
-	FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
-	INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
-	WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
-					END
-					ELSE
-					BEGIN
-						SELECT @updateSQL = N'UPDATE ids
-	SET [record_count] = [rows], [compression_type] = [data_compression_desc] END
-	FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
-	INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
-	WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
-					END
+					SELECT @updateSQL = N'UPDATE ids
+SET [record_count] = [rows], [compression_type] = N''''
+FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
+INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
+WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
+				END
+				ELSE
+				BEGIN
+					SELECT @updateSQL = N'UPDATE ids
+SET [record_count] = [rows], [compression_type] = [data_compression_desc]
+FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
+INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
+WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
 				END
+
+				IF @debugMode = 1
+				BEGIN
+					RAISERROR('    Looking up additional index information (pass 2)...', 0, 42) WITH NOWAIT;
+					--PRINT @updateSQL
+				END
+				
+				EXECUTE sp_executesql @updateSQL;
 				
 				IF @debugMode = 1
 				RAISERROR('    Looking up additional statistic information...', 0, 42) WITH NOWAIT;
@@ -2774,7 +2785,7 @@ WHERE system_type_id IN (34, 35, 99) ' + CASE WHEN @sqlmajorver < 11 THEN 'OR ma
 
 				IF @debugMode = 1
 				BEGIN
-					SELECT @debugMessage = '     Found a row modification counter of ' + CONVERT(NVARCHAR(10), @rowmodctr) + ' and ' + CONVERT(NVARCHAR(10), CASE WHEN @rows IS NOT NULL AND @rows < @record_count THEN @rows ELSE @record_count END) + ' rows' + CASE WHEN @stats_isincremental = 1 THEN ' on partition ' + CONVERT(NVARCHAR(10), @partitionNumber) ELSE '' END + '...';
+					SELECT @debugMessage = '     Found a row modification counter of ' + CONVERT(NVARCHAR(15), @rowmodctr) + ' and ' + CONVERT(NVARCHAR(15), CASE WHEN @rows IS NOT NULL AND @rows < @record_count THEN @rows ELSE @record_count END) + ' rows' + CASE WHEN @stats_isincremental = 1 THEN ' on partition ' + CONVERT(NVARCHAR(15), @partitionNumber) ELSE '' END + '...';
 					RAISERROR(@debugMessage, 0, 42) WITH NOWAIT;
 					--select @debugMessage
 				END
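
Relating to the IMPORTANT note in this script's header (execute in the database context where the log and working tables were created), a minimal invocation sketch; the database name below is hypothetical and all parameters are left at their defaults:

    USE [MyMaintenanceDB];  -- hypothetical database holding the tbl_AdaptiveIndexDefrag_* log and working tables
    GO
    EXEC dbo.usp_AdaptiveIndexDefrag;  -- defaults only; see the procedure header for the full parameter list
    GO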

BIN
Always-On/FailoverDetection/FailoverDetector.zip


+ 4 - 0
Always-On/FailoverDetection/README.md

@@ -0,0 +1,4 @@
+## Always On Availability Groups Failover Detector Utility
+Root folder for the FailoverDetector Utility. For more information, refer to this blog post:
+https://blogs.msdn.microsoft.com/sql_server_team/failover-detection-utility-availability-group-failover-analysis-made-easy/
+

+ 6 - 1
BPCheck/Changelog.txt

@@ -345,4 +345,9 @@ v2.2.2 - 10/26/2017 - Corrected auto soft NUMA reporting wrong status (thanks Bj
 						Fixed CPU Affinity bit mask.
 v2.2.2.1 - 1/11/2018 - Fixed issues with unicode characters (thanks Brent Ozar);
 						Fixed max server memory calculations;
-						Added check for Database Health Detection in Server_checks section (thanks Anders Uhl Pedersen).
+						Added check for Database Health Detection in Server_checks section (thanks Anders Uhl Pedersen).
+v2.2.3 - 10/27/2018 - Fixed performance checks duplicate results issue on SQL 2016+. 
+v2.2.3.1 - 10/28/2018 - Fixed variable issue.
+v2.2.3.2 - 10/28/2018 - Enhanced power scheme check (thanks sivey42).
+v2.2.3.4 - 10/29/2018 - Fixed latches syntax error (thanks Dimitri Artemov);
+						Improved handling of conversions.

+ 87 - 89
BPCheck/Check_BP_Servers.sql

@@ -405,6 +405,11 @@ v2.2.2 - 10/26/2017 - Corrected auto soft NUMA reporting wrong status (thanks Bj
 v2.2.2.1 - 1/11/2018 - Fixed issues with unicode characters (thanks Brent Ozar);
 						Fixed max server memory calculations;
 						Added check for Database Health Detection in Server_checks section (thanks Anders Uhl Pedersen).
+v2.2.3 - 10/27/2018 - Fixed performance checks duplicate results issue on SQL 2016+. 
+v2.2.3.1 - 10/28/2018 - Fixed variable issue.
+v2.2.3.2 - 10/28/2018 - Enhanced power scheme check (thanks sivey42).
+v2.2.3.4 - 10/29/2018 - Fixed latches syntax error (thanks Dimitri Artemov);
+						Improved handling of conversions.
 
 PURPOSE: Checks SQL Server in scope for some of the most common skewed Best Practices. Valid from SQL Server 2005 onwards.
 
@@ -544,7 +549,7 @@ RAISERROR (N'Starting Pre-requisites section', 10, 1) WITH NOWAIT
 --------------------------------------------------------------------------------------------------------------------------------
 -- Pre-requisites section
 --------------------------------------------------------------------------------------------------------------------------------
-DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(500), @sqlmajorver int
+DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600), @sqlmajorver int
 
 /*
 Reference: SERVERPROPERTY for sql major, minor and build versions supported after:
@@ -3056,6 +3061,7 @@ RAISERROR (N'|-Starting Server Checks', 10, 1) WITH NOWAIT
 -- Power plan subsection
 --------------------------------------------------------------------------------------------------------------------------------
 RAISERROR (N'  |-Starting Power plan', 10, 1) WITH NOWAIT
+
 DECLARE @planguid NVARCHAR(64), @powerkey1 NVARCHAR(255), @powerkey2 NVARCHAR(255) 
 --SELECT @powerkey = 'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\ControlPanel\NameSpace\{025A5937-A6BE-4686-A844-36FE4BEC8B6D}'
 --SELECT @powerkey = 'SYSTEM\CurrentControlSet\Control\Power\User\Default\PowerSchemes'
@@ -3065,15 +3071,13 @@ SELECT @powerkey2 = 'SYSTEM\CurrentControlSet\Control\Power\User\PowerSchemes'
 IF CONVERT(DECIMAL(3,1), @osver) >= 6.0
 BEGIN
 	BEGIN TRY
-		--EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', @powerkey, 'PreferredPlan', @planguid OUTPUT, NO_OUTPUT
+		-- Check if it was set by GPO; if not, look in user settings
 		EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', @powerkey1, 'ActivePowerScheme', @planguid OUTPUT, NO_OUTPUT
-		
-		-- Check if was set by GPO, if not, look in user settings
-		IF @planguid IS NULL
-		BEGIN
-			EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', @powerkey2, 'ActivePowerScheme', @planguid OUTPUT, NO_OUTPUT
-		END
 
+		IF @planguid IS NULL 
+		BEGIN 
+			EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', @powerkey2, 'ActivePowerScheme', @planguid OUTPUT, NO_OUTPUT 
+		END 
 	END TRY
 	BEGIN CATCH
 		SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
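
To interpret the @planguid value read above, it can be compared against the standard Windows power scheme GUIDs; a minimal self-contained sketch (the GUID assigned below is only an example value):

    -- Sketch only: map an ActivePowerScheme GUID to a friendly name.
    DECLARE @planguid NVARCHAR(64);
    SET @planguid = N'8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c';  -- example: value read by xp_regread above
    SELECT CASE @planguid
            WHEN N'381b4222-f694-41f0-9685-ff5bb260df2e' THEN 'Balanced'
            WHEN N'8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c' THEN 'High performance'
            WHEN N'a1841308-3541-4fab-bc81-f71556f20b4a' THEN 'Power saver'
            ELSE 'Custom or unknown plan' END AS power_plan;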
@@ -8084,14 +8088,39 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 	HAVING (t2.wait_time_ms-t1.wait_time_ms) > 0
 	ORDER BY wait_time_s DESC;
 
+	-- SOS_SCHEDULER_YIELD = Might indicate CPU pressure if very high overall percentage. Check yielding conditions in http://technet.microsoft.com/en-us/library/cc917684.aspx
+	-- THREADPOOL = Look for high blocking or contention problems with workers. This will not show up in sys.dm_exec_requests;
+	-- LATCH = indicates contention for access to some non-page structures. ACCESS_METHODS_DATASET_PARENT, ACCESS_METHODS_SCAN_RANGE_GENERATOR or NESTING_TRANSACTION_FULL latches indicate parallelism issues;
+	-- PAGELATCH = indicates contention for access to in-memory copies of pages, like PFS, SGAM and GAM; 
+	-- PAGELATCH_UP = Does the filegroup have enough files? Contention in PFS?
+	-- PAGELATCH_EX = Contention while doing many UPDATE statements against small tables? 
+	-- PAGELATCH_EX = Many concurrent INSERT statements into a table that has an index on an IDENTITY or NEWSEQUENTIALID column? -> http://blogs.msdn.com/b/blogdoezequiel/archive/2013/05/23/pagelatch-ex-waits-and-heavy-inserts.aspx
+	-- PAGEIOLATCH = indicates IO problems, or BP pressure.
+	-- PREEMPTIVE_OS_WRITEFILEGATHERER (2008+) = usually autogrow scenarios, usually together with WRITELOG;
+	-- IO_COMPLETION = usually TempDB spilling; 
+	-- ASYNC_IO_COMPLETION = usually when not using IFI, or waiting on backups.
+	-- DISKIO_SUSPEND = High wait times here indicate the SNAPSHOT BACKUP may be taking longer than expected. Typically the delay is within the VDI application performing the snapshot backup;
+	-- BACKUPIO = check for slow backup media, like Tapes or Disks;
+	-- BACKUPBUFFER = usually when backing up to Tape;
+	-- Check sys.dm_os_waiting_tasks for Exchange wait types in http://technet.microsoft.com/en-us/library/ms188743.aspx;
+	-- Wait Resource e_waitPipeNewRow in CXPACKET waits = Producer waiting on consumer for a packet to fill;
+	-- Wait Resource e_waitPipeGetRow in CXPACKET waits = Consumer waiting on producer to fill a packet;
+	-- CXPACKET = if OLTP, check for parallelism issues if above 20 pct. If combined with a high number of PAGEIOLATCH_XX waits, it could be large parallel table scans going on because of incorrect non-clustered indexes, or out-of-date statistics causing a bad query plan;
+	-- WRITELOG = log management system waiting for a log flush to disk. Examine the IO latency for the log file
+	-- CMEMTHREAD =  indicates that the rate of insertion of entries into the plan cache is very high and there is contention -> http://blogs.msdn.com/b/psssql/archive/2012/12/20/how-it-works-cmemthread-and-debugging-them.aspx
+	-- SOS_RESERVEDMEMBLOCKLIST = look for procedures with a large number of parameters, or queries with a long list of expression values specified in an IN clause, which would require multi-page allocations
+	-- RESOURCE_SEMAPHORE_SMALL_QUERY or RESOURCE_SEMAPHORE = queries are waiting for execution memory. Look for plans with excessive hashing or sorts.
+	-- RESOURCE_SEMAPHORE_QUERY_COMPILE = usually high compilation or recompilation scenario (higher ratio of prepared plans vs. compiled plans). On x64 usually memory hungry queries and compiles. On x86 perhaps short on VAS. -> http://technet.microsoft.com/en-us/library/cc293620.aspx
+	-- DBMIRROR_DBM_MUTEX = indicates contention for the send buffer that database mirroring shares between all the mirroring sessions. 
+	
 	SELECT 'Performance_checks' AS [Category], 'Waits_Last_' + CONVERT(VARCHAR(3), @duration) + 's' AS [Information], W1.wait_type, 
-		CAST(W1.wait_time_s AS DECIMAL(12, 2)) AS wait_time_s,
-		CAST(W1.signal_wait_time_s AS DECIMAL(12, 2)) AS signal_wait_time_s,
-		CAST(W1.resource_wait_time_s AS DECIMAL(12, 2)) AS resource_wait_time_s,
-		CAST(W1.pct AS DECIMAL(12, 2)) AS pct,
-		CAST(SUM(W2.pct) AS DECIMAL(12, 2)) AS overall_running_pct,
-		CAST(W1.signal_wait_pct AS DECIMAL(12, 2)) AS signal_wait_pct,
-		CAST(W1.resource_wait_pct AS DECIMAL(12, 2)) AS resource_wait_pct,
+		CAST(W1.wait_time_s AS DECIMAL(14, 2)) AS wait_time_s,
+		CAST(W1.signal_wait_time_s AS DECIMAL(14, 2)) AS signal_wait_time_s,
+		CAST(W1.resource_wait_time_s AS DECIMAL(14, 2)) AS resource_wait_time_s,
+		CAST(W1.pct AS DECIMAL(14, 2)) AS pct,
+		CAST(SUM(W2.pct) AS DECIMAL(14, 2)) AS overall_running_pct,
+		CAST(W1.signal_wait_pct AS DECIMAL(14, 2)) AS signal_wait_pct,
+		CAST(W1.resource_wait_pct AS DECIMAL(14, 2)) AS resource_wait_pct,
 		CASE WHEN W1.wait_type = N'SOS_SCHEDULER_YIELD' THEN N'CPU' 
 			WHEN W1.wait_type = N'THREADPOOL' THEN 'CPU - Unavailable Worker Threads'
 			WHEN W1.wait_type LIKE N'LCK_%' OR W1.wait_type = N'LOCK' THEN N'Lock' 
@@ -8106,7 +8135,6 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type LIKE N'CLR%' OR W1.wait_type LIKE N'SQLCLR%' THEN N'SQL CLR' 
 			WHEN W1.wait_type LIKE N'DBMIRROR%' OR W1.wait_type = N'MIRROR_SEND_MESSAGE' THEN N'Mirroring' 
 			WHEN W1.wait_type LIKE N'XACT%' or W1.wait_type LIKE N'DTC%' or W1.wait_type LIKE N'TRAN_MARKLATCH_%' or W1.wait_type LIKE N'MSQL_XACT_%' or W1.wait_type = N'TRANSACTION_MUTEX' THEN N'Transaction' 
-			--WHEN W1.wait_type LIKE N'SLEEP_%' or W1.wait_type IN (N'LAZYWRITER_SLEEP', N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SQLTRACE_WAIT_ENTRIES', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'XE_DISPATCHER_WAIT', N'REQUEST_FOR_DEADLOCK_SEARCH', N'LOGMGR_QUEUE', N'ONDEMAND_TASK_QUEUE', N'CHECKPOINT_QUEUE', N'XE_TIMER_EVENT') THEN N'Idle' 
 			WHEN W1.wait_type LIKE N'PREEMPTIVE_%' THEN N'External APIs or XPs' 
 			WHEN W1.wait_type LIKE N'BROKER_%' AND W1.wait_type <> N'BROKER_RECEIVE_WAITFOR' THEN N'Service Broker' 
 			WHEN W1.wait_type IN (N'LOGMGR', N'LOGBUFFER', N'LOGMGR_RESERVE_APPEND', N'LOGMGR_FLUSH', N'LOGMGR_PMM_LOG', N'CHKPT', N'WRITELOG') THEN N'Tran Log IO' 
@@ -8119,7 +8147,6 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type IN (N'BACKUPIO', N'BACKUPBUFFER') THEN 'Backup IO'
 			WHEN W1.wait_type LIKE N'SE_REPL_%' or W1.wait_type LIKE N'REPL_%'  or W1.wait_type IN (N'REPLICA_WRITES', N'FCB_REPLICA_WRITE', N'FCB_REPLICA_READ', N'PWAIT_HADRSIM') THEN N'Replication' 
 			WHEN W1.wait_type IN (N'LOG_RATE_GOVERNOR', N'POOL_LOG_RATE_GOVERNOR', N'HADR_THROTTLE_LOG_RATE_GOVERNOR', N'INSTANCE_LOG_RATE_GOVERNOR') THEN N'Log Rate Governor' 
-			--	WHEN W1.wait_type LIKE N'SLEEP_%' OR W1.wait_type IN(N'LAZYWRITER_SLEEP', N'SQLTRACE_BUFFER_FLUSH', N'WAITFOR', N'WAIT_FOR_RESULTS', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SLEEP_TASK', N'SLEEP_SYSTEMTASK') THEN N'Sleep'
 			WHEN W1.wait_type = N'REPLICA_WRITE' THEN 'Snapshots'
 			WHEN W1.wait_type = N'WAIT_XTP_OFFLINE_CKPT_LOG_IO' OR W1.wait_type = N'WAIT_XTP_CKPT_CLOSE' THEN 'In-Memory OLTP Logging'
 			WHEN W1.wait_type LIKE N'QDS%' THEN N'Query Store'
@@ -8128,9 +8155,12 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type LIKE N'COLUMNSTORE%' THEN N'Columnstore'
 		ELSE N'Other' END AS 'wait_category'
 	FROM #tblFinalWaits AS W1 INNER JOIN #tblFinalWaits AS W2 ON W2.rn <= W1.rn
-	GROUP BY W1.rn, W1.wait_type, W1.wait_time_s, W1.pct, W1.signal_wait_time_s, W1.resource_wait_time_s, W1.signal_wait_pct, W1.resource_wait_pct
-	HAVING W1.wait_time_s >= 0.01 AND (SUM(W2.pct)-W1.pct) < 100  -- percentage threshold
-	ORDER BY W1.rn; 
+	GROUP BY W1.rn, W1.wait_type, CAST(W1.wait_time_s AS DECIMAL(14, 2)), CAST(W1.pct AS DECIMAL(14, 2)), CAST(W1.signal_wait_time_s AS DECIMAL(14, 2)), CAST(W1.resource_wait_time_s AS DECIMAL(14, 2)), CAST(W1.signal_wait_pct AS DECIMAL(14, 2)), CAST(W1.resource_wait_pct AS DECIMAL(14, 2))
+	HAVING CAST(W1.wait_time_s as DECIMAL(14, 2)) >= 0.01 AND (SUM(W2.pct)-CAST(W1.pct AS DECIMAL(14, 2))) < 100  -- percentage threshold
+	ORDER BY W1.rn 
+	
+	SET @params = N'@maxservermemIN bigint, @minservermemIN bigint, @systemmemIN bigint, @systemfreememIN bigint, @commit_targetIN bigint, @committedIN bigint';
+	EXECUTE sp_executesql @sqlcmd, @params, @maxservermemIN=@maxservermem
 
 	;WITH Waits AS
 	(SELECT wait_type, wait_time_ms / 1000. AS wait_time_s,
@@ -8153,26 +8183,18 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			AND wait_type NOT LIKE N'SLEEP_%'
 		GROUP BY wait_type, wait_time_ms, signal_wait_time_ms)
 	SELECT 'Performance_checks' AS [Category], 'Cumulative_Waits' AS [Information], W1.wait_type, 
-		CAST(W1.wait_time_s AS DECIMAL(12, 2)) AS wait_time_s,
-		CAST(W1.signal_wait_time_s AS DECIMAL(12, 2)) AS signal_wait_time_s,
-		CAST(W1.resource_wait_time_s AS DECIMAL(12, 2)) AS resource_wait_time_s,
-		CAST(W1.pct AS DECIMAL(12, 2)) AS pct,
-		CAST(SUM(W2.pct) AS DECIMAL(12, 2)) AS overall_running_pct,
-		CAST(W1.signal_wait_pct AS DECIMAL(12, 2)) AS signal_wait_pct,
-		CAST(W1.resource_wait_pct AS DECIMAL(12, 2)) AS resource_wait_pct,
-			-- SOS_SCHEDULER_YIELD = Might indicate CPU pressure if very high overall percentage. Check yielding conditions in http://technet.microsoft.com/en-us/library/cc917684.aspx
+		CAST(W1.wait_time_s AS DECIMAL(14, 2)) AS wait_time_s,
+		CAST(W1.signal_wait_time_s AS DECIMAL(14, 2)) AS signal_wait_time_s,
+		CAST(W1.resource_wait_time_s AS DECIMAL(14, 2)) AS resource_wait_time_s,
+		CAST(W1.pct AS DECIMAL(14, 2)) AS pct,
+		CAST(SUM(W2.pct) AS DECIMAL(14, 2)) AS overall_running_pct,
+		CAST(W1.signal_wait_pct AS DECIMAL(14, 2)) AS signal_wait_pct,
+		CAST(W1.resource_wait_pct AS DECIMAL(14, 2)) AS resource_wait_pct,
 		CASE WHEN W1.wait_type = N'SOS_SCHEDULER_YIELD' THEN N'CPU' 
-			-- THREADPOOL = Look for high blocking or contention problems with workers. This will not show up in sys.dm_exec_requests;
 			WHEN W1.wait_type = N'THREADPOOL' THEN 'CPU - Unavailable Worker Threads'
 			WHEN W1.wait_type LIKE N'LCK_%' OR W1.wait_type = N'LOCK' THEN N'Lock' 
-			-- LATCH = indicates contention for access to some non-page structures. ACCESS_METHODS_DATASET_PARENT, ACCESS_METHODS_SCAN_RANGE_GENERATOR or NESTING_TRANSACTION_FULL latches indicate parallelism issues;
 			WHEN W1.wait_type LIKE N'LATCH_%' THEN N'Latch'
-			-- PAGELATCH = indicates contention for access to in-memory copies of pages, like PFS, SGAM and GAM; 
-			-- PAGELATCH_UP = Does the filegroup have enough files? Contention in PFS?
-			-- PAGELATCH_EX = Contention while doing many UPDATE statements against small tables? 
-			-- PAGELATCH_EX = Many concurrent INSERT statements into a table that has an index on an IDENTITY or NEWSEQUENTIALID column? -> http://blogs.msdn.com/b/blogdoezequiel/archive/2013/05/23/pagelatch-ex-waits-and-heavy-inserts.aspx
 			WHEN W1.wait_type LIKE N'PAGELATCH_%' THEN N'Buffer Latch'
-			-- PAGEIOLATCH = indicates IO problems, or BP pressure.
 			WHEN W1.wait_type LIKE N'PAGEIOLATCH_%' THEN N'Buffer IO'
 			WHEN W1.wait_type LIKE N'HADR_SYNC_COMMIT' THEN N'Always On - Secondary Synch' 
 			WHEN W1.wait_type LIKE N'HADR_%' OR W1.wait_type LIKE N'PWAIT_HADR_%' THEN N'Always On'
@@ -8182,8 +8204,6 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type LIKE N'CLR%' OR W1.wait_type LIKE N'SQLCLR%' THEN N'SQL CLR' 
 			WHEN W1.wait_type LIKE N'DBMIRROR%' OR W1.wait_type = N'MIRROR_SEND_MESSAGE' THEN N'Mirroring' 
 			WHEN W1.wait_type LIKE N'XACT%' or W1.wait_type LIKE N'DTC%' or W1.wait_type LIKE N'TRAN_MARKLATCH_%' or W1.wait_type LIKE N'MSQL_XACT_%' or W1.wait_type = N'TRANSACTION_MUTEX' THEN N'Transaction' 
-			--WHEN W1.wait_type LIKE N'SLEEP_%' or W1.wait_type IN (N'LAZYWRITER_SLEEP', N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SQLTRACE_WAIT_ENTRIES', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'XE_DISPATCHER_WAIT', N'REQUEST_FOR_DEADLOCK_SEARCH', N'LOGMGR_QUEUE', N'ONDEMAND_TASK_QUEUE', N'CHECKPOINT_QUEUE', N'XE_TIMER_EVENT') THEN N'Idle' 
-			-- PREEMPTIVE_OS_WRITEFILEGATHERER (2008+) = usually autogrow scenarios, usually together with WRITELOG;
 			WHEN W1.wait_type LIKE N'PREEMPTIVE_%' THEN N'External APIs or XPs' -- Used to indicate a worker is running code that is not under the SQLOS Scheduling;
 			WHEN W1.wait_type LIKE N'BROKER_%' AND W1.wait_type <> N'BROKER_RECEIVE_WAITFOR' THEN N'Service Broker' 
 			WHEN W1.wait_type IN (N'LOGMGR', N'LOGBUFFER', N'LOGMGR_RESERVE_APPEND', N'LOGMGR_FLUSH', N'LOGMGR_PMM_LOG', N'CHKPT', N'WRITELOG') THEN N'Tran Log IO' 
@@ -8192,31 +8212,16 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type IN (N'WAITFOR', N'WAIT_FOR_RESULTS', N'BROKER_RECEIVE_WAITFOR') THEN N'User Wait' 
 			WHEN W1.wait_type IN (N'TRACEWRITE', N'SQLTRACE_LOCK', N'SQLTRACE_FILE_BUFFER', N'SQLTRACE_FILE_WRITE_IO_COMPLETION', N'SQLTRACE_FILE_READ_IO_COMPLETION', N'SQLTRACE_PENDING_BUFFER_WRITERS', N'SQLTRACE_SHUTDOWN', N'QUERY_TRACEOUT', N'TRACE_EVTNOTIF') THEN N'Tracing' 
 			WHEN W1.wait_type LIKE N'FT_%' OR W1.wait_type IN (N'FULLTEXT GATHERER', N'MSSEARCH', N'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN N'Full Text Search' 
-			-- IO_COMPLETION = usually TempDB spilling; 
-			-- ASYNC_IO_COMPLETION = usually when not using IFI, or waiting on backups.
-			-- DISKIO_SUSPEND = High wait times here indicate the SNAPSHOT BACKUP may be taking longer than expected. Typically the delay is within the VDI application perform the snapshot backup;
 			WHEN W1.wait_type IN (N'ASYNC_IO_COMPLETION', N'IO_COMPLETION', N'WRITE_COMPLETION', N'IO_QUEUE_LIMIT', /*N'HADR_FILESTREAM_IOMGR_IOCOMPLETION',*/ N'IO_RETRY') THEN N'Other Disk IO' 
-			-- BACKUPIO = check for slow backup media slow, like Tapes or Disks;
-			-- BACKUPBUFFER = usually when backing up to Tape;
 			WHEN W1.wait_type IN(N'BACKUPIO', N'BACKUPBUFFER') THEN 'Backup IO'
-			-- Check sys.dm_os_waiting_tasks for Exchange wait types in http://technet.microsoft.com/en-us/library/ms188743.aspx;
-			-- Wait Resource e_waitPipeNewRow in CXPACKET waits Producer waiting on consumer for a packet to fill;
-			-- Wait Resource e_waitPipeGetRow in CXPACKET waits Consumer waiting on producer to fill a packet;
-			-- CXPACKET = if OLTP, check for parallelism issues if above 20 pct. If combined with a high number of PAGEIOLATCH_XX waits, it could be large parallel table scans going on because of incorrect non-clustered indexes, or out-of-date statistics causing a bad query plan;
 			WHEN W1.wait_type IN (N'CXPACKET', N'EXCHANGE', N'CXCONSUMER') THEN N'CPU - Parallelism'
-			-- WRITELOG = log management system waiting for a log flush to disk. Examine the IO latency for the log file
 			WHEN W1.wait_type IN (N'LOGMGR', N'LOGBUFFER', N'LOGMGR_RESERVE_APPEND', N'LOGMGR_FLUSH', N'WRITELOG') THEN N'Logging'
 			WHEN W1.wait_type IN (N'NET_WAITFOR_PACKET',N'NETWORK_IO') THEN N'Network IO'
 			WHEN W1.wait_type = N'ASYNC_NETWORK_IO' THEN N'Client Network IO'
-			-- CMEMTHREAD =  indicates that the rate of insertion of entries into the plan cache is very high and there is contention -> http://blogs.msdn.com/b/psssql/archive/2012/12/20/how-it-works-cmemthread-and-debugging-them.aspx
-			-- SOS_RESERVEDMEMBLOCKLIST = look for procedures with a large number of parameters, or queries with a long list of expression values specified in an IN clause, which would require multi-page allocations
 			WHEN W1.wait_type IN (N'UTIL_PAGE_ALLOC',N'SOS_VIRTUALMEMORY_LOW',N'CMEMTHREAD', N'SOS_RESERVEDMEMBLOCKLIST') THEN N'Memory' 
-			-- RESOURCE_SEMAPHORE_SMALL_QUERY or RESOURCE_SEMAPHORE = queries are waiting for execution memory. Look for plans with excessive hashing or sorts.
 			WHEN W1.wait_type IN (N'RESOURCE_SEMAPHORE_SMALL_QUERY', N'RESOURCE_SEMAPHORE') THEN N'Memory - Hash or Sort'
-			-- RESOURCE_SEMAPHORE_QUERY_COMPILE = usually high compilation or recompilation scenario (higher ratio of prepared plans vs. compiled plans). On x64 usually memory hungry queries and compiles. On x86 perhaps short on VAS. -> http://technet.microsoft.com/en-us/library/cc293620.aspx
 			WHEN W1.wait_type LIKE N'RESOURCE_SEMAPHORE_%' OR W1.wait_type LIKE N'RESOURCE_SEMAPHORE_QUERY_COMPILE' THEN N'Memory - Compilation'
 			WHEN W1.wait_type LIKE N'CLR_%' OR W1.wait_type LIKE N'SQLCLR%' THEN N'CLR'
-			-- DBMIRROR_DBM_MUTEX = indicates contention for the send buffer that database mirroring shares between all the mirroring sessions. 
 			WHEN W1.wait_type LIKE N'DBMIRROR%' OR W1.wait_type = N'MIRROR_SEND_MESSAGE' THEN N'Mirroring'
 			WHEN W1.wait_type LIKE N'RESOURCE_SEMAPHORE_%' OR W1.wait_type LIKE N'RESOURCE_SEMAPHORE_QUERY_COMPILE' THEN N'Compilation' 
 			WHEN W1.wait_type LIKE N'XACT%' OR W1.wait_type LIKE N'DTC_%' OR W1.wait_type LIKE N'TRAN_MARKLATCH_%' OR W1.wait_type LIKE N'MSQL_XACT_%' OR W1.wait_type = N'TRANSACTION_MUTEX' THEN N'Transaction'
@@ -8229,15 +8234,24 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 			WHEN W1.wait_type LIKE N'COLUMNSTORE%' THEN N'Columnstore'
 		ELSE N'Other' END AS 'wait_category'
 	FROM Waits AS W1 INNER JOIN Waits AS W2 ON W2.rn <= W1.rn
-	GROUP BY W1.rn, W1.wait_type, W1.wait_time_s, W1.pct, W1.signal_wait_time_s, W1.resource_wait_time_s, W1.signal_wait_pct, W1.resource_wait_pct
-	HAVING W1.wait_time_s >= 0.01 AND (SUM(W2.pct)-W1.pct) < 100  -- percentage threshold
+	GROUP BY W1.rn, W1.wait_type, CAST(W1.wait_time_s AS DECIMAL(14, 2)), CAST(W1.pct AS DECIMAL(14, 2)), CAST(W1.signal_wait_time_s AS DECIMAL(14, 2)), CAST(W1.resource_wait_time_s AS DECIMAL(14, 2)), CAST(W1.signal_wait_pct AS DECIMAL(14, 2)), CAST(W1.resource_wait_pct AS DECIMAL(14, 2))
+	HAVING CAST(W1.wait_time_s as DECIMAL(14, 2)) >= 0.01 AND (SUM(W2.pct)-CAST(W1.pct AS DECIMAL(14, 2))) < 100  -- percentage threshold
 	ORDER BY W1.rn;
 
+	-- ACCESS_METHODS_HOBT_VIRTUAL_ROOT = This latch is used to access the metadata for an index that contains the page ID of the index's root page. Contention on this latch can occur when a B-tree root page split occurs (requiring the latch in EX mode) and threads wanting to navigate down the B-tree (requiring the latch in SH mode) have to wait. This could be from very fast population of a small index using many concurrent connections, with or without page splits from random key values causing cascading page splits (from leaf to root).
+	-- ACCESS_METHODS_HOBT_COUNT = This latch is used to flush out page and row count deltas for a HoBt (Heap-or-B-tree) to the Storage Engine metadata tables. Contention would indicate *lots* of small, concurrent DML operations on a single table. 
+	-- ACCESS_METHODS_DATASET_PARENT and ACCESS_METHODS_SCAN_RANGE_GENERATOR = These two latches are used during parallel scans to give each thread a range of page IDs to scan. The LATCH_XX waits for these latches will typically appear with CXPACKET waits and PAGEIOLATCH_XX waits (if the data being scanned is not memory-resident). Use normal parallelism troubleshooting methods to investigate further (e.g. is the parallelism warranted? maybe increase 'cost threshold for parallelism', lower MAXDOP, use a MAXDOP hint, use Resource Governor to limit DOP using a workload group with a MAX_DOP limit. Did a plan change from index seeks to parallel table scans because a tipping point was reached or a plan recompiled with an atypical SP parameter or poor statistics? Do NOT knee-jerk and set server MAXDOP to 1 – that's some of the worst advice I see on the Internet.);
+	-- NESTING_TRANSACTION_FULL  = This latch, along with NESTING_TRANSACTION_READONLY, is used to control access to transaction description structures (called an XDES) for parallel nested transactions. The _FULL is for a transaction that's 'active', i.e. it's changed the database (usually for an index build/rebuild), and that makes the _READONLY description obvious. A query that involves a parallel operator must start a sub-transaction for each parallel thread that is used – these transactions are sub-transactions of the parallel nested transaction. For contention on these, I'd investigate unwanted parallelism but I don't have a definite "it's usually this problem". Also check out the comments for some info about these also sometimes being a problem when RCSI is used.
+	-- LOG_MANAGER = if you see this latch, it is almost certainly because a transaction log is growing because it could not clear/truncate for some reason. Find the database where the log is growing and then figure out what's preventing log clearing using sys.databases.
+	-- DBCC_MULTIOBJECT_SCANNER  = This latch appears on Enterprise Edition when DBCC CHECK_ commands are allowed to run in parallel. It is used by threads to request the next data file page to process. Late last year this was identified as a major contention point inside DBCC CHECK* and there was work done to reduce the contention and make DBCC CHECK* run faster.
+	-- http://blogs.msdn.com/b/psssql/archive/2012/02/23/a-faster-checkdb-part-ii.aspx
+	-- FGCB_ADD_REMOVE = FGCB stands for File Group Control Block. This latch is required whenever a file is added or dropped from the filegroup, whenever a file is grown (manually or automatically), when recalculating proportional-fill weightings, and when cycling through the files in the filegroup as part of round-robin allocation. If you're seeing this, the most common cause is that there's a lot of file auto-growth happening. It could also be from a filegroup with lots of files (e.g. the primary filegroup in tempdb) where there are thousands of concurrent connections doing allocations. The proportional-fill weightings are recalculated every 8192 allocations, so there's the possibility of a slowdown with frequent recalculations over many files.
+
 	;WITH cteLatches1 (latch_class,wait_time_ms,waiting_requests_count) AS (SELECT latch_class,wait_time_ms,waiting_requests_count FROM #tblLatches WHERE [retrieval_time] = @minctr),
 		cteLatches2 (latch_class,wait_time_ms,waiting_requests_count) AS (SELECT latch_class,wait_time_ms,waiting_requests_count FROM #tblLatches WHERE [retrieval_time] = @maxctr)
 	INSERT INTO #tblFinalLatches
 	SELECT DISTINCT t1.latch_class,
-			(t2.wait_time_ms-t1.wait_time_ms) / 1000.0 AS wait_time_s,
+			CAST((t2.wait_time_ms-t1.wait_time_ms) / 1000.0 AS DECIMAL(14, 2)) AS wait_time_s,
 			(t2.waiting_requests_count-t1.waiting_requests_count) AS waiting_requests_count,
 			100.0 * (t2.wait_time_ms-t1.wait_time_ms) / SUM(t2.wait_time_ms-t1.wait_time_ms) OVER() AS pct,
 			ROW_NUMBER() OVER(ORDER BY t1.wait_time_ms DESC) AS rn
@@ -8245,12 +8259,12 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 	GROUP BY t1.latch_class, t1.wait_time_ms, t2.wait_time_ms, t1.waiting_requests_count, t2.waiting_requests_count
 	HAVING (t2.wait_time_ms-t1.wait_time_ms) > 0
 	ORDER BY wait_time_s DESC;
-
+	
 	SELECT 'Performance_checks' AS [Category], 'Latches_Last_' + CONVERT(VARCHAR(3), @duration) + 's' AS [Information], W1.latch_class, 
-		CAST(W1.wait_time_s AS DECIMAL(14, 2)) AS wait_time_s,
+		W1.wait_time_s,
 		W1.waiting_requests_count,
-		CAST (W1.pct AS DECIMAL(14, 2)) AS pct,
-		CAST(SUM(W1.pct) AS DECIMAL(12, 2)) AS overall_running_pct,
+		CAST(W1.pct AS DECIMAL(14, 2)) AS pct,
+		CAST(SUM(W2.pct) AS DECIMAL(14, 2)) AS overall_running_pct,
 		CAST ((W1.wait_time_s / W1.waiting_requests_count) AS DECIMAL (14, 4)) AS avg_wait_s,
 	CASE WHEN W1.latch_class LIKE N'ACCESS_METHODS_HOBT_COUNT' 
 			OR W1.latch_class LIKE N'ACCESS_METHODS_HOBT_VIRTUAL_ROOT' THEN N'[HoBT - Metadata]'
@@ -8265,12 +8279,12 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 		WHEN W1.latch_class LIKE N'BUFFER' THEN N'[Buffer Pool]'
 		ELSE N'[Other]' END AS 'latch_category'
 	FROM #tblFinalLatches AS W1 INNER JOIN #tblFinalLatches AS W2 ON W2.rn <= W1.rn
-	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, W1.pct
-	HAVING SUM (W2.pct) - W1.pct < 100; -- percentage threshold
+	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, CAST(W1.pct AS DECIMAL(14, 2))
+	HAVING SUM(W2.pct) - CAST(W1.pct AS DECIMAL(14, 2)) < 100; -- percentage threshold
 	
 	;WITH Latches AS
 		(SELECT latch_class,
-			 wait_time_ms / 1000.0 AS wait_time_s,
+			 CAST(wait_time_ms / 1000.0 AS DECIMAL(14, 2)) AS wait_time_s,
 			 waiting_requests_count,
 			 100.0 * wait_time_ms / SUM(wait_time_ms) OVER() AS pct,
 			 ROW_NUMBER() OVER(ORDER BY wait_time_ms DESC) AS rn
@@ -8279,27 +8293,19 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 				AND*/ wait_time_ms > 0
 		)
 	SELECT 'Performance_checks' AS [Category], 'Cumulative_Latches' AS [Information], W1.latch_class, 
-		CAST(W1.wait_time_s AS DECIMAL(14, 2)) AS wait_time_s,
+		W1.wait_time_s,
 		W1.waiting_requests_count,
 		CAST(W1.pct AS DECIMAL(14, 2)) AS pct,
-		CAST(SUM(W1.pct) AS DECIMAL(12, 2)) AS overall_running_pct,
+		CAST(SUM(W2.pct) AS DECIMAL(14, 2)) AS overall_running_pct,
 		CAST((W1.wait_time_s / W1.waiting_requests_count) AS DECIMAL (14, 4)) AS avg_wait_s,
-			-- ACCESS_METHODS_HOBT_VIRTUAL_ROOT = This latch is used to access the metadata for an index that contains the page ID of the index's root page. Contention on this latch can occur when a B-tree root page split occurs (requiring the latch in EX mode) and threads wanting to navigate down the B-tree (requiring the latch in SH mode) have to wait. This could be from very fast population of a small index using many concurrent connections, with or without page splits from random key values causing cascading page splits (from leaf to root).
-			-- ACCESS_METHODS_HOBT_COUNT = This latch is used to flush out page and row count deltas for a HoBt (Heap-or-B-tree) to the Storage Engine metadata tables. Contention would indicate *lots* of small, concurrent DML operations on a single table. 
 		CASE WHEN W1.latch_class LIKE N'ACCESS_METHODS_HOBT_COUNT' 
 			OR W1.latch_class LIKE N'ACCESS_METHODS_HOBT_VIRTUAL_ROOT' THEN N'[HoBT - Metadata]'
-			-- ACCESS_METHODS_DATASET_PARENT and ACCESS_METHODS_SCAN_RANGE_GENERATOR = These two latches are used during parallel scans to give each thread a range of page IDs to scan. The LATCH_XX waits for these latches will typically appear with CXPACKET waits and PAGEIOLATCH_XX waits (if the data being scanned is not memory-resident). Use normal parallelism troubleshooting methods to investigate further (e.g. is the parallelism warranted? maybe increase 'cost threshold for parallelism', lower MAXDOP, use a MAXDOP hint, use Resource Governor to limit DOP using a workload group with a MAX_DOP limit. Did a plan change from index seeks to parallel table scans because a tipping point was reached or a plan recompiled with an atypical SP parameter or poor statistics? Do NOT knee-jerk and set server MAXDOP to 1 – that's some of the worst advice I see on the Internet.);
-			-- NESTING_TRANSACTION_FULL  = This latch, along with NESTING_TRANSACTION_READONLY, is used to control access to transaction description structures (called an XDES) for parallel nested transactions. The _FULL is for a transaction that's 'active', i.e. it's changed the database (usually for an index build/rebuild), and that makes the _READONLY description obvious. A query that involves a parallel operator must start a sub-transaction for each parallel thread that is used – these transactions are sub-transactions of the parallel nested transaction. For contention on these, I'd investigate unwanted parallelism but I don't have a definite "it's usually this problem". Also check out the comments for some info about these also sometimes being a problem when RCSI is used.
 			WHEN W1.latch_class LIKE N'ACCESS_METHODS_DATASET_PARENT' 
 				OR W1.latch_class LIKE N'ACCESS_METHODS_SCAN_RANGE_GENERATOR' 
 				OR W1.latch_class LIKE N'NESTING_TRANSACTION_FULL' THEN N'[Parallelism]'
-			-- LOG_MANAGER = you see this latch it is almost certainly because a transaction log is growing because it could not clear/truncate for some reason. Find the database where the log is growing and then figure out what's preventing log clearing using sys.databases.
 			WHEN W1.latch_class LIKE N'LOG_MANAGER' THEN N'[IO - Log]'
 			WHEN W1.latch_class LIKE N'TRACE_CONTROLLER' THEN N'[Trace]'
-			-- DBCC_MULTIOBJECT_SCANNER  = This latch appears on Enterprise Edition when DBCC CHECK_ commands are allowed to run in parallel. It is used by threads to request the next data file page to process. Late last year this was identified as a major contention point inside DBCC CHECK* and there was work done to reduce the contention and make DBCC CHECK* run faster.
-			-- http://blogs.msdn.com/b/psssql/archive/2012/02/23/a-faster-checkdb-part-ii.aspx
 			WHEN W1.latch_class LIKE N'DBCC_MULTIOBJECT_SCANNER ' THEN N'[Parallelism - DBCC CHECK_]'
-			-- FGCB_ADD_REMOVE = FGCB stands for File Group Control Block. This latch is required whenever a file is added or dropped from the filegroup, whenever a file is grown (manually or automatically), when recalculating proportional-fill weightings, and when cycling through the files in the filegroup as part of round-robin allocation. If you're seeing this, the most common cause is that there's a lot of file auto-growth happening. It could also be from a filegroup with lots of file (e.g. the primary filegroup in tempdb) where there are thousands of concurrent connections doing allocations. The proportional-fill weightings are recalculated every 8192 allocations, so there's the possibility of a slowdown with frequent recalculations over many files.
 			WHEN W1.latch_class LIKE N'FGCB_ADD_REMOVE' THEN N'[IO Operations]'
 			WHEN W1.latch_class LIKE N'DATABASE_MIRRORING_CONNECTION ' THEN N'[Mirroring - Busy]'
 			WHEN W1.latch_class LIKE N'BUFFER' THEN N'[Buffer Pool - PAGELATCH or PAGEIOLATCH]'
@@ -8307,12 +8313,12 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 	FROM Latches AS W1
 	INNER JOIN Latches AS W2
 		ON W2.rn <= W1.rn
-	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, W1.pct
-	HAVING SUM (W2.pct) - W1.pct < 100; -- percentage threshold
+	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, CAST(W1.pct AS DECIMAL(14, 2))
+	HAVING SUM(W2.pct) - CAST(W1.pct AS DECIMAL(14, 2)) < 100; -- percentage threshold
 	
 	;WITH Latches AS
 		(SELECT latch_class,
-			 wait_time_ms / 1000.0 AS wait_time_s,
+			 CAST(wait_time_ms / 1000.0 AS DECIMAL(14, 2)) AS wait_time_s,
 			 waiting_requests_count,
 			 100.0 * wait_time_ms / SUM(wait_time_ms) OVER() AS pct,
 			 ROW_NUMBER() OVER(ORDER BY wait_time_ms DESC) AS rn
@@ -8321,27 +8327,19 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 				AND wait_time_ms > 0
 		)
 	SELECT 'Performance_checks' AS [Category], 'Cumulative_Latches_wo_BUFFER' AS [Information], W1.latch_class, 
-		CAST(W1.wait_time_s AS DECIMAL(14, 2)) AS wait_time_s,
+		W1.wait_time_s,
 		W1.waiting_requests_count,
 		CAST(W1.pct AS DECIMAL(14, 2)) AS pct,
-		CAST(SUM(W1.pct) AS DECIMAL(12, 2)) AS overall_running_pct,
+		CAST(SUM(W2.pct) AS DECIMAL(14, 2)) AS overall_running_pct,
 		CAST((W1.wait_time_s / W1.waiting_requests_count) AS DECIMAL (14, 4)) AS avg_wait_s,
-			-- ACCESS_METHODS_HOBT_VIRTUAL_ROOT = This latch is used to access the metadata for an index that contains the page ID of the index's root page. Contention on this latch can occur when a B-tree root page split occurs (requiring the latch in EX mode) and threads wanting to navigate down the B-tree (requiring the latch in SH mode) have to wait. This could be from very fast population of a small index using many concurrent connections, with or without page splits from random key values causing cascading page splits (from leaf to root).
-			-- ACCESS_METHODS_HOBT_COUNT = This latch is used to flush out page and row count deltas for a HoBt (Heap-or-B-tree) to the Storage Engine metadata tables. Contention would indicate *lots* of small, concurrent DML operations on a single table. 
 		CASE WHEN W1.latch_class LIKE N'ACCESS_METHODS_HOBT_COUNT' 
 			OR W1.latch_class LIKE N'ACCESS_METHODS_HOBT_VIRTUAL_ROOT' THEN N'[HoBT - Metadata]'
-			-- ACCESS_METHODS_DATASET_PARENT and ACCESS_METHODS_SCAN_RANGE_GENERATOR = These two latches are used during parallel scans to give each thread a range of page IDs to scan. The LATCH_XX waits for these latches will typically appear with CXPACKET waits and PAGEIOLATCH_XX waits (if the data being scanned is not memory-resident). Use normal parallelism troubleshooting methods to investigate further (e.g. is the parallelism warranted? maybe increase 'cost threshold for parallelism', lower MAXDOP, use a MAXDOP hint, use Resource Governor to limit DOP using a workload group with a MAX_DOP limit. Did a plan change from index seeks to parallel table scans because a tipping point was reached or a plan recompiled with an atypical SP parameter or poor statistics? Do NOT knee-jerk and set server MAXDOP to 1 – that's some of the worst advice I see on the Internet.);
-			-- NESTING_TRANSACTION_FULL  = This latch, along with NESTING_TRANSACTION_READONLY, is used to control access to transaction description structures (called an XDES) for parallel nested transactions. The _FULL is for a transaction that's 'active', i.e. it's changed the database (usually for an index build/rebuild), and that makes the _READONLY description obvious. A query that involves a parallel operator must start a sub-transaction for each parallel thread that is used – these transactions are sub-transactions of the parallel nested transaction. For contention on these, I'd investigate unwanted parallelism but I don't have a definite "it's usually this problem". Also check out the comments for some info about these also sometimes being a problem when RCSI is used.
 			WHEN W1.latch_class LIKE N'ACCESS_METHODS_DATASET_PARENT' 
 				OR W1.latch_class LIKE N'ACCESS_METHODS_SCAN_RANGE_GENERATOR' 
 				OR W1.latch_class LIKE N'NESTING_TRANSACTION_FULL' THEN N'[Parallelism]'
-			-- LOG_MANAGER = you see this latch it is almost certainly because a transaction log is growing because it could not clear/truncate for some reason. Find the database where the log is growing and then figure out what's preventing log clearing using sys.databases.
 			WHEN W1.latch_class LIKE N'LOG_MANAGER' THEN N'[IO - Log]'
 			WHEN W1.latch_class LIKE N'TRACE_CONTROLLER' THEN N'[Trace]'
-			-- DBCC_MULTIOBJECT_SCANNER  = This latch appears on Enterprise Edition when DBCC CHECK_ commands are allowed to run in parallel. It is used by threads to request the next data file page to process. Late last year this was identified as a major contention point inside DBCC CHECK* and there was work done to reduce the contention and make DBCC CHECK* run faster.
-			-- http://blogs.msdn.com/b/psssql/archive/2012/02/23/a-faster-checkdb-part-ii.aspx
 			WHEN W1.latch_class LIKE N'DBCC_MULTIOBJECT_SCANNER ' THEN N'[Parallelism - DBCC CHECK_]'
-			-- FGCB_ADD_REMOVE = FGCB stands for File Group Control Block. This latch is required whenever a file is added or dropped from the filegroup, whenever a file is grown (manually or automatically), when recalculating proportional-fill weightings, and when cycling through the files in the filegroup as part of round-robin allocation. If you're seeing this, the most common cause is that there's a lot of file auto-growth happening. It could also be from a filegroup with lots of file (e.g. the primary filegroup in tempdb) where there are thousands of concurrent connections doing allocations. The proportional-fill weightings are recalculated every 8192 allocations, so there's the possibility of a slowdown with frequent recalculations over many files.
 			WHEN W1.latch_class LIKE N'FGCB_ADD_REMOVE' THEN N'[IO Operations]'
 			WHEN W1.latch_class LIKE N'DATABASE_MIRRORING_CONNECTION ' THEN N'[Mirroring - Busy]'
 			WHEN W1.latch_class LIKE N'BUFFER' THEN N'[Buffer Pool - PAGELATCH or PAGEIOLATCH]'
@@ -8349,8 +8347,8 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 	FROM Latches AS W1
 	INNER JOIN Latches AS W2
 		ON W2.rn <= W1.rn
-	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, W1.pct
-	HAVING SUM (W2.pct) - W1.pct < 100; -- percentage threshold
+	GROUP BY W1.rn, W1.latch_class, W1.wait_time_s, W1.waiting_requests_count, CAST(W1.pct AS DECIMAL(14, 2))
+	HAVING SUM(W2.pct) - CAST(W1.pct AS DECIMAL(14, 2)) < 100; -- percentage threshold
 
 	;WITH cteSpinlocks1 AS (SELECT name, collisions, spins, spins_per_collision, sleep_time, backoffs FROM #tblSpinlocksBefore),
 		cteSpinlocks2 AS (SELECT name, collisions, spins, spins_per_collision, sleep_time, backoffs FROM #tblSpinlocksAfter)
@@ -8371,10 +8369,10 @@ WHERE (cntr_type = 272696576 OR cntr_type = 1073874176 OR cntr_type = 1073939712
 	SELECT 'Performance_checks' AS [Category], 'Spinlocks_Last_' + CONVERT(VARCHAR(3), @duration) + 's' AS [Information], S1.name, 
 		S1.collisions, S1.spins, S1.spins_per_collision, S1.sleep_time, S1.backoffs,
 		CAST(S1.spins_pct AS DECIMAL(14, 2)) AS spins_pct,
-		CAST(SUM(S1.spins_pct) AS DECIMAL(12, 2)) AS overall_running_spins_pct
+		CAST(SUM(S2.spins_pct) AS DECIMAL(14, 2)) AS overall_running_spins_pct
 	FROM #tblFinalSpinlocks AS S1 INNER JOIN #tblFinalSpinlocks AS S2 ON S2.rn <= S1.rn
 	GROUP BY S1.rn, S1.name, S1.collisions, S1.spins, S1.spins_per_collision, S1.sleep_time, S1.backoffs, S1.spins_pct
-	HAVING SUM(S2.spins_pct) - S1.spins_pct < 100 -- percentage threshold
+	HAVING CAST(SUM(S2.spins_pct) AS DECIMAL(14, 2)) - CAST(S1.spins_pct AS DECIMAL(14, 2)) < 100 -- percentage threshold
 	ORDER BY spins DESC;
 END;
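
The wait-type and latch-class notes added in this diff can be explored directly against the DMVs they describe; a minimal sketch of such ad-hoc queries (cumulative values since instance start; the TOP counts and filters are illustrative only):

    -- Cumulative waits, excluding sleep/idle types (columns per sys.dm_os_wait_stats).
    SELECT TOP (20) wait_type,
        wait_time_ms / 1000.0 AS wait_time_s,
        signal_wait_time_ms / 1000.0 AS signal_wait_time_s,
        waiting_tasks_count
    FROM sys.dm_os_wait_stats
    WHERE wait_type NOT LIKE N'SLEEP_%' AND wait_time_ms > 0
    ORDER BY wait_time_ms DESC;

    -- Cumulative latch waits, excluding the generic BUFFER class (columns per sys.dm_os_latch_stats).
    SELECT TOP (20) latch_class,
        wait_time_ms / 1000.0 AS wait_time_s,
        waiting_requests_count
    FROM sys.dm_os_latch_stats
    WHERE latch_class <> N'BUFFER' AND wait_time_ms > 0
    ORDER BY wait_time_ms DESC;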
 

+ 34 - 23
MaintenanceSolution/2_usp_AdaptiveIndexDefrag.sql

@@ -679,7 +679,10 @@ v1.6.6.4 - 6/25/2018 - Tested with Azure SQL Managed Instance;
 						Added extra debug output.
 v1.6.6.5 - 9/23/2018 - Fixed issue where table that is compressed would become uncompressed (by d-moloney);
 						Extended row mode counter info data type in debug mode (by d-moloney);
-						Fixed issue with @statsThreshold and large tables (by AndrewG2)
+						Fixed issue with @statsThreshold and large tables (by AndrewG2).
+v1.6.6.6 - 10/28/2018 - Extended 2nd row mode counter info data type in debug mode (by CodyFitzpatrick);
+						Fixed compression data missing in working table (by ravseer).
+
 IMPORTANT:
 Execute in the database context of where you created the log and working tables.			
 										
@@ -1240,7 +1243,7 @@ BEGIN SET @hasIXsOUT = 1 END ELSE BEGIN SET @hasIXsOUT = 0 END'
 				, @currCompression NVARCHAR(60)
 
 		/* Initialize variables */	
-		SELECT @AID_dbID = DB_ID(), @startDateTime = GETDATE(), @endDateTime = DATEADD(minute, @timeLimit, GETDATE()), @operationFlag = NULL, @ver = '1.6.6.5';
+		SELECT @AID_dbID = DB_ID(), @startDateTime = GETDATE(), @endDateTime = DATEADD(minute, @timeLimit, GETDATE()), @operationFlag = NULL, @ver = '1.6.6.6';
 	
 		/* Create temporary tables */	
 		IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblIndexDefragDatabaseList'))
@@ -1712,9 +1715,6 @@ OPTION (MAXDOP 2)'
 						WHERE objectID = @objectID AND indexID = @indexID AND partitionNumber = @partitionNumber
 					END
 				END;
-				
-				IF @debugMode = 1
-				RAISERROR('    Looking up additional index information...', 0, 42) WITH NOWAIT;
 
 				/* Look up index status for various purposes */	
 				SELECT @updateSQL = N'UPDATE ids		
@@ -1727,27 +1727,38 @@ WHERE o.object_id = ids.objectID AND i.index_id = ids.indexID AND i.type > 0
 AND o.object_id NOT IN (SELECT sit.object_id FROM [' + DB_NAME(@dbID) + '].sys.internal_tables AS sit)
 AND ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
 
+				IF @debugMode = 1
+				BEGIN
+					RAISERROR('    Looking up additional index information (pass 1)...', 0, 42) WITH NOWAIT;
+					--PRINT @updateSQL
+				END
+				
 				EXECUTE sp_executesql @updateSQL;
 				
-				IF @scanMode = 'LIMITED'
+				IF @sqlmajorver = 9
 				BEGIN
-					IF @sqlmajorver = 9
-					BEGIN
-						SELECT @updateSQL = N'UPDATE ids
-	SET [record_count] = [rows], [compression_type] = N''''
-	FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
-	INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
-	WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
-					END
-					ELSE
-					BEGIN
-						SELECT @updateSQL = N'UPDATE ids
-	SET [record_count] = [rows], [compression_type] = [data_compression_desc] END
-	FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
-	INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
-	WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
-					END
+					SELECT @updateSQL = N'UPDATE ids
+SET [record_count] = [rows], [compression_type] = N''''
+FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
+INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
+WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
+				END
+				ELSE
+				BEGIN
+					SELECT @updateSQL = N'UPDATE ids
+SET [record_count] = [rows], [compression_type] = [data_compression_desc]
+FROM [' + DB_NAME(@AID_dbID) + '].dbo.tbl_AdaptiveIndexDefrag_Working ids WITH (NOLOCK)
+INNER JOIN [' + DB_NAME(@dbID) + '].sys.partitions AS p WITH (NOLOCK) ON ids.objectID = p.[object_id] AND ids.indexID = p.index_id AND ids.partitionNumber = p.partition_number
+WHERE ids.[dbID] = ' + CAST(@dbID AS NVARCHAR(10));
 				END
+
+				IF @debugMode = 1
+				BEGIN
+					RAISERROR('    Looking up additional index information (pass 2)...', 0, 42) WITH NOWAIT;
+					--PRINT @updateSQL
+				END
+				
+				EXECUTE sp_executesql @updateSQL;
 				
 				IF @debugMode = 1
 				RAISERROR('    Looking up additional statistic information...', 0, 42) WITH NOWAIT;
@@ -2774,7 +2785,7 @@ WHERE system_type_id IN (34, 35, 99) ' + CASE WHEN @sqlmajorver < 11 THEN 'OR ma
 
 				IF @debugMode = 1
 				BEGIN
-					SELECT @debugMessage = '     Found a row modification counter of ' + CONVERT(NVARCHAR(10), @rowmodctr) + ' and ' + CONVERT(NVARCHAR(10), CASE WHEN @rows IS NOT NULL AND @rows < @record_count THEN @rows ELSE @record_count END) + ' rows' + CASE WHEN @stats_isincremental = 1 THEN ' on partition ' + CONVERT(NVARCHAR(10), @partitionNumber) ELSE '' END + '...';
+					SELECT @debugMessage = '     Found a row modification counter of ' + CONVERT(NVARCHAR(15), @rowmodctr) + ' and ' + CONVERT(NVARCHAR(15), CASE WHEN @rows IS NOT NULL AND @rows < @record_count THEN @rows ELSE @record_count END) + ' rows' + CASE WHEN @stats_isincremental = 1 THEN ' on partition ' + CONVERT(NVARCHAR(15), @partitionNumber) ELSE '' END + '...';
 					RAISERROR(@debugMessage, 0, 42) WITH NOWAIT;
 					--select @debugMessage
 				END

+ 143 - 0
SQLOps_Ext/view_CompressionGains_singleDS.sql

@@ -0,0 +1,143 @@
+-- 2010-09-22 Pedro Lopes (Microsoft) [email protected] (http://aka.ms/sqlinsights)
+--
+-- 2013-12-03 Fixed divide by zero error
+--
+-- Recommends the type of compression per object - recommendations become more trustworthy as instance uptime increases.
+--
+-- [Percent_Update]
+-- The percentage of update operations on a specific table, index, or partition, relative to total operations on that object. The lower the value of [Percent_Update] (that is, the table, index, or partition is infrequently updated), the better candidate it is for page compression. 
+--
+-- [Percent_Scan]
+-- The percentage of scan operations on a table, index, or partition, relative to total operations on that object. The higher the value of Scan (that is, the table, index, or partition is mostly scanned), the better candidate it is for page compression.
+--
+-- [Compression_Type_Recommendation] - READ Data Compression Best Practices before implementing.
+-- A value of ? means ROW if the object suffers mainly UPDATES, or PAGE if mainly INSERTS.
+-- A value of NO_GAIN means that, according to sp_estimate_data_compression_savings, no space gains will be attained by compressing.
+--
+-- based on Data Compression Whitepaper at http://msdn.microsoft.com/en-us/library/dd894051(SQL.100).aspx
+--
+-- General algorithm validated by Paul Randal IF ENOUGH CPU AND RAM AVAILABLE.
+-- 
+SET NOCOUNT ON;
+
+CREATE TABLE ##tmpCompression ([Schema] sysname,
+	[Table_Name] sysname,
+	[Index_Name] sysname NULL,
+	[Partition] int,
+	[Index_ID] int,
+	[Index_Type] VARCHAR(12),
+	[Percent_Scan] smallint,
+	[Percent_Update] smallint,
+	[ROW_estimate_Pct_of_orig] smallint,
+	[PAGE_estimate_Pct_of_orig] smallint,
+	[Compression_Type_Recommendation] VARCHAR(7)
+);
+
+CREATE TABLE ##tmpEstimateRow (
+	objname sysname,
+	schname sysname,
+	indid int,
+	partnr int,
+	size_cur bigint,
+	size_req bigint,
+	sample_cur bigint,
+	sample_req bigint
+);
+
+CREATE TABLE ##tmpEstimatePage (
+	objname sysname,
+	schname sysname,
+	indid int,
+	partnr int,
+	size_cur bigint,
+	size_req bigint,
+	sample_cur bigint,
+	sample_req bigint
+);
+
+INSERT INTO ##tmpCompression ([Schema], [Table_Name], [Index_Name], [Partition], [Index_ID], [Index_Type], [Percent_Scan], [Percent_Update])
+SELECT s.name AS [Schema], o.name AS [Table_Name], x.name AS [Index_Name],
+       i.partition_number AS [Partition], i.index_id AS [Index_ID], x.type_desc AS [Index_Type],
+       i.range_scan_count * 100.0 / (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + i.leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) AS [Percent_Scan],
+       i.leaf_update_count * 100.0 / (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + i.leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) AS [Percent_Update]
+FROM sys.dm_db_index_operational_stats (db_id(), NULL, NULL, NULL) i
+	INNER JOIN sys.objects o ON o.object_id = i.object_id
+	INNER JOIN sys.schemas s ON o.schema_id = s.schema_id
+	INNER JOIN sys.indexes x ON x.object_id = i.object_id AND x.index_id = i.index_id
+WHERE (i.range_scan_count + i.leaf_insert_count + i.leaf_delete_count + leaf_update_count + i.leaf_page_merge_count + i.singleton_lookup_count) <> 0
+	AND objectproperty(i.object_id,'IsUserTable') = 1
+ORDER BY [Table_Name] ASC;
+
+DECLARE @schema sysname, @tbname sysname, @ixid int
+DECLARE cur CURSOR FAST_FORWARD FOR SELECT [Schema], [Table_Name], [Index_ID] FROM ##tmpCompression
+OPEN cur
+FETCH NEXT FROM cur INTO @schema, @tbname, @ixid
+WHILE @@FETCH_STATUS = 0
+BEGIN
+	--SELECT @schema, @tbname
+	INSERT INTO ##tmpEstimateRow
+	EXEC ('sp_estimate_data_compression_savings ''' + @schema + ''', ''' + @tbname + ''', ''' + @ixid + ''', NULL, ''ROW''' );
+	INSERT INTO ##tmpEstimatePage
+	EXEC ('sp_estimate_data_compression_savings ''' + @schema + ''', ''' + @tbname + ''', ''' + @ixid + ''', NULL, ''PAGE''');
+	FETCH NEXT FROM cur INTO @schema, @tbname, @ixid
+END
+CLOSE cur
+DEALLOCATE cur;
+
+--SELECT * FROM ##tmpEstimateRow
+--SELECT * FROM ##tmpEstimatePage;
+
+WITH tmp_CTE (objname, schname, indid, pct_of_orig_row, pct_of_orig_page)
+AS (SELECT tr.objname, tr.schname, tr.indid,	
+	(tr.sample_req*100)/CASE WHEN tr.sample_cur = 0 THEN 1 ELSE tr.sample_cur END AS pct_of_orig_row,
+	(tp.sample_req*100)/CASE WHEN tp.sample_cur = 0 THEN 1 ELSE tp.sample_cur END AS pct_of_orig_page
+	FROM ##tmpEstimateRow tr INNER JOIN ##tmpEstimatePage tp ON tr.objname = tp.objname
+	AND tr.schname = tp.schname AND tr.indid = tp.indid AND tr.partnr = tp.partnr)
+UPDATE ##tmpCompression
+SET [ROW_estimate_Pct_of_orig] = tcte.pct_of_orig_row, [PAGE_estimate_Pct_of_orig] = tcte.pct_of_orig_page
+FROM tmp_CTE tcte, ##tmpCompression tcomp
+WHERE tcte.objname = tcomp.Table_Name AND
+tcte.schname = tcomp.[Schema] AND
+tcte.indid = tcomp.Index_ID;
+
+WITH tmp_CTE2 (Table_Name, [Schema], Index_ID, [Compression_Type_Recommendation])
+AS (SELECT Table_Name, [Schema], Index_ID,
+	CASE WHEN [ROW_estimate_Pct_of_orig] >= 100 AND [PAGE_estimate_Pct_of_orig] >= 100 THEN 'NO_GAIN'
+		WHEN [Percent_Update] >= 10 THEN 'ROW' 
+		WHEN [Percent_Scan] <= 1 AND [Percent_Update] <= 1 AND [ROW_estimate_Pct_of_orig] < [PAGE_estimate_Pct_of_orig] THEN 'ROW'
+		WHEN [Percent_Scan] <= 1 AND [Percent_Update] <= 1 AND [ROW_estimate_Pct_of_orig] > [PAGE_estimate_Pct_of_orig] THEN 'PAGE'
+		WHEN [Percent_Scan] >= 60 AND [Percent_Update] <= 5 THEN 'PAGE'
+		WHEN [Percent_Scan] <= 35 AND [Percent_Update] <= 5 THEN '?'
+		ELSE 'ROW'
+		END
+	FROM ##tmpCompression)
+UPDATE ##tmpCompression
+SET [Compression_Type_Recommendation] = tcte2.[Compression_Type_Recommendation]
+FROM tmp_CTE2 tcte2, ##tmpCompression tcomp2
+WHERE tcte2.Table_Name = tcomp2.Table_Name AND
+tcte2.[Schema] = tcomp2.[Schema] AND
+tcte2.Index_ID = tcomp2.Index_ID;
+
+SET NOCOUNT ON;
+DECLARE @UpTime VARCHAR(12), @StartDate DATETIME, @sqlmajorver int, @sqlcmd NVARCHAR(500), @params NVARCHAR(500)
+SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
+
+IF @sqlmajorver = 9
+BEGIN
+	SET @sqlcmd = N'SELECT @StartDateOUT = login_time, @UpTimeOUT = DATEDIFF(mi, login_time, GETDATE()) FROM master..sysprocesses WHERE spid = 1';
+END
+ELSE
+BEGIN
+	SET @sqlcmd = N'SELECT @StartDateOUT = sqlserver_start_time, @UpTimeOUT = DATEDIFF(mi,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info';
+END
+
+SET @params = N'@StartDateOUT DATETIME OUTPUT, @UpTimeOUT VARCHAR(12) OUTPUT';
+
+EXECUTE sp_executesql @sqlcmd, @params, @StartDateOUT=@StartDate OUTPUT, @UpTimeOUT=@UpTime OUTPUT;
+
+SELECT @StartDate AS Collecting_Data_Since, * FROM ##tmpCompression;
+
+DROP TABLE ##tmpCompression
+DROP TABLE ##tmpEstimateRow
+DROP TABLE ##tmpEstimatePage;
+GO

+ 1 - 0
Sessions/PASS2018/README.md

@@ -0,0 +1 @@
+These are the presentations and demo files for the Tiger PASS Summit 2018 sessions.

BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/auto_tuning.zip


BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/containers.zip


BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/memory_grants.zip


BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/query_store.zip


BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/tempdb_stress.zip


BIN
Sessions/PASS2018/pre-con-modernizing-you-sql-server/utilities.zip


BIN
Sessions/PASS2018/sql-server-modernization-done-the-right-way/SQL Server Migrations Done the Right Way.pdf


BIN
Sessions/PASS2018/tempdb-the-good-the-bad-and-the-ugly/TempDB - The Good, The Bad and The Ugly.pdf


BIN
Sessions/SQLDay-2017/Enhancements-that-will-make-your-SQL-database-engine-roar---SP1.pdf


BIN
Sessions/SQLDay-2017/Enhancements-that-will-make-your-SQL-database-engine-roar-Demos.zip


BIN
Sessions/SQLDay-2017/Gems-to-help-you-troubleshoot-query-performance-Demos.zip


BIN
Sessions/SQLDay-2017/Gems-to-help-you-troubleshoot-query-performance.pdf


BIN
Sessions/SQLIntersection-2018/Gems to Help You Troubleshoot Query Performance v2.pdf


BIN
Sessions/SQLIntersection-2018/Practical guidance to make your tier-1 SQL Server roar.pdf


BIN
Sessions/SQLIntersection-2018/SQL-Server-Upgrades-Done-the-Right-Way.pdf


+ 32 - 0
Sessions/SQLIntersection-2018/automatic-tuning/README.md

@@ -0,0 +1,32 @@
+This is a repro package to demonstrate Automatic Tuning (Auto Plan Correction) in SQL Server 2017. 
+This feature uses telemetry from the Query Store feature we launched with Azure SQL Database and SQL Server 2016 to provide built-in intelligence.
+
+This repro assumes the following:
+
+- SQL Server 2017 installed (pick at minimum Database Engine) on Windows. This feature requires Developer or Enterprise Edition.
+- You have installed SQL Server Management Studio or SQL Operations Studio (https://docs.microsoft.com/en-us/sql/sql-operations-studio/download)
+- You have downloaded the RML Utilities from https://www.microsoft.com/en-us/download/details.aspx?id=4511.
+- These demos use a named instance called SQL2017. You will need to edit the .cmd scripts which connect to SQL Server to change to a default instance or whatever named instance you have installed.
+
+0. Install ostress from the package RML_Setup_AMD64.msi. Add C:\Program Files\Microsoft Corporation\RMLUtils to your path.
+
+1. Restore the WideWorldImporters database backup to your SQL Server 2017 instance. The WideWorldImporters can be found in https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/wide-world-importers
+
+2. Run Scenario.cmd to customize the WideWorldImporters database and start the demo. Leave it running...
+
+3. Setup Performance Monitor on Windows to track SQL Statistics/Batch Requests/sec
+
+4. While Scenario.cmd is running, run Regression.cmd (you may need to run this a few times for timing reasons). Notice the drop in batch requests/sec which shows a performance regression in your workload.
+
+5. Load recommendations.sql into SQL Server Management Studio or SQL Operations Studio and review the results. Notice the time difference under the reason column and value of state_transition_reason which should be AutomaticTuningOptionNotEnabled. This means we found a regression but are recommending it only, not automatically fixing it. The script column shows a query that could be used to fix the problem.
+
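+The columns called out above come from the sys.dm_db_tuning_recommendations DMV; a minimal query along these lines (a sketch, not the exact script shipped in the package) surfaces the same information:
+
+SELECT name, reason, valid_since, last_refresh,
+       JSON_VALUE(state, '$.currentValue') AS [state],
+       JSON_VALUE(state, '$.reason') AS state_transition_reason,
+       JSON_VALUE(details, '$.implementationDetails.script') AS script
+FROM sys.dm_db_tuning_recommendations;
+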
+6. Stop Scenario.cmd workload by pressing CTRL+C, and then choose "N" when prompted to terminate the batch.
+
+7. Now let's see what happens with automatic plan correction which uses this command in SQL Server 2017:
+
+ALTER DATABASE <db>
+SET AUTOMATIC_TUNING ( FORCE_LAST_GOOD_PLAN = ON )
+
+8. Run Auto_tune.cmd which uses the above command to set automatic plan correction ON for WideWorldImporters, and starts the same workload as Scenario.cmd
+
+9. Repeat steps 4-6 as above. In Performance Monitor you will see the batch requests/sec dip but within a second go right back up. This is because SQL Server detected the regression and automatically reverted to the "last known good" query plan as found in the Query Store. Note that in the output of recommendations.sql the state_transition_reason now says LastGoodPlanForced.

BIN
Sessions/SQLIntersection-2018/automatic-tuning/at-demo.zip


BIN
Sessions/SQLIntersection-2018/live_query_troubleshooting/live_query_troubleshooting.zip


+ 130 - 0
Sessions/SQLIntersection-2018/new_xevents/ExtendedEvents.sql

@@ -0,0 +1,130 @@
+-- LWP related extended events Demo
+
+CREATE EVENT SESSION [PerfStats_Node] ON SERVER
+ADD EVENT sqlserver.query_thread_profile(
+ACTION(sqlos.scheduler_id,sqlserver.database_id,sqlserver.is_system,sqlserver.plan_handle,sqlserver.query_hash_signed,sqlserver.query_plan_hash_signed,sqlserver.server_instance_name,sqlserver.session_id,sqlserver.session_nt_username,sqlserver.sql_text))
+--ADD TARGET package0.ring_buffer(SET max_memory=(25600))
+ADD TARGET package0.event_file(SET filename=N'C:\IP\Tiger\SQL Intersection\SQL Intersection Winter 2018\Gems to Help You Troubleshoot Query Performance\new_xevents\PerfStats_Node.xel',max_file_size=(50),max_rollover_files=(2))
+WITH (MAX_MEMORY=4096 KB,EVENT_RETENTION_MODE=ALLOW_SINGLE_EVENT_LOSS,MAX_DISPATCH_LATENCY=30 SECONDS,MAX_EVENT_SIZE=0 KB,MEMORY_PARTITION_MODE=NONE,TRACK_CAUSALITY=OFF,STARTUP_STATE=OFF)
+GO
+
+USE AdventureWorks2016CTP3
+GO
+
+DROP EVENT SESSION [PerfStats_Node] ON SERVER 
+GO
+ALTER EVENT SESSION [PerfStats_Node] ON SERVER STATE = start;  
+GO  
+
+-- Execute plan with many nodes
+SELECT e.[BusinessEntityID], 
+       p.[Title], 
+       p.[FirstName], 
+       p.[MiddleName], 
+       p.[LastName], 
+       p.[Suffix], 
+       e.[JobTitle], 
+       pp.[PhoneNumber], 
+       pnt.[Name] AS [PhoneNumberType], 
+       ea.[EmailAddress], 
+       p.[EmailPromotion], 
+       a.[AddressLine1], 
+       a.[AddressLine2], 
+       a.[City], 
+       sp.[Name] AS [StateProvinceName], 
+       a.[PostalCode], 
+       cr.[Name] AS [CountryRegionName], 
+       p.[AdditionalContactInfo] 
+FROM   [HumanResources].[Employee] AS e 
+       INNER JOIN [Person].[Person] AS p 
+       ON RTRIM(LTRIM(p.[BusinessEntityID])) = RTRIM(LTRIM(e.[BusinessEntityID])) 
+       INNER JOIN [Person].[BusinessEntityAddress] AS bea 
+       ON RTRIM(LTRIM(bea.[BusinessEntityID])) = RTRIM(LTRIM(e.[BusinessEntityID])) 
+       INNER JOIN [Person].[Address] AS a 
+       ON RTRIM(LTRIM(a.[AddressID])) = RTRIM(LTRIM(bea.[AddressID])) 
+       INNER JOIN [Person].[StateProvince] AS sp 
+       ON RTRIM(LTRIM(sp.[StateProvinceID])) = RTRIM(LTRIM(a.[StateProvinceID])) 
+       INNER JOIN [Person].[CountryRegion] AS cr 
+       ON RTRIM(LTRIM(cr.[CountryRegionCode])) = RTRIM(LTRIM(sp.[CountryRegionCode])) 
+       LEFT OUTER JOIN [Person].[PersonPhone] AS pp 
+       ON RTRIM(LTRIM(pp.BusinessEntityID)) = RTRIM(LTRIM(p.[BusinessEntityID])) 
+       LEFT OUTER JOIN [Person].[PhoneNumberType] AS pnt 
+       ON RTRIM(LTRIM(pp.[PhoneNumberTypeID])) = RTRIM(LTRIM(pnt.[PhoneNumberTypeID])) 
+       LEFT OUTER JOIN [Person].[EmailAddress] AS ea 
+       ON RTRIM(LTRIM(p.[BusinessEntityID])) = RTRIM(LTRIM(ea.[BusinessEntityID]))
+GO
+
+ALTER EVENT SESSION [PerfStats_Node] ON SERVER STATE = stop;  
+GO 
+
+-- Choose any event and let's open the associated cached plan.
+-- I want to see which operator this one is, and where in the plan it sits
+
+SELECT qp.query_plan 
+FROM sys.dm_exec_query_stats qs
+CROSS APPLY sys.dm_exec_query_plan(qs.plan_handle) qp
+WHERE CAST(qs.query_plan_hash AS BIGINT) = -832496756154281217
+GO
+
+-- How will I search for my node_id? Use showplan search in SSMS.
+
+-------------------------------
+
+-- Now for a new event. A LWP-based "query_post_execution_showplan"
+
+DROP EVENT SESSION [PerfStats_LWP_Plan] ON SERVER 
+GO
+CREATE EVENT SESSION [PerfStats_LWP_Plan] ON SERVER
+ADD EVENT sqlserver.query_plan_profile(
+ACTION(sqlos.scheduler_id,sqlserver.database_id,sqlserver.is_system,sqlserver.plan_handle,sqlserver.query_hash_signed,sqlserver.query_plan_hash_signed,sqlserver.server_instance_name,sqlserver.session_id,sqlserver.session_nt_username,sqlserver.sql_text))
+--ADD TARGET package0.ring_buffer(SET max_memory=(25600))
+ADD TARGET package0.event_file(SET filename=N'C:\IP\Tiger\SQL Intersection\SQL Intersection Winter 2018\Gems to Help You Troubleshoot Query Performance\new_xevents\PerfStats_LWP_Plan.xel',max_file_size=(50),max_rollover_files=(2))
+WITH (MAX_MEMORY=4096 KB,EVENT_RETENTION_MODE=ALLOW_SINGLE_EVENT_LOSS,MAX_DISPATCH_LATENCY=30 SECONDS,MAX_EVENT_SIZE=0 KB,MEMORY_PARTITION_MODE=NONE,TRACK_CAUSALITY=OFF,STARTUP_STATE=OFF)
+GO
+
+ALTER EVENT SESSION [PerfStats_LWP_Plan] ON SERVER STATE = start;  
+GO   
+
+-- Let's run the following query
+SELECT e.[BusinessEntityID], 
+       p.[Title], 
+       p.[FirstName], 
+       p.[MiddleName], 
+       p.[LastName], 
+       p.[Suffix], 
+       e.[JobTitle], 
+       pp.[PhoneNumber], 
+       pnt.[Name] AS [PhoneNumberType], 
+       ea.[EmailAddress], 
+       p.[EmailPromotion], 
+       a.[AddressLine1], 
+       a.[AddressLine2], 
+       a.[City], 
+       sp.[Name] AS [StateProvinceName], 
+       a.[PostalCode], 
+       cr.[Name] AS [CountryRegionName], 
+       p.[AdditionalContactInfo] 
+FROM   [HumanResources].[Employee] AS e 
+       INNER JOIN [Person].[Person] AS p 
+       ON RTRIM(LTRIM(p.[BusinessEntityID])) = RTRIM(LTRIM(e.[BusinessEntityID])) 
+       INNER JOIN [Person].[BusinessEntityAddress] AS bea 
+       ON RTRIM(LTRIM(bea.[BusinessEntityID])) = RTRIM(LTRIM(e.[BusinessEntityID])) 
+       INNER JOIN [Person].[Address] AS a 
+       ON RTRIM(LTRIM(a.[AddressID])) = RTRIM(LTRIM(bea.[AddressID])) 
+       INNER JOIN [Person].[StateProvince] AS sp 
+       ON RTRIM(LTRIM(sp.[StateProvinceID])) = RTRIM(LTRIM(a.[StateProvinceID])) 
+       INNER JOIN [Person].[CountryRegion] AS cr 
+       ON RTRIM(LTRIM(cr.[CountryRegionCode])) = RTRIM(LTRIM(sp.[CountryRegionCode])) 
+       LEFT OUTER JOIN [Person].[PersonPhone] AS pp 
+       ON RTRIM(LTRIM(pp.BusinessEntityID)) = RTRIM(LTRIM(p.[BusinessEntityID])) 
+       LEFT OUTER JOIN [Person].[PhoneNumberType] AS pnt 
+       ON RTRIM(LTRIM(pp.[PhoneNumberTypeID])) = RTRIM(LTRIM(pnt.[PhoneNumberTypeID])) 
+       LEFT OUTER JOIN [Person].[EmailAddress] AS ea 
+       ON RTRIM(LTRIM(p.[BusinessEntityID])) = RTRIM(LTRIM(ea.[BusinessEntityID]))
+OPTION (RECOMPILE, USE HINT('QUERY_PLAN_PROFILE'))
+GO
+
+ALTER EVENT SESSION [PerfStats_LWP_Plan] ON SERVER STATE = stop;  
+GO 
+
+-- Let's see the event and what it provides
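+
+-- One possible way to read the captured events back (a sketch - adjust the path
+-- if the session wrote its .xel files elsewhere):
+SELECT CAST(event_data AS XML) AS event_xml
+FROM sys.fn_xe_file_target_read_file(
+    N'C:\IP\Tiger\SQL Intersection\SQL Intersection Winter 2018\Gems to Help You Troubleshoot Query Performance\new_xevents\PerfStats_LWP_Plan*.xel',
+    NULL, NULL, NULL);
+GO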

+ 23 - 0
Sessions/SQLIntersection-2018/query-tuning-assistant/README.md

@@ -0,0 +1,23 @@
+This is a repro package to demonstrate how to upgrade a database compatibility level using Query Tuning Assistant. 
+This feature uses telemetry from the Query Store feature we launched with Azure SQL Database and SQL Server 2016 to detect upgrade-related regressions.
+
+This repro assumes the following:
+
+- SQL Server 2016+ installed (pick at minimum Database Engine) on Windows.
+- You have installed SQL Server Management Studio v18
+- You have downloaded the RML Utilities from https://www.microsoft.com/en-us/download/details.aspx?id=4511.
+- These demos use a named instance called SQL2017. You will need to edit the .cmd scripts which connect to SQL Server to change to a default instance or whatever named instance you have installed.
+
+0. Install ostress from the package RML_Setup_AMD64.msi. Add C:\Program Files\Microsoft Corporation\RMLUtils to your path.
+
+1. Attach the AdventureWorksDW2012 database to your SQL Server 2016+ instance. The adventure-works-2012-dw-data-file.mdf is provided in https://github.com/Microsoft/sql-server-samples/releases/tag/adventureworks2012
+
+3. Start QTA following instructions in https://docs.microsoft.com/en-us/sql/relational-databases/performance/upgrade-dbcompat-using-qta.
+
+4. When requested to run a baseline collection, run PreUpgrade.cmd to customize the AdventureWorksDW2012 database and start the demo. Run it to completion...
+
+5. After it completes, continue the QTA workflow. 
+
+6. When requested to re-run the same workload, run PostUpgrade.cmd. Run it to completion...
+
+7. After it completes, continue the QTA workflow.

BIN
Sessions/SQLIntersection-2018/query-tuning-assistant/qta-demo.zip


+ 60 - 0
Sessions/Winter-Ready-2019/Lab-AutoTuning.md

@@ -0,0 +1,60 @@
+---
+title: "Auto Tuning"
+date: "11/21/2018"
+author: Pedro Lopes
+---
+# Auto Tuning Lab 
+
+## Intro - Defining the problem and goal
+When migrating from an older version of SQL Server and [upgrading the database compatibility level](https://docs.microsoft.com/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) to the latest available, a workload may be exposed to the risk of performance regression. 
+
+Starting with SQL Server 2016, all query optimizer changes are gated to the latest database compatibility level, which in combination with Query Store gives you a great level of control over the query performance in the upgrade process if the upgrade follows the recommended workflow seen below. 
+
+![Recommended database upgrade workflow using Query Store](./media/query-store-usage-5.png "Recommended database upgrade workflow using Query Store") 
+
+In SQL Server 2016, users are able to use SSMS Query Store reports to identify **regressed queries** that occur post-database upgrade, and manually force a specific pre-upgrade plan to be used. This maps to the last step in the recommended workflow above. 
+
+![SSMS Query Store Reports](./media/objectexplorerquerystore_sql17.png "SSMS Query Store Reports") 
+
+This control over upgrades was further improved with SQL Server 2017 where [Automatic Tuning](https://docs.microsoft.com/sql/relational-databases/automatic-tuning/automatic-tuning) was introduced, allowing the last step in the recommended workflow above to be automated. This lab allows you to see the feature in action.
+
+## Lab requirements (pre-installed)
+The following are requirements to run this lab:
+
+- SQL Server 2017 or higher is installed. This feature requires Developer or Enterprise Edition.
+- You have installed SQL Server Management Studio or Azure Data Studio.
+- You have installed the RML Utilities from https://www.microsoft.com/en-us/download/details.aspx?id=4511. Install ostress from the package *RML_Setup_AMD64.msi*. Add `C:\Program Files\Microsoft Corporation\RMLUtils` to your path variable.
+- Restore the WideWorldImporters database to your SQL Server instance. The `WideWorldImporters` database is available in https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/wide-world-importers.
+
+## Lab
+
+1. Open the folder `C:\Labs\Lab-AutoTuning\SCENARIO` and double-click the file `Scenario.cmd` to execute it. This starts the workload. Leave it running...
+
+2. Setup Performance Monitor on Windows to track `SQL Statistics/Batch Requests/sec` counter. Alternatively, open the file `C:\Labs\Lab-AutoTuning\SCENARIO\Perfmon.htm` with Internet Explorer. Note you may receive a prompt warning that Internet Explorer blocked ActiveX content. Click on **Allow blocked content**. 
+
+    - Once the file opens, click on the green arrow button to **Unfreeze display**.
+    - You receive a prompt warning *this action will erase the data in the graph*. Click **Yes** to continue. This resumes the performance counter session previously saved for convenience.
+    - Leave it open...
+
+3. While the scenario workload is running, double-click the file `Regression.cmd` in the same folder to execute it. Notice the drop in the *batch requests/sec* counter which means a performance regression was introduced in the workload.
+
+4. Open the file `C:\Labs\Lab-AutoTuning\SCENARIO\recommendations.sql` with SQL Server Management Studio or SQL Operations Studio, execute it and review the results:
+    - The time difference under the *reason* column.
+    - The value of *state_transition_reason*, which should be ***AutomaticTuningOptionNotEnabled***. This means SQL Server found a regression but is in recommendation mode only, not automatically tuning anything. 
+    - The *script* column shows a query that could be used to fix the problem manually.
+
+5. Stop the scenario workload by pressing **CTRL+C**, and then choose **"N"** when prompted to terminate the batch.
+
+6. Now let's see what happens with automatic plan correction which uses this command in SQL Server 2017:
+
+    ```sql
+    ALTER DATABASE WideWorldImporters
+    SET AUTOMATIC_TUNING ( FORCE_LAST_GOOD_PLAN = ON )
+    ```
+
+7. Double-click the file `Auto_tune.cmd` in the same folder to execute it. This uses the above command to set automatic plan correction ON for WideWorldImporters, and restarts the workload file `Scenario.cmd`.
+
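+    After `Auto_tune.cmd` has started, you can confirm the option took effect by querying `sys.database_automatic_tuning_options` (a quick check, not part of the original lab scripts):
+
+    ```sql
+    SELECT name, desired_state_desc, actual_state_desc, reason_desc
+    FROM WideWorldImporters.sys.database_automatic_tuning_options;
+    ```
+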
+8. Repeat steps 3-5 as above, and note the following:     
+    - In the Performance Monitor window you will see the **Batch Requests/sec** counter dip, but within a second go right back up.    
+    This is because SQL Server detected the regression and automatically reverted to the last known good query plan found in the Query Store. 
+    - If closed, re-open the file `C:\Labs\Lab-AutoTuning\SCENARIO\recommendations.sql` with SQL Server Management Studio or SQL Operations Studio, execute it and notice the *state_transition_reason* column value is now ***LastGoodPlanForced***.

+ 328 - 0
Sessions/Winter-Ready-2019/Lab-Containers.md

@@ -0,0 +1,328 @@
+---
+title: "SQL Server Containers Lab"
+date: "11/21/2018"
+author: Vin Yu
+---
+# SQL Server Containers Lab
+
+## Pre Lab
+1. Install docker engine by running the following:
+
+```
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+
+sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+sudo yum install http://mirror.centos.org/centos/7/extras/x86_64/Packages/pigz-2.3.3-1.el7.centos.x86_64.rpm
+
+sudo yum install docker-ce
+```
+
+Check the status of the docker engine:
+```
+sudo systemctl status docker
+ ```
+
+If it is not running, start it by running:
+``` 
+sudo systemctl start docker
+```
+>Note: for this lab, we are installing Docker for CentOS; this works on CentOS or RHEL due to the similarity of the two OSes. For production usage on RHEL, install Docker EE for RHEL: https://docs.docker.com/install/linux/docker-ee/rhel/.
+ 
+2. Clone this repo by running the following:
+
+    Note: If you have already done this in the prelab you can skip this step
+
+```
+sudo yum install git
+git clone https://github.com/Microsoft/sqllinuxlabs.git
+```
+---
+
+## Lab
+### 1. Getting started with SQL Server in Containers
+
+#### Introduction
+In this section you will run SQL Server in a container and connect to it with SSMS/SQL Operations Studio. This is the easiest way to get started with SQL Server in containers.  
+ 
+#### Steps
+1. Change the `SA_PASSWORD` in the command below and run it in your terminal:
+``` 
+sudo docker run -e 'ACCEPT_EULA=Y' -e 'SA_PASSWORD=YourStrong!Passw0rd' \
+      -p 1500:1433 --name sql1 \
+      -d microsoft/mssql-server-linux:2017-latest
+ ```
+
+> Tip: edit commands in a text editor prior to pasting in the terminal to easily edit the commands.
+>
+> Note: By default, the password must be at least 8 characters long and contain characters from three of the following four sets: Uppercase letters, Lowercase letters, Base 10 digits, and Symbols.
+
+ 
+2. Check that SQL Server is running:
+```
+sudo docker ps
+```
+
+![GettingStartedResults.PNG](./media/Container-GettingStartedResults.png)
+
+3. Connect to SQL Server in container using SSMS or SQL Ops Studio.
+
+Open SSMS or SQL Ops Studio and connect to the SQL Server instance in the container by connecting to the host:
+
+```
+<host IP>, 1500
+```
+>Note: If you are running this in an Azure VM, the host IP is the public Azure VM IP. You will also need to open port 1500 to external traffic. [Go here to learn how to open ports in Azure VMs](/open_azure_vm_port)
+
+![GettingStartedOpsStudio.PNG](./media/Container-GettingStartedOpsStudio.png)
+
+4. Run SQLCMD inside the container. First, run bash interactively in the container by using docker exec to run 'bash' inside the 'sql1' container. 
+
+```
+sudo docker exec -it sql1 bash
+```
+Use SQLCMD within the container to connect to SQL Server:
+```
+/opt/mssql-tools/bin/sqlcmd -U SA -P 'YourStrong!Passw0rd'
+```
+![sqlcmd.PNG](./media/Container-ExecSQLCMD.png)
+
+Exit SQLCMD and the container with exit:
+```
+exit
+```
+
+
+> **Key Takeaway**
+> 
+>SQL Server running in a container is the same SQL Server engine as it is on Linux OS or Windows.
+ 
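+You can see this for yourself from the sqlcmd session above with a quick query (a sketch; `sys.dm_os_host_info` is available in SQL Server 2017 and later):
+
+```
+SELECT @@VERSION AS sql_version;
+SELECT host_platform, host_distribution, host_release FROM sys.dm_os_host_info;
+```
+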
+---
+
+### 2. Explore Docker Basics
+#### Introduction
+ In this section you'll learn the basics of how to navigate container images and active containers on your host.
+
+#### Steps
+Enter the following commands in your terminal.
+
+See the active container instances:
+```
+sudo docker ps
+```
+List all container images:
+```
+sudo docker image ls
+```
+Stop the SQL Server container:
+```
+sudo docker stop sql1
+```
+See that `sql1` is no longer running by listing all containers: 
+```
+sudo docker ps -a
+```
+Delete the container:
+```
+sudo docker rm sql1
+```
+See that the container no longer exists:
+```
+sudo docker container ls
+```
+![DockerCommands.PNG](./media/Container-DockerCommands.png)
+
+> **Key Takeaway**
+>
+> A container is launched by running an image. An **image** is an executable package that includes everything needed to run an application--the code, a runtime, libraries, environment variables, and configuration files.
+>
+>A **container** is a runtime instance of an image--what the image becomes in memory when executed (that is, an image with state, or a user process). You can see a list of your running containers with the command, docker ps, just as you would in Linux.
+> 
+> -- https://docs.docker.com/get-started/
+ 
+---
+
+### 3.  Build your own container 
+
+#### Introduction:
+In the past, if you were to set up a new SQL Server environment or dev test, your first order of business was to install a SQL Server onto your machine. But, that creates a situation where the environment on your machine might not match test/production.
+
+With Docker, you can get SQL Server as an image, no installation necessary. Then, your build can include the base SQL Server image right alongside any additional environment needs, ensuring that your SQL Server instance, its dependencies, and the runtime, all travel together.
+
+In this section you will build your own container layered on top of the SQL Server image. 
+
+Scenario: Let's say that, for testing purposes, you want to start the container with the same state every time. We'll copy a .bak file into the container, which can then be restored with T-SQL.  
+
+ 
+#### Steps:
+
+1. Change directory to the `mssql-custom-image-example folder`.
+```
+cd sqllinuxlabs/containers/mssql-custom-image-example/
+```
+
+
+2. Create a Dockerfile with the following contents
+```
+cat <<EOF>> Dockerfile
+FROM microsoft/mssql-server-linux:latest
+COPY ./SampleDB.bak /var/opt/mssql/data/SampleDB.bak
+CMD ["/opt/mssql/bin/sqlservr"]
+EOF
+```
+
+3. View the contents of the Dockerfile 
+
+```
+cat Dockerfile
+```
+![dockerfile.PNG](./media/Container-Dockerfile.png)
+
+4. Run the following to build your container
+```
+sudo docker build . -t mssql-with-backup-example
+```
+![GettingStartedOpsStudio.PNG](./media/Container-BuildOwnContainer.png)
+
+5. Start the container by running the following command after replacing `SA_PASSWORD` with your password
+```
+sudo docker run -e 'ACCEPT_EULA=Y' -e 'SA_PASSWORD=YourStrong!Passw0rd' \
+      -p 1500:1433 --name sql2 \
+      -d mssql-with-backup-example
+```
+
+6. Edit the `-P` value to match the `SA_PASSWORD` used in the previous command, and view the contents of the backup file built into the image:
+
+```
+   sudo docker exec -it sql2 /opt/mssql-tools/bin/sqlcmd -S localhost \
+   -U SA -P 'YourStrong!Passw0rd' \
+   -Q 'RESTORE FILELISTONLY FROM DISK = "/var/opt/mssql/data/SampleDB.bak"' \
+   -W \
+   | tr -s ' ' | cut -d ' ' -f 1-2
+```
+
+The output of this command should be similar to this:
+>LogicalName PhysicalName
+>----------- ------------
+>ProductCatalog /var/opt/mssql/data/ProductCatalog.mdf
+>ProductCatalog_log /var/opt/mssql/data/ProductCatalog_log.ldf
+
+7. Edit the `-P` value to match the `SA_PASSWORD` used to start the container, and restore the database in the container:
+```
+sudo docker exec -it sql2 /opt/mssql-tools/bin/sqlcmd \
+   -S localhost -U SA -P YourStrong!Passw0rd \
+   -Q 'RESTORE DATABASE ProductCatalog FROM DISK = "/var/opt/mssql/data/SampleDB.bak" WITH MOVE "ProductCatalog" TO "/var/opt/mssql/data/ProductCatalog.mdf", MOVE "ProductCatalog_log" TO "/var/opt/mssql/data/ProductCatalog.ldf"'
+
+```
+The output of this command should be similar to this:
+
+>Processed 384 pages for database 'ProductCatalog', file  'ProductCatalog' on file 1.
+>
+>Processed 8 pages for database 'ProductCatalog', file 'ProductCatalog_log' on file 1.
+>
+>RESTORE DATABASE successfully processed 392 pages in 0.278 seconds (11.016 MB/sec).
+
+If you connect to the instance, you should see that the database was restored.
+ 
+![RestoredDB.PNG](./media/Container-RestoredDB.png)
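+
+You can also verify this without a client tool by reusing the same `docker exec ... sqlcmd` approach as above to run a quick query (a sketch):
+```
+SELECT name, state_desc FROM sys.databases WHERE name = 'ProductCatalog';
+```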
+
+8. Clean up the container
+```
+sudo docker stop sql2
+sudo docker container rm sql2
+```
+
+
+> **Key Takeaway**
+>
+> A **Dockerfile** defines what goes on in the environment inside your container. Access to resources like networking interfaces and disk drives is virtualized inside this environment, which is isolated from the rest of your system, so you need to map ports to the outside world, and be specific about what files you want to “copy in” to that environment. However, after doing that, you can expect that the build of your app defined in this Dockerfile behaves exactly the same wherever it runs.
+>
+> -- https://docs.docker.com/get-started/part2/#your-new-development-environment
+---
+
+ ### 4. Run a Containerized Application with SQL Server
+ 
+ #### Introduction
+  
+Most applications involve multiple containers. In this section you will use docker-compose to run a .Net Core application container that connects to a SQL Server container.
+
+#### Steps
+
+1. Install docker-compose:
+```
+sudo curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
+
+sudo chmod +x /usr/local/bin/docker-compose
+
+sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+```
+
+
+2. Change directory to the mssql-aspcore-example.
+
+```
+cd sqllinuxlabs/containers/mssql-aspcore-example 
+```
+
+>note: if you just finished the **Build your own container** lab, you can navigate to this folder with the following command:
+>
+> `cd ../../containers/mssql-aspcore-example `
+
+3. Open the docker-compose.yml file 
+```
+nano docker-compose.yml
+```
+
+4. Edit the `SA_PASSWORD` SQL Server environment variables, then save the file with `ctrl + x`
+
+![DockerCompose.PNG](./media/Container-DockerCompose.png)
+
+5. Edit the `-P` parameter in the `./mssql-aspcore-example-db/db-init.sh` file with the `SA_PASSWORD` that you used in the previous step 
+```
+nano ./mssql-aspcore-example-db/db-init.sh
+```
+
+![db-sh.PNG](./media/Container-db-sh.png)
+
+6. Run the containers with docker-compose:
+```
+sudo docker-compose up
+```
+>note: this will take approx. 15 seconds
+
+
+7. At this point, you will have two containers up and running: an application container that is able to query the database container. Connect to the application from a browser at:
+
+```
+http://<host IP>:5000
+```
+>Note: If you are running this in an Azure VM, the host IP is the Azure VM Public IP. You will also need to open port 5000 to external traffic. [Go here to learn how to open ports in Azure VMs](/open_azure_vm_port) (be sure to open port 5000!)
+
+![DockerComposeUp.PNG](./media/Container-DockerComposeUp.png)
+
+To stop the docker compose application, press `ctrl + c` in the terminal. 
+To remove the containers run the following command:
+
+```
+sudo docker-compose down
+```
+
+
+### Start-up Explanation
+
+1. Running `docker-compose up` builds/pulls the containers and runs them with the parameters defined in docker-compose.yml
+2. The .Net Core application container starts up  
+3. The SQL Server container starts up with `entrypoint.sh`
+
+    a. The sqlservr process starts 
+
+    b. A start script is run to apply the schema needed by the application     
+
+4. The .Net Core application is now able to connect to the SQL Server container with all the necessary schema in place
+
+
+> **Key Takeaway**
+>
+> **Compose** is a tool for defining and running multi-container Docker applications. With Compose, you use a YAML file to configure your application’s services. Then, with a single command, you create and start all the services from your configuration.
+>
+> --https://docs.docker.com/compose/overview/

+ 504 - 0
Sessions/Winter-Ready-2019/Lab-IQP.md

@@ -0,0 +1,504 @@
+---
+title: "Intelligent Query Processing"
+date: "11/21/2018"
+author: Pedro Lopes and Joe Sack
+---
+# Intelligent Query Processing Lab 
+
+## Intro - Defining the problem and goal
+ [Intelligent query processing in SQL databases](https://docs.microsoft.com/sql/relational-databases/performance/intelligent-query-processing) means that critical parallel workloads improve when running at scale, while remaining adaptive to the constantly changing world of data. 
+
+Intelligent Query Processing is available by default on the latest Database Compatibility Level setting and delivers broad impact that improves the performance of existing workloads with minimal implementation effort.
+
+Intelligent Query Processing in SQL Server 2019 expands upon the Adaptive Query Processing feature family in SQL Server 2017. 
+
+The Intelligent Query Processing suite is meant to rectify some of the common query performance problems by taking some automatic corrective approaches during runtime. It leverages a feedback loop based on statistics collected from past executions to improve subsequent executions.  
+
+![Intelligent query processing feature suite](./media/iqpfeaturefamily.png "Intelligent query processing feature suite") 
+
+## Lab requirements (pre-installed)
+The following are requirements to run this lab:
+
+- SQL Server 2019 is installed. 
+- You have installed SQL Server Management Studio.
+- Restore the **tpch** and **WideWorldImportersDW** databases to your SQL Server instance. The `WideWorldImportersDW` database is available in https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/wide-world-importers. The **tpch** database can be procured at http://www.tpc.org.
+
+## Lab
+
+### Exercise 1 - Batch Mode Memory Grant Feedback (MGF)
+
+Queries may spill to disk or take too much memory based on poor cardinality estimates. MGF adjusts memory grants based on execution feedback, and removes spills to improve concurrency for repeating queries. In SQL Server 2017, MGF was only available for Batch Mode (which means Columnstore had to be in use). In SQL Server 2019 and Azure SQL DB, MGF was extended to also work in Row Mode, which means it's available for all queries running on the SQL Server Database Engine.
+
+1. Open SSMS and connect to the SQL Server 2019 instance (default instance). Click on **New Query** or press CTRL+N.
+
+    ![New Query](./media/new_query.png "New Query") 
+
+2. Setup the database to ensure the latest database compatibility level is set, by running the commands below in the query window:
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE WideWorldImportersDW 
+	SET COMPATIBILITY_LEVEL = 150;
+    GO
+
+    ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE;
+    GO
+    ```
+
+3. Simulate an outdated statistics scenario and create a stored procedure, by running the commands below in the query window:
+
+    ```sql
+    USE WideWorldImportersDW;
+    GO
+    
+    UPDATE STATISTICS [Fact].[Order] 
+    WITH ROWCOUNT = 1, PAGECOUNT = 1;
+    GO
+
+    CREATE OR ALTER PROCEDURE dbo.StockItems
+    AS
+    SELECT [Color], SUM([Quantity]) AS SumQty
+    FROM [Fact].[Order] AS [fo]
+    INNER JOIN [Dimension].[Stock Item] AS [si] 
+        ON [fo].[Stock Item Key] = [si].[Stock Item Key]
+    GROUP BY  [Color];
+    GO
+    ```
+
+4. For the next steps, looking at the query execution plan is needed. Click on **Include Actual Plan** or press CTRL+M.
+
+    ![Include Actual Plan](./media/ActualPlan.png "Include Actual Plan") 
+
+5. Execute the stored procedure once, by running the command below in the query window: 
+
+    ```sql
+    EXEC dbo.StockItems;
+    ```
+
+6. Notice the query execution plan, namely the yellow warning sign over the join. Hovering over it exposes a number of properties, such as the details of a spill to TempDB, which slowed down the query's performance. Spills happen when the granted query memory was not enough to process entirely in memory.
+
+    ![MGF Spill](./media/MGF_Spill.png "MGF Spill") 
+
+7. Right-click the query execution plan root node - the **SELECT** - and click on **Properties**.     
+    In the ***Properties*** window, expand **MemoryGrantInfo**. Note that:
+    - The property ***LastRequestedMemory*** is zero because this is the first execution. 
+    - The current status of whether this query has been adjusted by MGF is exposed by the ***IsMemoryGrantFeedbackAdjusted*** property. In this case the value is **NoFirstExecution**. This means there was no adjustment because it is the first time the query is executing.
+
+    ![MGF Properties - 1st Exec](./media/MGF_Properties_FirstExec.png "MGF Properties - 1st Exec") 
+
+8. Execute the stored procedure again. 
+
+9. Click on the query execution plan root node - the **SELECT**. Observe:
+    - The ***IsMemoryGrantFeedbackAdjusted*** property value is **YesAdjusting**.
+    - The ***LastRequestedMemory*** property is now populated with the previous requested memory grant. 
+    - The ***GrantedMemory*** property is greater than the ***LastRequestedMemory*** property. This indicates that more memory was granted, although the spill might still occur, which means SQL Server is still adjusting to the runtime feedback.
+
+10. Execute the stored procedure again, and repeat until the yellow warning sign over the join disappears. This will indicate that there are no more spills. Then execute one more time.
+
+11. Click on the query execution plan root node - the **SELECT**. Observe:
+    - The ***IsMemoryGrantFeedbackAdjusted*** property value is **YesStable**.
+    - The ***GrantedMemory*** property is now the same as the ***LastRequestedMemory*** property. This indicates that the optimal memory grant was found and adjusted by MGF.
+
+Note that different parameter values may also require different query plans in order to remain optimal. This type of query is defined as **parameter-sensitive**. For parameter-sensitive plans (PSP), MGF will disable itself on a query if it has unstable memory requirements over a few executions.
+
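+If you want to compare against the pre-MGF behavior, the feature can also be switched off per database (a sketch; these database scoped configuration options are available in SQL Server 2019):
+
+```sql
+-- Disable memory grant feedback for batch mode and row mode plans
+ALTER DATABASE SCOPED CONFIGURATION SET BATCH_MODE_MEMORY_GRANT_FEEDBACK = OFF;
+ALTER DATABASE SCOPED CONFIGURATION SET ROW_MODE_MEMORY_GRANT_FEEDBACK = OFF;
+-- Re-enable when done comparing
+ALTER DATABASE SCOPED CONFIGURATION SET BATCH_MODE_MEMORY_GRANT_FEEDBACK = ON;
+ALTER DATABASE SCOPED CONFIGURATION SET ROW_MODE_MEMORY_GRANT_FEEDBACK = ON;
+```
+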
+---
+
+### Exercise 2 - Table Variable (TV) Deferred Compilation
+
+Table Variables are suitable for small intermediate result sets, usually no more than a few hundred rows. However, if these constructs have more rows, the legacy behavior of handling a TV is prone to performance issues.    
+
+The legacy behavior mandates that a statement that references a TV is compiled along with all other statements, before any statement that populates the TV is executed. Because of this, SQL Server estimates that only 1 row would be present in a TV at compilation time.    
+
+Starting with SQL Server 2019, the compilation of a statement that references a TV is deferred until the first actual execution of the statement. This means that SQL Server estimates cardinality more accurately and produces better-optimized query plans, based on the actual number of rows in the TV at its first execution.
+
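+On versions prior to SQL Server 2019 (or under lower database compatibility levels), a common workaround for this estimation skew is adding `OPTION (RECOMPILE)` to the statement that reads the TV, so that statement is compiled after the TV is populated. A sketch using the same batch as the lab below, trading a recompile on every execution for the better estimate:
+
+```sql
+DECLARE @LINEITEMS TABLE (L_OrderKey INT NOT NULL, L_Quantity INT NOT NULL);
+
+INSERT @LINEITEMS
+SELECT TOP 750000 L_OrderKey, L_Quantity
+FROM dbo.lineitem
+WHERE L_Quantity = 43;
+
+SELECT O_OrderKey, O_CustKey, O_OrderStatus, L_QUANTITY
+FROM ORDERS, @LINEITEMS
+WHERE O_ORDERKEY = L_ORDERKEY
+    AND O_OrderStatus = 'O'
+OPTION (RECOMPILE);
+```
+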
+1. Open SSMS and connect to the SQL Server 2019 instance (default instance). Click on **New Query** or press CTRL+N.
+
+    ![New Query](./media/new_query.png "New Query") 
+
+2. Setup the database to ensure the database compatibility level of SQL Server 2017 is set, by running the commands below in the query window:
+
+    > **Note:**
+    > This ensures the database engine behavior related to Table Variables is mapped to a version lower than SQL Server 2019.
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE [tpch10g-btree] 
+    SET COMPATIBILITY_LEVEL = 140;
+    GO
+    ```
+
+3. For the next steps, looking at the query execution plan is needed. Click on **Include Actual Plan** or press CTRL+M.
+
+    ![Include Actual Plan](./media/ActualPlan.png "Include Actual Plan") 
+
+4. Execute the command below in the query window: 
+
+    > **Note:**
+    > This should take between 1 and 5 minutes.
+
+    ```sql
+    USE [tpch10g-btree];
+    GO
+
+    DECLARE @LINEITEMS TABLE 
+	(
+        L_OrderKey INT NOT NULL,
+	    L_Quantity INT NOT NULL
+	);
+
+    INSERT @LINEITEMS
+    SELECT TOP 750000 L_OrderKey, L_Quantity
+    FROM dbo.lineitem
+    WHERE L_Quantity = 43;
+
+    SELECT O_OrderKey, O_CustKey, O_OrderStatus, L_QUANTITY
+    FROM ORDERS, @LINEITEMS
+    WHERE O_ORDERKEY = L_ORDERKEY
+        AND O_OrderStatus = 'O';
+    GO
+    ```
+
+5. Observe the shape of the query execution plan: it is a serial plan, and Nested Loops joins were chosen given the low estimated number of rows.
+
+6. Click on the **Table Scan** operator in the query execution plan, and hover your mouse over the operator. Observe:
+    - The ***Actual Number of Rows*** is 750000.
+    - The ***Estimated Number of Rows*** is 1. 
+    This indicates the legacy behavior of misusing a TV, with the huge estimation skew.
+
+    ![Table Variable legacy behavior](./media/TV_Legacy.png "Table Variable legacy behavior") 
+
+
+7. Setup the database to ensure the latest database compatibility level is set, by running the commands below in the query window:
+
+    > **Note:**
+    > This ensures the database engine behavior related to Table Variables is mapped to SQL Server 2019.
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE [tpch10g-btree] 
+    SET COMPATIBILITY_LEVEL = 150;
+    GO
+    ```
+
+8. Execute the same command as in step 4. 
+
+9. Observe the shape of the query execution plan now: it is a parallel plan, and a single Hash Join was chosen given the high estimated number of rows.
+
+10. Click on the **Table Scan** operator in the query execution plan, and hover your mouse over the operator. Observe:
+    - The ***Actual Number of Rows*** is 750000.
+    - The ***Estimated Number of Rows*** is 750000. 
+    This indicates the new behavior of TV deferred compilation, with no estimation skew and a better query execution plan, which also executed much faster (~20 seconds).
+
+    ![Table Variable deferred compilation](./media/TV_New.png "Table Variable deferred compilation") 
+
+---
+
+### Exercise 3 - Batch Mode on Rowstore
+
+The query optimizer has (until now) considered batch-mode processing only for queries that involve at least one table with a Columnstore index. SQL Server 2019 is introducing **Batch Mode on Rowstore**, which means that use of columnstore indexes is not a condition to use batch mode processing anymore.    
+
+However, Batch Mode is especially useful when processing large numbers of rows, such as in analytical queries, which means users won't see Batch Mode used on every query. A rough initial check involves table sizes, operators used, and estimated cardinalities in the input query. Additional checkpoints are used as the optimizer discovers new, cheaper plans for the query, and if plans do not make significant use of batch mode, the optimizer stops exploring batch mode alternatives.
+
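+Besides the per-query `DISALLOW_BATCH_MODE` hint used below, the behavior can also be controlled per database (a sketch, assuming SQL Server 2019):
+
+```sql
+-- Disable batch mode on rowstore for the current database
+ALTER DATABASE SCOPED CONFIGURATION SET BATCH_MODE_ON_ROWSTORE = OFF;
+-- Re-enable it (the default under compatibility level 150)
+ALTER DATABASE SCOPED CONFIGURATION SET BATCH_MODE_ON_ROWSTORE = ON;
+```
+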
+1. Open SSMS and connect to the SQL Server 2019 instance (default instance). Click on **New Query** or press CTRL+N.
+
+    ![New Query](./media/new_query.png "New Query") 
+
+2. Setup the database to ensure the latest database compatibility level is set, by running the commands below in the query window:
+
+    > **Note:**
+    > This ensures the database engine behavior is mapped to SQL Server 2019, which is required for Batch Mode on Rowstore.
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE [tpch10g-btree] 
+    SET COMPATIBILITY_LEVEL = 150;
+    GO
+
+    USE [tpch10g-btree];
+    GO
+    
+    ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE;
+    GO
+    ```
+
+3. For the next steps, looking at the query execution plan is needed. Click on **Include Actual Plan** or press CTRL+M.
+
+    ![Include Actual Plan](./media/ActualPlan.png "Include Actual Plan") 
+
+4. Execute the command below in the query window: 
+
+    > **Note:**
+    > The hint forces row mode and should take about 1 to 2 minutes.
+
+    ```sql
+    SELECT L_RETURNFLAG,
+        L_LINESTATUS,
+        SUM(L_QUANTITY) AS SUM_QTY,
+        SUM(L_EXTENDEDPRICE) AS SUM_BASE_PRICE,
+        COUNT(*) AS COUNT_ORDER
+    FROM LINEITEM
+    WHERE L_SHIPDATE <= dateadd(dd, -73, '1998-12-01')
+    GROUP BY L_RETURNFLAG,
+            L_LINESTATUS
+    ORDER BY L_RETURNFLAG,
+            L_LINESTATUS
+    OPTION (RECOMPILE, USE HINT('DISALLOW_BATCH_MODE'));
+    ```
+
+5. Observe the query execution plan and note there is no columnstore in use.     
+    Click on the **Clustered Index Seek** operator in the query execution plan, and hover your mouse over the operator. Observe:
+    - The ***Actual Number of Rows*** is over 59 Million.
+    - The ***Estimated Number of Rows*** is the same. This indicates no estimation skews.
+    - The ***Actual Execution Mode*** and ***Estimated Execution Mode*** show "Row", with ***Storage*** being "RowStore".
+
+    ![Batch Mode on Rowstore disabled](./media/BMRS_Row.png "Batch Mode on Rowstore disabled")  
+
+6. Execute the command below in the query window: 
+
+    ```sql
+    SELECT L_RETURNFLAG,
+        L_LINESTATUS,
+        SUM(L_QUANTITY) AS SUM_QTY,
+        SUM(L_EXTENDEDPRICE) AS SUM_BASE_PRICE,
+        COUNT(*) AS COUNT_ORDER
+    FROM LINEITEM
+    WHERE L_SHIPDATE <= dateadd(dd, -73, '1998-12-01')
+    GROUP BY L_RETURNFLAG,
+            L_LINESTATUS
+    ORDER BY L_RETURNFLAG,
+            L_LINESTATUS
+    OPTION (RECOMPILE);
+    ```
+
+7. Observe query execution plan and note there is still no columnstore in use.    
+    Click on the **Clustered Index Seek** operator in the query execution plan, and hover your mouse over the operator. Observe:
+    - The ***Actual Number of Rows*** and ***Estimated Number of Rows*** remain the same as before. Over 59 Million rows.
+    - The ***Actual Execution Mode*** and ***Estimated Execution Mode*** show "Batch", with ***Storage*** still being "RowStore". 
+    This indicates the new behavior of allowing eligible queries to execute in Batch Mode over Rowstore, whereas up to SQL Server 2017 this was only allowed over Columnstore. It also executed much faster (~3 seconds).
+
+    ![Batch Mode on Rowstore](./media/BMRS_Batch.png "Batch Mode on Rowstore") 
+
+---
+
+### Exercise 4 - Scalar UDF Inlining 
+
+User-Defined Functions that are implemented in Transact-SQL and return a single data value are referred to as T-SQL Scalar User-Defined Functions. T-SQL UDFs are an elegant way to achieve code reuse and modularity across SQL queries, and help in building up complex logic without requiring expertise in writing complex SQL queries.
+
+However, Scalar UDF can introduce performance issues in workloads. Here are a few reasons why:
+
+- **Iterative invocation**: Invoked once per qualifying row. Repeated context switching – and even worse for UDFs that execute SQL queries in their definition
+- **Lack of costing**: Scalar operators are not costed (realistically).
+- **Interpreted execution**: Each statement itself is compiled, and the compiled plan is cached. No cross-statement optimizations are carried out.
+- **Serial execution**: SQL Server does not allow intra-query parallelism in queries that invoke UDFs.
+
+In SQL Server 2019, the ability to inline Scalar UDFs means we can enable the benefits of UDFs without the performance penalty, for queries that invoke scalar UDFs where UDF execution is the main bottleneck. Using query rewriting techniques, UDFs are transformed into equivalent relational expressions that are “inlined” into the calling query with which the query optimizer can work to find more efficient plans.
+
+> **Note:**
+> Not all T-SQL constructs are inlineable, such as when the UDF is:
+> - Invoking any intrinsic function that is either time-dependent (such as `GETDATE()`) or has side effects (such as `NEWSEQUENTIALID()`)
+> - Referencing table variables or table-valued parameters
+> - Referencing scalar UDF call in its `GROUP BY` clause
+> - Natively compiled (interop is supported)
+> - Used in a computed column or a check constraint definition
+> - Referencing user-defined types
+> - Used in a partition function
+
+> **Important:**
+> If a scalar UDF is inlineable, it does not imply that it will always be inlined. SQL Server will decide (on a per-query, per-UDF basis) whether to inline a UDF or not.
+
+1. Open SSMS and connect to the SQL Server 2019 instance (default instance). Click on **New Query** or press CTRL+N.
+
+    ![New Query](./media/new_query.png "New Query") 
+
+2. Setup the database to ensure the latest database compatibility level is set, by running the commands below in the query window:
+
+    > **Note:**
+    > This ensures the database engine behavior is mapped to SQL Server 2019, which is required for Scalar UDF Inlining.
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE [tpch10g-btree] 
+    SET COMPATIBILITY_LEVEL = 150;
+    GO
+
+    USE [tpch10g-btree];
+    GO
+    
+    ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE;
+    GO
+    ```
+
+3. Create a UDF that does data access, by running the commands below in the query window:
+
+    ```sql
+    CREATE OR ALTER FUNCTION dbo.CalcAvgQuantityByPartKey
+        (@PartKey INT)
+    RETURNS INT
+    AS
+    BEGIN
+            DECLARE @Quantity INT
+
+            SELECT @Quantity = AVG([L_Quantity])
+            FROM [dbo].[lineitem]
+            WHERE [L_PartKey] = @PartKey
+
+            RETURN (@Quantity)
+    END
+    GO
+    ```
+
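+    Optionally, check whether SQL Server considers the new UDF inlineable via the `is_inlineable` column of `sys.sql_modules` (a quick check; this column is available in SQL Server 2019):
+
+    ```sql
+    SELECT OBJECT_NAME(object_id) AS udf_name, is_inlineable
+    FROM sys.sql_modules
+    WHERE object_id = OBJECT_ID(N'dbo.CalcAvgQuantityByPartKey');
+    ```
+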
+4. For the next steps, looking at the query execution plan is needed. Click on **Include Actual Plan** or press CTRL+M.
+
+    ![Include Actual Plan](./media/ActualPlan.png "Include Actual Plan") 
+
+5. Execute the command below in the query window: 
+
+    > **Note:**
+    > The hint forcibly disables UDF inlining and should take between 20 and 30 seconds.
+
+    ```sql
+    SELECT TOP 1000
+        L_OrderKey,
+        L_PartKey,
+        L_SuppKey,
+        L_ExtendedPrice,
+        dbo.CalcAvgQuantityByPartKey(L_PartKey)
+    FROM dbo.lineitem
+    WHERE L_Quantity > 44
+    ORDER BY L_Tax DESC
+    OPTION (RECOMPILE,USE HINT('DISABLE_TSQL_SCALAR_UDF_INLINING'));
+    ```
+
+6. Observe the query execution plan shape and note:
+    - The overall elapsed time.
+    - The time spent on each operator. 
+    - The fact that the plan executed in serial mode.
+    - The **Compute Scalar** operator obfuscates the UDF logic, and its estimated cost is unrealistically low (zero percent relative to the entire plan).
+
+    ![Scalar UDF not inlined](./media/UDF_NotInlined.png "Scalar UDF not inlined") 
+
+7. Now execute the command below in the query window: 
+
+    ```sql
+    SELECT TOP 1000
+        L_OrderKey,
+        L_PartKey,
+        L_SuppKey,
+        L_ExtendedPrice,
+        dbo.CalcAvgQuantityByPartKey(L_PartKey)
+    FROM dbo.lineitem
+    WHERE L_Quantity > 44
+    ORDER BY L_Tax DESC
+    OPTION (RECOMPILE);
+    ```
+
+8. Observe the query execution plan shape and note:
+    - The overall elapsed time dropped.
+        > **Note:**
+        > The metrics you observed reflect a first-time execution, and the **RECOMPILE** hint forces a fresh compilation.
+        > Removing the **RECOMPILE** hint and executing the same statements multiple times should yield lower execution times, while maintaining the relative performance difference.
+
+    - The plan has inlined all the logic that was obfuscated by the UDF in the previous plan. 
+    - The fact that the plan executed in parallel.
+    - The Database Engine was able to identify a potentially missing index with a higher projected impact, precisely because it was able to inline the UDF.
+    - The inlined scalar UDF allows us to see that a **Sort** operator is spilling. Memory Grant Feedback (MGF) can resolve this spill after a few executions if the **RECOMPILE** hint isn't used.
+
+    ![Scalar UDF inlined](./media/UDF_Inlined.png "Scalar UDF inlined") 
+
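+If you need to control this behavior explicitly, inlining can be turned off at the database level or for an individual UDF. A minimal sketch, reusing the UDF from this exercise:
+
+```sql
+-- Disable Scalar UDF Inlining for the current database (set back to ON to re-enable)
+ALTER DATABASE SCOPED CONFIGURATION SET TSQL_SCALAR_UDF_INLINING = OFF;
+GO
+
+-- Or opt a single UDF out of inlining with the INLINE function option
+CREATE OR ALTER FUNCTION dbo.CalcAvgQuantityByPartKey (@PartKey INT)
+RETURNS INT
+WITH INLINE = OFF
+AS
+BEGIN
+    DECLARE @Quantity INT;
+
+    SELECT @Quantity = AVG([L_Quantity])
+    FROM [dbo].[lineitem]
+    WHERE [L_PartKey] = @PartKey;
+
+    RETURN (@Quantity);
+END
+GO
+```
+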
+---
+
+### Exercise 5 - Approximate QP
+
+Obtaining row counts serves numerous dashboard-type scenarios. When these queries are executed by many concurrent users against big data sets with many distinct values (for example, distinct order counts over a time period), they may introduce performance issues, increased resource usage such as memory, and blocking.
+
+For some of these scenarios, approximate data is good enough. For example, data scientists doing big data set exploration and trend analysis need to understand data distributions quickly, but exact values are not paramount.
+
+SQL Server 2019 introduces the ability to do approximate `COUNT DISTINCT` operations for big data scenarios, with the benefit of high performance and a (very) low memory footprint.
+
+> **Important:**
+> Approximate QP does NOT target banking applications or any scenario where an exact value is required! 
+
+1. Open SSMS and connect to the SQL Server 2019 instance (default instance). Click on **New Query** or press CTRL+N.
+
+    ![New Query](./media/new_query.png "New Query") 
+
+2. Set up the database to ensure the latest database compatibility level is set, by running the commands below in the query window:
+
+    ```sql
+    USE master;
+    GO
+
+    ALTER DATABASE [tpch10g-btree] 
+    SET COMPATIBILITY_LEVEL = 150;
+    GO
+
+    USE [tpch10g-btree];
+    GO
+    
+    ALTER DATABASE SCOPED CONFIGURATION CLEAR PROCEDURE_CACHE;
+    GO
+    ```
+
+3. Now execute the commands below in the query window: 
+
+    ```sql
+    DBCC DROPCLEANBUFFERS;
+    GO
+    SELECT COUNT(DISTINCT [L_OrderKey])
+    FROM [dbo].[lineitem];
+    GO
+
+    DBCC DROPCLEANBUFFERS;
+    GO
+    SELECT APPROX_COUNT_DISTINCT([L_OrderKey])
+    FROM [dbo].[lineitem];
+    GO
+    ```
+
+4. Observe the query execution plan shape and note:
+    - The plans look exactly the same.
+    - Execution time is very similar.
+
+    ![Approximate Count Distinct](./media/ApproxCount.png "Approximate Count Distinct") 
+
+
+5. Right-click *Query 1* execution plan root node - the **SELECT** - and click on **Properties**.     
+    In the ***Properties*** window, expand **MemoryGrantInfo**. Note that:
+    - The property ***GrantedMemory*** is almost 3 GB. 
+    - The property ***MaxUsedMemory*** is almost 700 MB.
+
+    ![Normal Count Distinct - Properties](./media/CD_Properties.png "Normal Count Distinct - Properties") 
+
+6. Now right-click *Query 2* execution plan root node - the **SELECT** - and click on **Properties**.     
+    In the ***Properties*** window, expand **MemoryGrantInfo**. Note that:
+    - The property ***GrantedMemory*** is just under 40 MB. 
+    - The property ***MaxUsedMemory*** is about 8 MB.
+
+    ![Approximate Count Distinct - Properties](./media/ACD_Properties.png "Approximate Count Distinct - Properties") 
+
+    > **Important:**
+    > This means that for scenarios where an approximate count is enough, the much lower memory footprint allows these types of queries to be executed frequently with fewer concerns about concurrency and memory resource bottlenecks.
+
+7. If you execute the queries again without the `DBCC DROPCLEANBUFFERS` command, they run much faster because the pages are now cached, but the observations above remain equivalent.
+
+    ```sql
+    SELECT COUNT(DISTINCT [L_OrderKey])
+    FROM [dbo].[lineitem];
+    GO
+
+    SELECT APPROX_COUNT_DISTINCT([L_OrderKey])
+    FROM [dbo].[lineitem];
+    GO
+    ```
+
+    ![Approximate Count Distinct with warm cache](./media/ApproxCount_Warm.png "Approximate Count Distinct with warm cache")
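+
+    As an optional sketch, you can also quantify how close the approximation is by computing both counts and the relative difference in a single query (the approximation is documented to typically stay within roughly 2% of the exact count):
+
+    ```sql
+    -- Optional sketch: compare exact vs. approximate distinct counts and the relative difference
+    SELECT
+        COUNT(DISTINCT [L_OrderKey]) AS exact_count,
+        APPROX_COUNT_DISTINCT([L_OrderKey]) AS approx_count,
+        ABS(COUNT(DISTINCT [L_OrderKey]) - APPROX_COUNT_DISTINCT([L_OrderKey])) * 100.0
+            / COUNT(DISTINCT [L_OrderKey]) AS pct_difference
+    FROM [dbo].[lineitem];
+    GO
+    ```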

+ 72 - 0
Sessions/Winter-Ready-2019/Lab-Kubernetes.md

@@ -0,0 +1,72 @@
+---
+Title: "SQL Server High Availability on Kubernetes"
+Date: "2/9/2019"
+Author: Sourabh Agarwal
+Purpose: In this lab you will deploy multiple SQL Server pods along with Availability Groups on a Kubernetes Cluster in Azure using Azure Kubernetes Services
+---
+## SQL Server High Availability On Kubernetes
+
+**Step 1 -** Connecting to the Kubernetes Cluster
+
+
+**Step 2 -** Deploying SQL Server pods using kubectl and the SQL manifest file. For this deployment you'll be using the SQL_Server_Deployment.yaml file, which is available under C:\Labs.
+ 
+  **Step 2.1 -** Create a Kubernetes secret for the SQL Server SA password and the Master Key password. All SQL Server pods use these passwords. In the following command, replace <YourStrongPassword> with a valid strong password enclosed in double quotes "".
+  
+  `kubectl create namespace ag1`
+  
+  `kubectl create secret generic sql-secrets --from-literal=sapassword="<YourStrongPassword>" --from-literal=masterkeypassword="<YourStrongPassword>" -n ag1`
+  
+>Note: This deployment uses Namespace ag1. If you'd like to use a different namespace, replace the namespace name "ag1" with a name of your choosing. 
+  
+  **Step 2.2 -** Deploy SQL Server pods using the SQL_Server_Deployment.yaml file. Please deploy the SQL Server pods in the same namespace as the previous step. The default deployment uses namespace ag1. If you are using a different namespace, open the SQL_Server_Deployment.yaml file and replace ALL occurrences of "ag1" with your namespace name.
+
+ `kubectl apply -f <"Location of SQL Server Deployment YAML file"> -n ag1`
+  
+  > Note: The script creates the SQL Server operator along with 3 SQL Server pods with an Availability Group. The script also creates 5 Kubernetes services (3 for the SQL Server pods, 1 for the AG primary replica and 1 for the AG secondary replicas). The primary replica service provides the same functionality as an AG listener, while the secondary replica service provides load balancing across the readable secondaries. It may take a few minutes (generally less than 5 minutes) for the entire deployment to finish.
+
+  **Step 2.3 -** Execute the below command to get a list of all the deployments in your namespace. 
+  
+  `kubectl get all -n <namespace_name>`
+    
+**Step 3 -** Connect to the SQL Server Primary Replica to create a database and add the database to the Availability Group.
+
+  From the output of the above command, identify the External IP address associated with the AG primary replica service. The service follows the naming convention **svc/AGName-primary**. Connect to the external IP address using the SA password from step 2.1, with SSMS or Azure Data Studio.
+  
+ Open a new query window and run the following commands
+ 
+ ```SQL
+CREATE DATABASE TestDB1;
+GO
+ALTER DATABASE TestDB1 SET RECOVERY FULL;
+GO
+BACKUP DATABASE TestDB1 TO DISK = 'NUL';
+GO
+ALTER AVAILABILITY GROUP <AG_Name> ADD DATABASE TestDB1;
+GO
+```
+  >Note: Replace the <AG_Name> in the script above with the name of your Availability Group
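+ 
+ Optionally, you can verify that TestDB1 has joined the AG and is synchronizing by querying the AG DMVs. A quick sketch using the standard system views:
+ 
+ ```SQL
+ -- Check the synchronization state of databases in the AG on this replica
+ SELECT DB_NAME(database_id) AS database_name, synchronization_state_desc, synchronization_health_desc
+ FROM sys.dm_hadr_database_replica_states
+ WHERE is_local = 1;
+ GO
+ ```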
+ 
+ **Step 4 -** Initiate an automatic failover of the AG by crashing the primary replica pod.
+ 
+ Connect to the Primary Replica of the AG using the primary service IP and execute the below query 
+ ```SQL
+ SELECT @@SERVERNAME;
+ GO
+ ```
+ 
+ From a cmd window, execute the below command to crash the primary replica pod
+ 
+ `kubectl delete pod <primary_replica_pod_name> -n <namespace_name>`
+ 
+ >Note: Replace <primary_replica_pod_name> with the primary replica pod name from the output of the previous command. Also replace <namespace_name> with the name of your namespace.
+ 
+ Killing the primary replica pod will initiate an automatic failover of the AG to one of the synchronous secondary replicas. Reconnect to the primary replica IP and execute the below command again.
+ 
+ ```SQL
+ SELECT @@SERVERNAME;
+ GO
+ ```
+  
+  

+ 98 - 0
Sessions/Winter-Ready-2019/Lab-Memory-OptimizedTempDB.md

@@ -0,0 +1,98 @@
+---
+title: "SQL Server 2019 Memory-Optimized TempDB Lab"
+date: "11/21/2018"
+author: Pam Lahoud
+---
+# SQL Server 2019 Memory-Optimized TempDB Lab
+
+1.  Connect to the Windows VM in Azure using the information provided in the LODS portal instructions. If you are performing this lab in your own environment, create a VM with a minimum of 32 cores and SQL Server 2019 CTP 2.3.
+
+2.  Open SQL Server Management Studio (SSMS) and connect to the **HkTempDBTestVM\\SQL2019\_CTP23** instance. Verify that the **AdventureWorks** database exists. If it does not, perform the following steps:
+
+    1.  Open and execute the script `C:\Labs\Memory-OptimizedTempDB\00-AdventureWorks_Setup.sql`.    
+        This will restore the AdventureWorks database and make configuration changes needed for the rest of the lab.
+
+    2.  Open and execute the script `C:\Labs\Memory-OptimizedTempDB\01-SalesAnalysis_Optimized.sql` to create the **SalesAnalysis\_Optimized** stored procedure.
+
+3.  Verify that there are no startup trace flags set:
+
+    1.  Open **SQL Server Configuration Manager**.
+
+    2.  Click "SQL Server Services" in the left pane.
+
+    3.  Right-Click "SQL Server (SQL2019\_CTP23)" and choose
+        ***Properties***.
+
+    4.  Click the ***Startup Parameters*** tab.
+
+    5.  Verify that there are no lines that begin with "-T" in the list of existing parameters:    
+
+        ![Startup Parameters No Flag](./media/StartupParametersNoFlag.png "Startup Parameters No Flag")
+
+        If any exist, highlight them and click "Remove".
+
+    6.  Click "OK" to close the *Properties* window, then click "OK" on the Warning box that pops up.
+
+    7.  Restart the SQL Server service by right-clicking on "SQL Server (SQL2019\_CTP23)" and choosing **Restart**.
+
+4.  Browse to the `C:\Labs\Memory-OptimizedTempDB` folder and double-click the `SQL2019_CTP23_PerfMon.htm` file to open it in Internet Explorer.    
+    
+    - Once the file opens, right-click anywhere in the white area of the window to clear the existing data.
+    - You will receive a prompt warning *this action will erase the data in the graph*.    
+        Click **Yes** to continue, then click **Clear** to clear the window in preparation for the lab.
+    - Leave it open...
+
+5.  In SSMS, open the following script files:
+
+    - `02-get waiting tasks and executing command.sql`
+    - `02-get summary of current waits.sql`
+    - `02-get object info from page resource sql 2019.sql`
+
+    These will be used to monitor the server while the workload is running.
+
+6.  Start the workload:
+
+    1.  Open a Command Prompt and browse to `C:\Labs\Memory-OptimizedTempDB`
+
+    2.  Go back to the Internet Explorer window that has the Performance Monitor collector open and click the play button (green arrow) to start the collection.
+
+    3.  From the Command Prompt window, execute `SQL2019_CTP21_Run_SalesAnalysis_Optimized.bat` by typing or pasting the file name and hitting **Enter**.
+
+7.  While the workload is running, watch the counters in Performance Monitor. You should see **Batch Requests/sec** around 500 and there should be **Page Latch** waits throughout the workload.    
+    
+    You can then go to SSMS and run the various scripts to monitor the workload. You should see several sessions waiting on `PAGELATCH`, and when using the `02-get object info from page resource sql 2019.sql` script, you should see that the sessions are waiting on pages that belong to TempDB system tables, most often `sysschobjs`.    
+    This is TempDB metadata contention and is the scenario that this SQL Server 2019 improvement is targeted to correct.    
+    
+    Feel free to run the workload a few more times to examine the different scripts and performance counters. Each time you run it, the runtime will be reported in the Command Prompt window. It should take about 1 minute to run each time.
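+
+    If the lab scripts are not at hand, a minimal sketch to spot this contention directly (assuming VIEW SERVER STATE permission) is to filter the waiting-tasks DMV for page latch waits on tempdb pages (database ID 2):
+
+    ```sql
+    -- Sessions currently waiting on PAGELATCH for tempdb pages (resource format is dbid:fileid:pageid)
+    SELECT session_id, wait_type, wait_duration_ms, resource_description
+    FROM sys.dm_os_waiting_tasks
+    WHERE wait_type LIKE N'PAGELATCH%'
+      AND resource_description LIKE N'2:%';
+    ```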
+
+8.  Once you have finished examining the contention, make sure the Command Prompt scripts are complete and pause the Performance Monitor collection. We'll use the same Performance Monitor window for the next part of the lab, so it's a good idea to have at least one collection of the workload on the screen when you pause it in order to compare before and after the change.
+
+9.  Turn on Memory-Optimized TempDB:
+
+    1. Open **SQL Server Configuration Manager**.
+    2. Click "SQL Server Services" in the left pane.
+    3. Right-Click "SQL Server (SQL2019\_CTP23)" and choose ***Properties***.
+    4. Click the ***Startup Parameters*** tab.
+    5. In the "Specify a startup parameter:" box, type "-T3895" and click the "Add" button.
+    6. The "Existing parameters:" box should now look like this:
+    
+       ![Startup Parameters With Flag](./media/StartupParametersWithFlag.png "Startup Parameters With Flag")
+
+    7. Click "OK" to close the *Properties* window, then click "OK" on the Warning box that pops up.
+    8. Restart the SQL Server service by right-clicking on "SQL Server (SQL2019\_CTP23)" and choosing **Restart**.
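+
+    After the restart, you can optionally confirm the trace flag is active globally with a quick check from a query window:
+
+    ```sql
+    -- Returns a row with Global = 1 when trace flag 3895 is enabled server-wide
+    DBCC TRACESTATUS(3895, -1);
+    ```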
+
+10. Go back to the Performance Monitor collector and click play to start the collection.
+
+11. Start the workload the same way you did in step 6.
+
+12. Again, watch the Performance Monitor counters. You should see **Batch Requests/sec** higher this time, around 600, and there should be no Page Latch waits.   
+
+    > **Note:**
+    > You may see a small bump of Page Latch waits the first time you run the workload after the restart. This should disappear the second time you run it.
+
+    Running the monitoring scripts from step 5 during the workload should show that no sessions are waiting for any resources. Again, feel free to run the workload multiple times. It should run faster this time, around 52 seconds vs. 1 minute.
+
+    > **Note:**
+    > The amount of improvement you will see on a real-world workload will depend on how much contention is seen and the size of the SQL Server (i.e. how many cores and how much memory).     
+    > Small servers without a high level of concurrency will not see much of an improvement, if any at all.     
+    > This improvement is designed to provide greater scalability, so while a single run won't get much faster, you should be able to run a lot more concurrent threads without increasing the runtime of each batch.

+ 6 - 0
Sessions/Winter-Ready-2019/Lab-PMEM.md

@@ -0,0 +1,6 @@
+---
+title: "PMEM"
+date: "11/21/2018"
+author: Argenis Fernandez
+---
+# PMEM Lab 

+ 117 - 0
Sessions/Winter-Ready-2019/Lab-QTA.md

@@ -0,0 +1,117 @@
+---
+title: "Upgrading Databases by using the Query Tuning Assistant"
+date: "11/21/2018"
+author: Pedro Lopes
+---
+# Upgrading Database Compatibility Level using QTA Lab 
+
+## Intro - Defining the problem and goal
+When migrating from an older version of SQL Server and [upgrading the database compatibility level](https://docs.microsoft.com/sql/relational-databases/databases/view-or-change-the-compatibility-level-of-a-database) to the latest available, a workload may be exposed to the risk of performance regression. 
+
+Starting with SQL Server 2016, all query optimizer changes are gated to the latest database compatibility level, which in combination with Query Store gives you a great level of control over the query performance in the upgrade process if the upgrade follows the recommended workflow seen below. 
+
+![Recommended database upgrade workflow using Query Store](./media/query-store-usage-5.png "Recommended database upgrade workflow using Query Store") 
+
+This control over upgrades was further improved with SQL Server 2017 where [Automatic Tuning](https://docs.microsoft.com/sql/relational-databases/automatic-tuning/automatic-tuning.md) was introduced and allows automating the last step in the recommended workflow above.
+
+Starting with SSMS v18, the new **Query Tuning Assistant (QTA)** feature will guide users through the recommended workflow to keep performance stability during database upgrades. See below how QTA essentially only changes the last steps of the recommended workflow for upgrading the compatibility level using Query Store seen above. Instead of having the option to choose between the currently inefficient execution plan and the last known good execution plan, QTA presents tuning options that are specific to the selected regressed queries, to create a new improved state with tuned execution plans.
+
+![Recommended database upgrade workflow using QTA](./media/qta-usage.png "Recommended database upgrade workflow using QTA")
+
+> **Note:** 
+> QTA does not generate user workload so users must ensure that a representative test workload can be executed on the target instance. 
+
+## Lab requirements (pre-installed)
+The following are requirements to run this lab:
+
+- SQL Server 2016 or higher is installed.
+- You have installed SQL Server Management Studio v18 Preview 5 or higher.
+- You have installed the RML Utilities from https://www.microsoft.com/en-us/download/details.aspx?id=4511. Install ostress from the package *RML_Setup_AMD64.msi*. Add `C:\Program Files\Microsoft Corporation\RMLUtils` to your path variable.
+- Restore the AdventureWorksDW2012 database to your SQL Server instance. The `AdventureWorksDW2012.bak` is available in https://github.com/Microsoft/sql-server-samples/releases/tag/adventureworks.
+
+## Lab
+
+### 1. Configure an upgrade session
+
+1.  In SSMS, open the Object Explorer and connect to your local SQL Server instance.
+
+2.  For the database whose compatibility level is intended to be upgraded (AdventureWorksDW2012), right-click the database name, select **Tasks**, select **Database Upgrade**, and click on **New Database Upgrade Session**.
+
+3.  In the **Setup** window, configure Query Store to capture the equivalent of one full business cycle of workload data to analyze and tune. 
+    -  Enter **1** as the expected workload duration in days (minimum is 1 day). This will be used to propose recommended Query Store settings to tentatively allow the entire baseline to be collected. Capturing a good baseline is important to ensure any regressed queries found after changing the database compatibility level are able to be analyzed. 
+    -  Set the intended target database compatibility level to **140** or **150**. This is the setting that the user database should be at, after the QTA workflow has completed. 
+    -  Once complete, click **Next**.
+    
+       ![New database upgrade session setup window](./media/qta-new-session-setup.png "New database upgrade setup window")  
+  
+4.  In the **Settings** window, two columns show the **Current** state of Query Store in the targeted database, as well as the **Recommended** settings. Click on the **Recommended** button (if not selected by default). A quick way to verify the settings that get applied is shown after this list.
+
+       ![New database upgrade settings window](./media/qta-new-session-settings.png "New database upgrade settings window")
+
+5.  The **Tuning** window concludes the session configuration, and instructs on next steps to open and proceed with the session. Once complete, click **Finish**.
+
+    ![New database upgrade tuning window](./media/qta-new-session-tuning.png "New database upgrade tuning window")
+
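+Optionally, after finishing the setup you can verify the Query Store settings that were applied to the database with a quick query (a sketch using the standard catalog view; the exact recommended values depend on the expected workload duration entered above):
+
+```sql
+-- Verify the Query Store state and settings applied for the upgrade session
+USE [AdventureWorksDW2012];
+GO
+SELECT actual_state_desc, query_capture_mode_desc,
+       max_storage_size_mb, stale_query_threshold_days,
+       interval_length_minutes
+FROM sys.database_query_store_options;
+GO
+```
+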
+### 2. Executing the database upgrade workflow
+1.  For the database whose compatibility level is intended to be upgraded (AdventureWorksDW2012), right-click the database name, select **Tasks**, select **Database Upgrade**, and click on **Monitor Sessions**.
+
+2.  The **session management** page lists current and past sessions for the database in scope. Select the desired session, and click on **Details**.
+    
+    ![QTA Session Management page](./media/qta-session-management.png "QTA Session Management page")
+
+3.  The entry point for a new session is the **Data Collection** step. This step has 3 substeps:
+
+    1.  **Baseline Data Collection** requests the user to run the representative workload cycle, so that Query Store can collect a baseline.    
+        
+        Open `C:\Labs\Lab-QTA\SCENARIO` and double-click the file `PreUpgrade.cmd` to execute it. Once that workload has completed, check the **Done with workload run** and click **Next**.   
+
+        ![QTA Step 2 Substep 1](./media/qta-step2-substep1.png "QTA Step 2 Substep 1")
+
+    2.  **Upgrade Database** will prompt for permission to upgrade the database compatibility level to the desired target. To proceed to the next substep, click **Yes**.
+
+        ![QTA Step 2 Substep 2 - Upgrade database compatibility level](./media/qta-step2-substep2-prompt.png "QTA Step 2 Substep 2 - Upgrade database compatibility level")
+
+        The following page confirms that the database compatibility level was successfully upgraded.
+
+        ![QTA Step 2 Substep 2](./media/qta-step2-substep2.png "QTA Step 2 Substep 2")
+
+    3.  **Observed Data Collection** requests the user to re-run the representative workload cycle, so that Query Store can collect a comparative baseline that will be used to search for optimization opportunities. Open `C:\Labs\Lab-QTA\SCENARIO` and double-click the file `PostUpgrade.cmd` to execute it.    
+
+        As the workload executes, use the **Refresh** button to keep updating the list of regressed queries. You can change the **Queries to show** value to limit the number of queries displayed. The order of the list is affected by the **Metric** (Duration or CpuTime) and the **Aggregation** (Average is default).    
+
+        Once that workload has completed, check the **Done with workload run** and click **Next**.
+
+        ![QTA Step 2 Substep 3](./media/qta-step2-substep3.png "QTA Step 2 Substep 3")
+
+        For each query, notice:
+        - The columns **Baseline Metric** and **Observed Metric**: these show the performance difference between the pre-upgrade and post-upgrade status for the same workload.     
+        - The **% Change** column, which shows the percentage change for the selected metric between the before and after database compatibility upgrade state. A negative number represents the amount of measured regression for the query.     
+        - The **Tunable** column where you can see whether QTA can experiment on how to tune this query. Not all query types are eligible.
+
+4.  **View Analysis** allows selection of which queries to experiment on to find optimization opportunities. The **Queries to show** value becomes the scope of eligible queries to experiment on.     
+
+    Check all queries that are Tunable, and click **Next** to start experimentation. A prompt advises that once QTA moves to the experimentation phase, returning to the View Analysis page will not be possible.
+
+    ![QTA Step 3](./media/qta-step3.png "QTA Step 3")
+
+5.  After experimentation is complete, the **View Findings** step allows selection of the queries for which the proposed optimization should be deployed as a plan guide. 
+
+    For each query, notice:
+    - The columns **Baseline Metric** and **Observed Metric**: these show the performance difference between the post-upgrade and the after-experimentation status.
+    - The **% Change** column, which shows the percentage change for the selected metric between the before and after-experimentation state, representing the amount of measured improvement for the query with the proposed optimization.
+    - The **Query Option** column links to online documentation about the proposed hint that improves the query execution metric.
+    - The **Can Deploy** column where you can see whether the proposed query optimization can be deployed as a plan guide.
+
+    Select all queries in this screen and click on **Deploy**.
+
+    ![QTA Step 4](./media/qta-step4.png "QTA Step 4")
+
+6.  **Verification** shows the deployment status of previously selected queries for this session. The list in this page differs from the previous page by changing the **Can Deploy** column to **Can Rollback**.
+
+    ![QTA Step 5](./media/qta-step5.png "QTA Step 5")
+
+    This allows users to roll back a proposed optimization if the results in production differ from the experimentation.     
+    
+    Select a query and click **Rollback**. That query's plan guide is removed and the list is updated to remove the rolled-back query. Note in the picture below that query 8 was removed.
+
+    ![QTA Step 5 - Rollback](./media/qta-step5-rollback.png "QTA Step 5 - Rollback") 

BIN
Sessions/Winter-Ready-2019/Labs/Lab-AutoTuning.zip


BIN
Sessions/Winter-Ready-2019/Labs/Lab-Containers.zip


BIN
Sessions/Winter-Ready-2019/Labs/Lab-Memory-OptimizedTempDB.zip


BIN
Sessions/Winter-Ready-2019/Labs/Lab-QTA.zip


BIN
Sessions/Winter-Ready-2019/media/ACD_Properties.png


BIN
Sessions/Winter-Ready-2019/media/ActualPlan.png


BIN
Sessions/Winter-Ready-2019/media/ApproxCount.png


BIN
Sessions/Winter-Ready-2019/media/ApproxCount_Warm.png


BIN
Sessions/Winter-Ready-2019/media/BMRS_Batch.png


BIN
Sessions/Winter-Ready-2019/media/BMRS_Row.png


BIN
Sessions/Winter-Ready-2019/media/CD_Properties.png


BIN
Sessions/Winter-Ready-2019/media/Container-DockerCommands.png


BIN
Sessions/Winter-Ready-2019/media/Container-DockerCompose.png


BIN
Sessions/Winter-Ready-2019/media/Container-DockerComposeUp.png


BIN
Sessions/Winter-Ready-2019/media/Container-Dockerfile.png


BIN
Sessions/Winter-Ready-2019/media/Container-ExecSQLCMD.png


BIN
Sessions/Winter-Ready-2019/media/Container-GettingStartedOpsStudio.png


BIN
Sessions/Winter-Ready-2019/media/Container-GettingStartedResults.png


BIN
Sessions/Winter-Ready-2019/media/Container-RestoredDB.png


BIN
Sessions/Winter-Ready-2019/media/MGF_Properties_FirstExec.png


BIN
Sessions/Winter-Ready-2019/media/MGF_Spill.png


BIN
Sessions/Winter-Ready-2019/media/StartupParametersNoFlag.png


BIN
Sessions/Winter-Ready-2019/media/StartupParametersWithFlag.png


BIN
Sessions/Winter-Ready-2019/media/TV_Legacy.png


BIN
Sessions/Winter-Ready-2019/media/TV_New.png


BIN
Sessions/Winter-Ready-2019/media/UDF_Inlined.png


BIN
Sessions/Winter-Ready-2019/media/UDF_NotInlined.png


BIN
Sessions/Winter-Ready-2019/media/iqpfeaturefamily.png


BIN
Sessions/Winter-Ready-2019/media/new_query.png


BIN
Sessions/Winter-Ready-2019/media/objectexplorerquerystore_sql17.png


BIN
Sessions/Winter-Ready-2019/media/qta-new-session-settings.png


BIN
Sessions/Winter-Ready-2019/media/qta-new-session-setup.png


BIN
Sessions/Winter-Ready-2019/media/qta-new-session-tuning.png


BIN
Sessions/Winter-Ready-2019/media/qta-session-management.png


BIN
Sessions/Winter-Ready-2019/media/qta-step2-substep1.png


BIN
Sessions/Winter-Ready-2019/media/qta-step2-substep2-prompt.png


BIN
Sessions/Winter-Ready-2019/media/qta-step2-substep2.png


BIN
Sessions/Winter-Ready-2019/media/qta-step2-substep3.png


BIN
Sessions/Winter-Ready-2019/media/qta-step3.png


BIN
Sessions/Winter-Ready-2019/media/qta-step4.png


BIN
Sessions/Winter-Ready-2019/media/qta-step5-rollback.png


BIN
Sessions/Winter-Ready-2019/media/qta-step5.png


BIN
Sessions/Winter-Ready-2019/media/qta-usage.png


BIN
Sessions/Winter-Ready-2019/media/query-store-force-plan.png


BIN
Sessions/Winter-Ready-2019/media/query-store-usage-5.png