use tempdb;
set nocount on;
go
/*
Throughout this script, you'll need to open multiple connections to the test
instance...I'd recommend opening a single connection to run the
"cp_myLockInfo2k5" procedure that will report lock related information for the
spids you pass it, and 2 other connections for testing with.

During the script, anytime a statement is preceded with a comment like this:
"--CONNECTION 1", then run that statement(s) in 1 connection. Anytime a
statement is preceded with a comment like this: "--CONNECTION 2", then run
that statement(s) in the 2nd connection.

Major demo sections are separated by large comment blocks like the following
(note: T-SQL block comments nest, so the inner block below is safe inside
this outer comment):

/*********************************************************************************
*********************************************************************************/

which contain some info on what that section will entail.

This script will cover:

PESSIMISTIC MODEL of CONCURRENCY (locking based methods):
 - READ UNCOMMITTED
 - READ COMMITTED
 - REPEATABLE READ
 - SERIALIZABLE

OPTIMISTIC MODEL of CONCURRENCY (row-versioning based methods):
 - READ_COMMITTED_SNAPSHOT
 - SNAPSHOT

Also will cover difference between each type of model and implications of
using them...
*/
-- Create lock reporting procedure...
-- Drop and recreate the lock-reporting helper procedure.
-- (object_id() returns NULL when the object doesn't exist, so test for NULL
-- rather than "> 0"; pass 'P' so only a procedure by this name matches.)
if object_id('dbo.cp_myLockInfo2k5', 'P') is not null
    drop procedure dbo.cp_myLockInfo2k5
go
-- Reports current lock information (from sys.dm_tran_locks) for up to two
-- sessions in the CURRENT database only.  Any spid that is NULL or
-- non-positive defaults to the calling session's @@spid, so with no
-- arguments the proc reports on the caller itself.
create procedure dbo.cp_myLockInfo2k5
    @spid1 smallint = null,   -- first session to report on (default: caller)
    @spid2 smallint = null    -- second session to report on (default: caller)
as
    -- Default any unspecified/invalid spid to the current session...
    select  @spid1 = case when @spid1 > 0 then @spid1 else @@spid end,
            @spid2 = case when @spid2 > 0 then @spid2 else @@spid end

    select  cast((select count(*)
                  from sys.dm_tran_session_transactions with(nolock)
                  where session_id = l.request_session_id) as varchar(5)) as myTranCount,
            cast(db_name(l.resource_database_id) as varchar(25)) as dbName,
            cast(l.resource_type as varchar(15)) as resType,
            -- Human-readable description of the locked resource, by resource type.
            -- isnull() on the index name keeps the whole expression from going
            -- NULL for an unnamed index/heap...
            cast(case when l.resource_type = 'DATABASE'
                          then 'DB:' + db_name(l.resource_database_id)
                      when l.resource_type = 'KEY'
                          then object_name(p.object_id) + ' : '
                               + cast(i.index_id as varchar(10))
                               + ' (' + isnull(i.name, 'n/a') + ')'
                      when l.resource_type = 'OBJECT'
                          then object_name(l.resource_associated_entity_id)
                      when l.resource_type = 'PAGE'
                          then 'PAGE ' + quotename(cast(rtrim(l.resource_description) as varchar(100)))
                               + ' IN ( ' + object_name(p.object_id) + ' : '
                               + cast(i.index_id as varchar(10)) + ' )'
                 end as varchar(50)) as associatedEntity,
            cast(l.request_mode as varchar(10)) as mode,
            cast(l.request_status as varchar(15)) as status,
            cast(l.request_session_id as varchar(6)) as spid,
            cast(l.request_owner_type as varchar(25)) as requestOwnerType,
            cast(l.resource_subtype as varchar(15)) as resSubType,
            cast(l.resource_description as varchar(25)) as resDescrip,
            l.resource_associated_entity_id as resAssociatedEntityId
            --,l.*
    from    sys.dm_tran_locks l with(nolock)
    -- KEY/PAGE locks carry a hobt id; resolve it back to the owning
    -- partition (honoring lock partitioning when present)...
    left join sys.partitions p with(nolock)
            on  l.resource_associated_entity_id = p.hobt_id
            and p.partition_id = case when l.resource_lock_partition > 0
                                      then l.resource_lock_partition
                                      else p.partition_id end
    left join sys.indexes i with(nolock)
            on  p.object_id = i.object_id
            and p.index_id = i.index_id
    where   (l.request_session_id = @spid1 or l.request_session_id = @spid2)
    and     l.resource_database_id = db_id()
    order by l.request_session_id
go
-- USE THIS BATCH FOR REPORTING ON LOCK INFO
THROUGHOUT THE SCRIPT (in a separate connection)... /* set nocount on; exec dbo.cp_myLockInfo2k5 @spid1 = , @spid2 = ; go --*/ -- Create test objects... if object_id('dbo.t1') > 0 drop table dbo.t1; go create table t1 (pkid int, colA varchar(100), fillerCol char(1000)); go -- Add some data... with cte as ( select top 102 row_number() over (order by a.object_id) as id, cast(row_number() over (order by a.object_id) as varchar(10)) + ' - ' + cast(newid() as varchar(50)) as colA, 'blah' as fillerCol from sys.columns a cross join sys.columns b cross join sys.types c ) insert dbo.t1 (pkid, colA, fillerCol) select * from cte where id not in(4,8) -- Don't insert id's 4 or 8... go create unique clustered index ix_t1_c_pkid on dbo.t1 (pkid); create unique nonclustered index ix_t1_nc_colA on dbo.t1 (colA); go /********************************************************************************* PESSIMISTIC MODEL - locking...how it worked in Sql2k, and how it works by default in Sql2k5 still... *********************************************************************************/ ------------------------------------------------------------------- -- READ UNCOMMITTED isolation....and problems with it... ------------------------------------------------------------------- -- In this scenario, you get dirty data (that is the well-known part of -- a dirty read)... -- CONNECTION 1 set transaction isolation level read uncommitted; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- CONNECTION 2 set transaction isolation level read uncommitted; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- View output of the lock info procedure for connection 1 and connection 2 spids... /* Should see a single row of output for each SPID - a shared lock on the database being executed within and nothing else... */ -- Now try writing in CONNECTION 1 (keep original xact open)... 
update dbo.t1 set fillerCol = 'blah1' where pkid = 5; -- Works fine (as expected)...take a look at lock output... -- Should see a mix of exclusive type locks at different levels -- (IX at object/page level, X at KEY level) for SPID1... -- Try running the SELECT from SPID1 again within the existing transaction... -- Notice you won't block, and you'll read the 'dirty' value for record #5 -- seeing a fillerCol value of 'blah1' instead of the original value 'blah'... -- BOTH CONNECTIONS... while @@trancount > 0 rollback tran go -- How about some of the NOLOCK/UNCOMMITTED side effects that not everyone -- is so aware of... /* First, you should understand that SQL Server is using two kinds of scans for reading data from tables and indexes: allocation scans and range scans. An allocation scan is scanning the pages in physical (allocation) order as they are laid down on the disk. In this situation, no particular logical order of rows is guaranteed. If the optimizer requires rows to be scanned in a certain order, then a range scan is employed. This is usually when there is a predicate on the leading column of an index or if the optimizer requires visiting the rows in the logical order (for example, because of aggregation or ORDER BY in the query). Next, you should know that if a new row is inserted into an index (clustered or not), it might cause page split. This happens if the existing page is full and the new row belongs between two existing rows (because of the key value of the new row is between the key values of the two rows) and it cannot fit physically into the same page. The page is split into two (approximately ½ empty each), and the new row is inserted where it belongs. With the NOLOCK hint (or setting the isolation level of the session to READ UNCOMMITTED) you tell SQL Server that you don’t expect consistency, so there are no guarantees. 
Bear in mind though that “inconsistent data” does not only mean that you might
see uncommitted changes that were later rolled back, or data changes in an
intermediate state of the transaction. It also means that in a simple query
that scans all table/index data SQL Server may lose the scan position, or you
might end up getting the same row twice. */

-- CONNECTION 1
-- Simply continue to insert random data into the pkid column until
-- we get a duplicate read on connection #2...
-- NOTE(review): the script opened with "use tempdb" but switches to the
-- zMaint_Sql2005 test database here - confirm this is the intended target DB...
use zMaint_Sql2005;
set nocount on;
-- ##dupsNotFound is a cross-connection "keep going" flag; CONNECTION 2 drops
-- it once a duplicate read has been observed.  object_id() for temp objects
-- can be NEGATIVE, so test for NULL instead of "> 0" (with "> 0" these
-- driver loops may never execute at all)...
if object_id('tempdb..##dupsNotFound', 'U') is not null drop table ##dupsNotFound;
create table ##dupsNotFound(col1 int);
go
while object_id('tempdb..##dupsNotFound', 'U') is not null
    -- Insert a random pkid; use a guid for colA so we don't violate the
    -- unique nonclustered index ix_t1_nc_colA (a constant value would fail on
    -- the 2nd insert), and skip pkid values that already exist so a random
    -- collision doesn't abort the loop with a unique clustered index error...
    insert dbo.t1 (pkid, colA, fillerCol)
    select x.pkid, cast(newid() as varchar(50)), 'filler'
    from (select 1 + abs(checksum(newid()) % 1000000000) as pkid) x
    where not exists (select * from dbo.t1 where pkid = x.pkid);
go

-- CONNECTION 2
use zMaint_Sql2005;
set nocount on;
-- Delay a bit to allow the table to grow past the boundary to introduce an
-- allocation order scan vs. index order scan...
waitfor delay '00:00:05';
-- Continue till we see duplicate 'unique' data...
while object_id('tempdb..##dupsNotFound', 'U') is not null
begin
    if object_id('tempdb..#t', 'U') is not null drop table #t;
    -- Scan the t1 table, outputting results into a temp table...
    select pkid into #t from dbo.t1 with(nolock); -- Force a dirty read...
    -- Check for duplicate 'unique' values...
    if exists( select pkid, count(*) as cnt from #t group by pkid having count(*) > 1 )
    begin
        -- Show the dups...
        select pkid, count(*) as cnt from #t group by pkid having count(*) > 1;
        -- Drop the flag table (signals CONNECTION 1 to stop inserting)...
        drop table ##dupsNotFound;
    end
end

-------------------------------------------------------------------
-- READ COMMITTED isolation....
-------------------------------------------------------------------
-- First, we'll reinitialize the t1 table...
if object_id('dbo.t1') is not null drop table dbo.t1;
go
create table t1 (pkid int, colA varchar(100), fillerCol char(1000));
go
-- Add some data...
with cte as ( select top 102 row_number() over (order by a.object_id) as id, cast(row_number() over (order by a.object_id) as varchar(10)) + ' - ' + cast(newid() as varchar(50)) as colA, 'blah' as fillerCol from sys.columns a cross join sys.columns b cross join sys.types c ) insert dbo.t1 (pkid, colA, fillerCol) select * from cte where id not in(4,8) -- Don't insert id's 4 or 8... go create unique clustered index ix_t1_c_pkid on dbo.t1 (pkid); create unique nonclustered index ix_t1_nc_colA on dbo.t1 (colA); go -- We all know that readers shouldn't block readers in -- READ COMMITTED isolation... -- CONNECTION 1 set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- CONNECTION 2 set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- View output of the lock info procedure for connection 1 and connection 2 spids... /* Should see a single row of output for each SPID - a shared lock on the database being executed within... */ -- Now try writing in CONNECTION 2 (keep original xact open)... update dbo.t1 set fillerCol = 'blah1' where pkid = 5; -- Works fine (as expected)...take a look at lock output... -- Should see a mix of exclusive type locks at different levels -- (IX at object/page level, X at KEY level) for SPID2... -- Try running the SELECT from SPID1 again within the existing transaction...should block... -- Running the lock output again should show SPID1 in a wait state for a shared lock... -- BOTH CONNECTIONS... while @@trancount > 0 rollback tran go /* Now, many people assume/think that in the READ COMMITTED isolation level that you'll never see data inconsistencies...this is definitely NOT the case, as concurrent updates to a given structure could still result in some interesting results...this is due to the way the READ COMMITTED isolation level is implemented... 
When SQL Server executes a statement at the read committed isolation level, it
acquires short lived share locks on a row by row basis. The duration of these
share locks is just long enough to read and process each row; the server
generally releases each lock before proceeding to the next row. Thus, if you
run a simple select statement under read committed and check for locks, you
will typically see at most a single row lock at a time. The sole purpose of
these locks is to ensure that the statement only reads and returns committed
data. The locks work because updates always acquire an exclusive lock which
blocks any readers trying to acquire a share lock.

Now, let's suppose that we scan an entire table at read committed isolation
level. Since the scan locks only one row at a time, there is nothing to
prevent a concurrent update from moving a row before or after our scan reaches
it. Let's see how this can happen... */

-- Take a look at the data in the t1 table...notice the pkid and colA column values
-- match (for the value in colA preceding the '-' character, that is)...
select * from dbo.t1;

-- CONNECTION 1
-- First in Connection 1, we'll lock the 2nd row...
set transaction isolation level read committed;
begin tran;
update dbo.t1 set colA = '2 - new' where pkid = 2;

-- CONNECTION 2
-- From session 2, run a simple scan of the table...this will block at row 2...
set transaction isolation level read committed;
select * from dbo.t1;

-- CONNECTION 1
-- Back in connection 1, while the scan in session 2 is blocked, we can swap the first and 100th rows
-- and then commit the transaction and release the exclusive lock blocking session 2:
-- Move the 1st row (pkid = 1) to the last position (pkid = 105 will do the trick since we have 100
-- rows currently...)
update dbo.t1 set pkid = 105 where pkid = 1;
-- Move the 100th row (id = 102) to the 1st position...
update dbo.t1 set pkid = 1 where pkid = 102;
-- Show what we have for data in session 1...
select * from dbo.t1; -- Should see: -- - pkid 2 with a colA value of '2 - new' -- - pkid 1 with a colA value of '102 - xxxxxxxxxxxxxxxx' -- - pkid 105 with a colA value of '1 - xxxxxxxxxxxxxxxxx' -- Commit our updates on session 1... commit tran; -- CONNECTION 2 -- Now session 2 should have finished it's scan...take a look at the results...should notice: -- - pkid 1 (1st row) was scanned prior to the updates, shows colA value of '1 - xxxxx' -- - pkid 2 (2nd row) was scanned after the updates, shows colA value of '2 - new' -- - pkid 105 (originally pkid 1) was scanned following the update, shows a colA value -- of '1 - xxxxxx' - more importantly, notice that the 'unique' colA value here -- is EXACTLY the same as pkid 1 in this result set - notice how despite the fact -- that we have a unique constraint on the t1.colA column, in this single select -- statement, we received supposedly 'duplicate' colA values... -- - pkid 102 (originally the 100th row) DOES NOT EVEN SHOW UP...was scanned after the update... /* Now, many people may be thinking this can only happen if you update your cluster key value - though this is one case, remember that PAGE SPLITS can also cause the same behavior...this same behavior can also be perceived on a non-clustered scan as well... */ -- Both... while @@trancount > 0 rollback tran go -- Let's take a look at how this affects slightly more complex query operations (joins for example) -- Create a simple customer/order schema... 
create table Customers (CustId int primary key, LastName varchar(30)) insert Customers values (11, 'Doe') create table Orders (OrderId int primary key, CustId int foreign key references Customers, Discount float) insert Orders values (1, 11, 0) insert Orders values (2, 11, 0) select * from Orders O join Customers C on O.CustId = C.CustId /* The plan for this query uses a nested loops join: |--Nested Loops(Inner Join, OUTER REFERENCES:([O].[CustId])) |--Clustered Index Scan(OBJECT:([Orders].[PK__Orders] AS [O])) |--Clustered Index Seek(OBJECT:([Customers].[PK__Customers] AS [C]), SEEK:([C].[CustId]= [O].[CustId])) In a nested loops join, the inner input is executed once for each row from the outer input. In this example, the Orders table is the outer table and we have two orders so we will execute two seeks of the Customers table. Moreover, both orders were placed by the same customer. What happens if we change the customer data between the two index seeks? Let's see.... */ -- CONNECTION 1 -- First, lock the 2nd order in session 1... set transaction isolation level read committed; begin tran; update Orders set Discount = 0.1 where OrderId = 2; -- CONNECTION 2 -- Now, in session 2, run the join from Customers to Orders as performed originally above: set transaction isolation level read committed; select * from Orders O join Customers C on O.CustId = C.CustId; -- The join will scan the first order and join it with the Customers table. Then it will try to -- scan the second order and block waiting for the lock held by session 1. -- CONNECTION 1 -- Back in session 1, update the customer name and commit the transaction to release the lock -- and allow the query in session 2 to complete: update Customers set LastName = 'Smith' where CustId = 11; commit tran; -- CONNECTION 2 -- Take a look at the results from the join query in session 1...notice that the customer data -- (i.e. 
LastName in this case) is different for the 2 orders, even though the CustomerId -- value is exactly the same!!!! -- Both... while @@trancount > 0 rollback tran go ------------------------------------------------------------------- -- REPEATABLE READ isolation... ------------------------------------------------------------------- -- Try the same things with a repeatable read isolation... -- First, we'll reinitialize the t1 table... if object_id('dbo.t1') > 0 drop table dbo.t1; go create table t1 (pkid int, colA varchar(100), fillerCol char(1000)); go -- Add some data... with cte as ( select top 102 row_number() over (order by a.object_id) as id, cast(row_number() over (order by a.object_id) as varchar(10)) + ' - ' + cast(newid() as varchar(50)) as colA, 'blah' as fillerCol from sys.columns a cross join sys.columns b cross join sys.types c ) insert dbo.t1 (pkid, colA, fillerCol) select * from cte where id not in(4,8) -- Don't insert id's 4 or 8... go create unique clustered index ix_t1_c_pkid on dbo.t1 (pkid); create nonclustered index ix_t1_nc_colA on dbo.t1 (colA); go -- CONNECTION 1 set transaction isolation level repeatable read; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- View output of the lock info procedure for connection 1 spid... /* Now you'll see a bunch of KEY locks (probably 10) and a PAGE lock most likely in addition to the shared DB lock from earlier...repeatable read isolation levels introduce key level locks to guarantee reads that won't change within the given key values requested... */ -- CONNECTION 2 set transaction isolation level repeatable read; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; go -- Output will now double (same information for 2nd connection as the -- first...key locks and a page... -- Now try writing in CONNECTION 2 (keep original xact open)... 
update dbo.t1 set fillerCol = 'blah1' where pkid = 5; /* Notice this time this blocks wheras in the read committed isolation level this didn't block...because of the key locks required to guarantee a repeatable read within the keys held by SPID1, SPID2 can't update records for those keys (even though it could read them)... */ -- Stop the update above and instead try the following in CONNECTION 2: update dbo.t1 set fillerCol = 'blah1' where pkid = 15; -- This will work, since it can be updated while still honoring the -- repeatable range for CONNECTION 1... -- How about inserting a value into the range that is being queried -- on? We'll try inserting a record with a pkid of 4 and see if we -- can do so from CONNECTION 2... -- CONNECTION 2... insert dbo.t1 (pkid, colA, fillerCol) select 4, '4 - test_repeatableReadInsert', 'blah2'; -- Works (as expected) without blocking...repeatable read isolation -- doesn't protect against phantom inserts...rerun the select statement -- within CONNECTION 1 and see what happens (can you guess?????)... -- CONNECTION 1 (with transaction running)... select * from dbo.t1 a where a.pkid between 1 and 10; -- If you thought you'd see a new record appear, you were wrong...you -- get a blocking situation...this is because CONNECTION 2 has an -- exclusive lock on the key it inserted within the range being -- queried...if you go to CONNECTION 2 and commit your transaction, -- then requery from CONNECTION 1, you'll see the phantom record -- with ID 4 appear despite the fact that it wasn't there the first time -- we queried within this transaction... -- CONNECTION 2 - commit transaction; -- CONNECTION 1 select * from dbo.t1 a where a.pkid between 1 and 10; -- BOTH CONNECTIONS... while @@trancount > 0 rollback tran go /* How about some of those pesky issues regarding data integrity that we noticed even under the READ COMMITTED isolation...how does REPEATABLE READ help us with those? 
Well, many but not all of these results can be avoided by running at repeatable read isolation level. Unlike a read committed scan, a repeatable read scan retains locks on every row it touches until the end of the transaction. Even rows that do not qualify for the query result remain locked. These locks ensure that the rows touched by the query cannot be updated or deleted by a concurrent session until the current transaction completes (whether it is committed or rolled back). These locks do not protect rows that have not yet been scanned from updates or deletes and do not prevent the insertion of new rows amid the rows that are already locked. */ create table t (a int primary key, b int); insert t values (1, 1); insert t values (2, 2); insert t values (3, 3); select * from t; go -- CONNECTION 1 -- First, let's see how we can move a row and cause a repeatable read scan to miss it. -- start in session 1 by locking the 2nd row... begin tran; update t set b = 2 where a = 2; -- CONNECTION 2 -- In session 2, run a simple scan in the REPEATABLE READ isolation... select * from t with (repeatableread); -- This scan reads the first row then blocks waiting for session 1 to release the lock it -- holds on the second row. -- CONNECTION 1 -- While the scan is blocked, in session 1 let's move the third row to the beginning of the -- table before committing the transaction and releasing the exclusive lock blocking session 2 update t set a = 0 where a = 3; commit tran; -- CONNECTION 2 -- Take a look at the results from the scan in session 2...you'll notice that the 3rd row is -- completely missed and only 2 rows are returned... ------------------------------------------------------------------- -- SERIALIZABLE isolation... ------------------------------------------------------------------- -- Try the same thing with a serialized isolation... 
-- CONNECTION 1 set transaction isolation level serializable; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- View output of the lock info procedure for connection 1 spid... /* Now you'll see a bunch of KEY locks (probably 10) just like you did before in the REPEATABLE READ level, however notice the difference now on the MODE of the lock that is granted...instead of the simple shared lock that was there with the REPEATABLE READ level, you now have RANGE shared locks (looks like 'RangeS-S, which means a range shared lock for a range scan). You will only see these type of locks when you are using a serialized transaction...these differ from the REPEATABLE READ level locks in that not only do they protect the exact key values returned from the statement, but they also protect the entire range of values the query scans and won't allow the phantom inserts within the queried range like the REPEATABLE READ level did... */ -- CONNECTION 2 set transaction isolation level serializable; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; go -- Output will now double (same information for 2nd connection as the -- first... -- Notice that readers still don't block readers in this case... -- Now try writing in CONNECTION 2 (keep original xact open)... update dbo.t1 set fillerCol = 'blah1' where pkid = 5; -- Blocks (as expected) just like it did in the REPEATABLE READ level... -- Stop the update above and instead try the following in CONNECTION 2: update dbo.t1 set fillerCol = 'blah1' where pkid = 15; -- Again, just as with the REPEATABLE READ level, this will work, since -- it can be updated while still honoring the range of data queried/locked -- by connection 1 (pkid values of 1 thru 10...)... -- Let's try inserting a value into the range that is being queried -- on...remember that this worked in REPEATABLE READ level...think it -- will in the serialized xact???? -- CONNECTION 2... 
insert dbo.t1 (pkid, colA, fillerCol) select 4, '4 - test_repeatableReadInsert', 'blah2'; -- Blocks this time (didn't in REPEATABLE READ)...this is expected in the -- serialized isolation, since you're trying to insert data within the -- RANGE of values protected by the serialized xact read from CONNECTION 1 -- Now, rollback the transaction from CONNECTION 1 to see what happens -- in CONNECTION 2's insert request: -- CONNECTION 1: rollback tran; /* Switch to CONNECTION 2 and see what happened with the insert request - should have failed with a duplicate key failure...notice that even though the insert was going to fail due to duplicate key failure, it was STILL BLOCKED due to the range locks held by CONNECTION 1. */ -- BOTH CONNECTIONS... while @@trancount > 0 rollback tran go /********************************************************************************* OPTIMISTIC MODEL - row versioning...not available in Sql2k, available in 2 forms in Sql2k5 (and 2 different isolation levels) *********************************************************************************/ /* METHOD 1 - READ COMMITTED isolation, using optimistic row versioning for concurrency vs. locking... This model implements and honors all the characteristics of the READ COMMITTED isolation level, however with an optimistic implementation, you get the benefits of not requiring a shared lock on the data being read, and hence don't block writers and also readers being able to access committed data that existed prior to a concurrent write request to the same data, hence writers not blocking readers. In this implementation of row versioning however, be aware of some of the differences from the traditional pessimistic (i.e. 
lock-based) model from above: 1) If a reader reads data that is/has been modified during the given statement, the prior committed value is returned - this still honors the read committed isolation, but in the pessimistic model would have resulted in a block until the write operation committed 2) This is NOT the same as a 'dirty' read (or no lock read) - the data being returned is transactionally consistent at the point in time the statement began wheras data in a dirty read scenario has no guarantees of anything at all 3) There is no built-in conflict detection for concurrent updates on data read from different versions of data (this isn't different from the pessimistic model persee, but in the pessimistic model you would have been blocked from reading data that was already being updated by a concurrent transaction) */ -- Set the row versioning model for read committed isolation... -- NOTE: you'll need to be the ONLY connection to the DB for this... alter database zMaint_Sql2005 set read_committed_snapshot on; go /* Let's see how using an optimistic model of concurrency for a READ COMMITTED isolation level differs from the default pessimistic (locking) models. We'll go through the same scenarios we went through above in the READ COMMITTED section to see how they differ, plus we'll do some additional demos to show some other behavior... */ ------------------------------------------------------------------- -- READ COMMITTED isolation (optimistic, row versioning)... ------------------------------------------------------------------- -- Readers and readers...should be the same functionality as before... -- CONNECTION 1 set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- CONNECTION 2 set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- No surprise there...let's move on to the other steps... 
-- Now try writing in CONNECTION 2 (keep original xact open)... -- CONNECTION 2 update dbo.t1 set fillerCol = 'new read committed row version value' where pkid = 5; -- Read the updated value... select * from dbo.t1 a where a.pkid = 5; -- Again, no surprises, works same as above...notice on the read -- of the value in CONNECTION 2 we get the expected value of -- "new read committed row version value" for record id 5... -- Now the differences...run the select from SPID 1 again as we did above in the -- locking model...instead of blocking, this time you should get a result set... -- CONNECTION 1 select * from dbo.t1 a where a.pkid between 1 and 10; /* In the locking model, we were blocked at this point. Here we aren't. Also, notice the value for the record with pkid = 5...in CONNECTION 1 it should still be showing as a value of 'blah1' instead of the 'new read committed row version value' value that it was updated to in CONNECTION 2...CONNECTION 2 hasn't yet committed the transaction that assigned that value to that record, so it is still 'uncommitted' data, and hence the CONNECTION 1 reverts to displaying the prior committed value of 'blah1'. This DOES NOT violate the READ COMMITTED isolation model, and also demonstrates how writers don't block readers any longer in this type of model... */ /* One thing to be aware of in this type of model is that if you have logical processing of data that performs different processing based on the value of the data in the row, here you'd progress down the path for 'blah1': */ -- Writers still block writers in this model...that still can't be avoided... -- This statement will block... -- CONNECTION 1 update dbo.t1 set fillerCol = 'back to another value' where pkid = 5; commit transaction; -- If you look at the output from the lock procedure, you should notice that there are -- still exclusive and intent exclusive locks held for the t1 table, page, and key... 
/* At this point, we're into a scenario where we are updating a record that has been modified within another transaction without a versioning failure and without seeing the 'most recent' copy of the fillerCol value for record ID 5...for example, when we go to CONNECTION 2 and commit the transaction, this update will succeed, despite the fact that we are updating it without knowledge of the most recent committed value (which will have been "new read committed row version value" for a microsecond or 2)... */ -- From connection 2, commit and pause for 5/100ths of a second, then read the value -- for the record we "just" updated from CONNECTION 2...it will come up as -- "back to another value" instead of what we updated it to within that connection... -- CONNECTION 2: select * from dbo.t1 a where a.pkid = 5; commit transaction; waitfor delay '000:00:00.05'; select * from dbo.t1 a where a.pkid = 5; go /* Also need to be aware that RCSI is a statement-level versioning of data...to illustrate, let's run through a scenario... */ -- CONNECTION 1 - read the data... set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- CONNECTION 2 - same thing... set transaction isolation level read committed; begin tran; select * from dbo.t1 a where a.pkid between 1 and 10; -- At this point, both reads show a value of "back to another value" for record -- #5...let's update the date in CONNECTION 2 and commit over there... 
-- CONNECTION 2 update dbo.t1 set fillerCol = 'blah' where pkid = 5; commit transaction; -- Now back on connection 1, let's see what we read: -- CONNECTION 1 select * from dbo.t1 a where a.pkid between 1 and 10; /* Notice that even though we are still in the same transaction that we began earlier on CONNECTION 1 when the value for record 5 was "back to another value" we now see the newly committed value of "blah" again for record 5...RCSI is a statement level version of the data, not a transactional level version of the data... */ -- BOTH CONNECTIONS... while @@trancount > 0 rollback tran go -- Cleanup... alter database zMaint_Sql2005 set read_committed_snapshot off; go /* METHOD 2 - SNAPSHOT isolation, using optimistic row versioning for concurrency vs. locking... This model is an entirely new, complete isolation level and honors consistency at the level of the entire transaction, not at the statement level (like READ COMMITTED using row versioning as above). Concurrency effects of the SNAPSHOT isolation mode are similar to those of the serializable level without the effects of blocking - of course, it still has to worry about managing update conflicts (i.e. the scenario above when demonstrating read committed using row versioning) and does so implicitly via throwing exceptions when update conflicts occur (as you'll see in the script below). This is a complete isolation level - it operates at the transaction level instead of the statement level in terms of views of the data */ -- Set the row versioning model for read committed isolation... -- NOTE: you'll need to be the ONLY connection to the DB for this... alter database zMaint_Sql2005 set allow_snapshot_isolation on; go -- Readers and readers...should be the same functionality as before... 
-- CONNECTION 1
set transaction isolation level snapshot;
begin tran;
select * from dbo.t1 a where a.pkid between 1 and 10;

-- CONNECTION 2
set transaction isolation level snapshot;
begin tran;
select * from dbo.t1 a where a.pkid between 1 and 10;

-- No surprise there...let's move on to the other steps...
-- Take note that both connections currently see record #5 with
-- a value of 'blah'...

-- Now try writing in CONNECTION 2 (keep original xact open)...
-- CONNECTION 2
update dbo.t1 set fillerCol = 'new snapshot row version value' where pkid = 5;
-- Read the updated value...
select * from dbo.t1 a where a.pkid = 5;

-- Again, no surprises, works same as above...notice on the read
-- of the value in CONNECTION 2 we get the expected value of
-- "new snapshot row version value" for record id 5...

-- Read what the value for this record is within
-- Run the select from CONNECTION 1 - again, we don't get blocked and
-- instead get a result set back...we still show a value of "blah" for
-- record #5...
-- CONNECTION 1
select * from dbo.t1 a where a.pkid between 1 and 10;

/*
In the locking model, we were blocked at this point. Here we aren't. Also,
notice the value for the record with pkid = 5...in CONNECTION 1 it should
still be showing as a value of 'blah' instead of the 'new snapshot row
version value' value that it was updated to in CONNECTION 2...CONNECTION 2
hasn't yet committed the transaction that assigned that value to that
record, so it is still 'uncommitted' data, and hence CONNECTION 1 reverts
to displaying the prior committed value of 'blah'.
*/

-- Again, writers still block writers in this model...that still can't be avoided...
-- This statement will block...
-- CONNECTION 1
update dbo.t1 set fillerCol = 'back to another value again...' where pkid = 5;
commit transaction;

-- If you look at the output from the lock procedure, you should notice that there are
-- still exclusive and intent exclusive locks held for the t1 table, page, and key...
/*
As above with RCSI, we are at this point, into a scenario where we are
updating a record that has been modified within another transaction without
a versioning failure and without seeing the 'most recent' copy of the
fillerCol value for record ID 5...
*/
-- From connection 2, commit and pause for 5/100ths of a second, then read the value
-- for the record we "just" updated from CONNECTION 2...
-- CONNECTION 2:
select * from dbo.t1 a where a.pkid = 5;
commit transaction;
waitfor delay '000:00:00.05';
select * from dbo.t1 a where a.pkid = 5;
go

-- If you expected the same thing to occur that happened above with RCSI, you were wrong :-)...
-- Notice that the transaction in CONNECTION 2 committed fine and both the before and
-- after reads of the data show the value from that transaction of "new snapshot row version value"
-- CONNECTION 1 should have failed its blocked update with an exception that stated:
/*
Msg 3960, Level 16, State 2, Line 1
Snapshot isolation transaction aborted due to update conflict. You cannot use
snapshot isolation to access table 'dbo.t1' directly or indirectly in database
'zMaint_Sql2005' to update, delete, or insert the row that has been modified
or deleted by another transaction. Retry the transaction or change the
isolation level for the update/delete statement.
*/
-- This is the built-in update conflict detection included with the SNAPSHOT isolation level in action...

/*
Let's illustrate how SNAPSHOT isolation differs from RCSI in that it is a
view of the data versioned at the transaction level and not the statement
level (as RCSI is)...
*/

-- BOTH CONNECTIONS...
while @@trancount > 0 rollback tran
go

-- CONNECTION 1 - read the data...
begin tran;
select * from dbo.t1 a where a.pkid between 1 and 10;

-- CONNECTION 2 - same thing...
begin tran;
select * from dbo.t1 a where a.pkid between 1 and 10;

-- At this point, both reads show a value of "new snapshot row version value" for
-- record #5...let's update the data in CONNECTION 2 and commit over there...

-- CONNECTION 2
update dbo.t1 set fillerCol = 'blah' where pkid = 5;
commit transaction;
select * from dbo.t1 a where a.pkid = 5;

-- In CONNECTION 2, you should notice that record 5 has been set back to a value
-- of "blah" for record #5...

-- Now back on connection 1, let's see what we read:
-- CONNECTION 1
select * from dbo.t1 a where a.pkid between 1 and 10;

/*
Notice how, unlike RCSI above, when we read here, we get the original value
of "new snapshot row version value" for record #5...in RCSI at this point we
would have read a value of "blah" for record #5 since RCSI is a statement
level view of the committed version of data...SNAPSHOT isolation is a
transactional view of the data...since we started a transaction when the
value of row #5 was "new snapshot row version value", then that's the value
we will see for the duration of this transaction...
*/

-- BOTH CONNECTIONS...
while @@trancount > 0 rollback tran
go

-- Cleanup...
alter database zMaint_Sql2005 set allow_snapshot_isolation off;
go

-------------------------------------------------------------------
-- TRANSACTION ISOLATION DIFFERENCES
-- - Not Concurrency Differences, but instead how the exact same transactional
--   modifications may result in different final outcomes
-------------------------------------------------------------------

/*
It is typically relatively simple to understand SERIALIZABLE. For the
outcome of two transactions to be considered SERIALIZABLE, it must be
possible to achieve this outcome by running one transaction at a time in
some order. Snapshot does not guarantee this level of transactional
isolation. Imagine the following sample scenario:

There is a bag containing a mixture of white and black marbles. Suppose that
we want to run two transactions. One transaction turns each of the white
marbles into black marbles. The second transaction turns each of the black
marbles into white marbles. If we run these transactions under SERIALIZABLE
isolation, we must run them one at a time. The first transaction will leave
a bag with marbles of only one color. After that, the second transaction
will change all of these marbles to the other color. There are only two
possible outcomes: a bag with only white marbles or a bag with only black
marbles.

If we run these transactions under snapshot isolation, there is a third
outcome that is not possible under SERIALIZABLE isolation. Each transaction
can simultaneously take a snapshot of the bag of marbles as it exists before
we make any changes. Now one transaction finds the white marbles and turns
them into black marbles. At the same time, the other transaction finds the
black marbles - but only those marbles that were black when we took the
snapshot - not those marbles that the first transaction changed to black -
and turns them into white marbles. In the end, we still have a mixed bag of
marbles with some white and some black. In fact, we have precisely switched
each marble.
*/
-- We can demonstrate this outcome using SQL Server.

-- Enable snapshot isolation...
alter database zMaint_Sql2005 set allow_snapshot_isolation on;

-- Begin by creating a simple table with two rows representing two marbles:
create table marbles (id int primary key, color char(5));
insert marbles values(1, 'Black');
insert marbles values(2, 'White');
select * from marbles;

-- CONNECTION 1
-- Start a snapshot transaction...update Black marbles to White...
set transaction isolation level snapshot;
begin tran;
update marbles set color = 'White' where color = 'Black';

-- CONNECTION 2
-- Before committing the changes, run the following in session 2 (changing White
-- to Black)...
set transaction isolation level snapshot;
begin tran;
update marbles set color = 'Black' where color = 'White';
commit tran;

-- CONNECTION 1
-- Back to session 1, commit the transaction in session 1 and check the data in the table:
commit tran;
select * from marbles;

-- As you can see, marble 1 which started out black is now white, and marble 2 which
-- started out white is now black - the colors have been swapped, an outcome no serial
-- ordering of the two transactions could produce.
-- If you try this same experiment with serializable isolation, one transaction will
-- wait for the other to complete and, depending on the order, both marbles will end
-- up either white or black.

-- CONNECTION 1
-- Start a SERIALIZABLE transaction...update Black marbles to White...
select * from marbles;
set transaction isolation level serializable;
begin tran;
update marbles set color = 'White' where color = 'Black';

-- CONNECTION 2
-- Before committing the changes, run the following in session 2 (changing White
-- to Black)...this will block waiting for session 1...
set transaction isolation level serializable;
begin tran;
update marbles set color = 'Black' where color = 'White';
commit tran;

-- CONNECTION 1
-- Back to session 1, commit the transaction in session 1 and check the data in the table:
commit tran;
select * from marbles;
-- Should be all white...

-- CONNECTION 2
-- Go back to session 2, you should see that all marbles are now black...
select * from marbles;

-- Cleanup...
alter database zMaint_Sql2005 set allow_snapshot_isolation off;