def test_truncate_cursor_order(self):
    """Truncate a range whose cursors are positioned past the end of the object.

    Runs once against a simple (one-file) dataset; for table objects it is
    repeated against a complex dataset. The duplicated simple/complex code
    paths are collapsed into a single loop over dataset classes.
    """
    uri = self.type + self.name
    # A simple, one-file file or table object; tables also get a complex one.
    dataset_classes = [SimpleDataSet]
    if self.type == "table:":
        dataset_classes.append(ComplexDataSet)
    for dataset_class in dataset_classes:
        ds = dataset_class(self, uri, 100, key_format=self.keyfmt)
        ds.populate()
        # Both keys are past the last populated row (100): truncating an
        # empty range must still succeed.
        c1 = self.session.open_cursor(uri, None)
        c1.set_key(ds.key(1000))
        c2 = self.session.open_cursor(uri, None)
        c2.set_key(ds.key(2000))
        self.session.truncate(None, c1, c2, None)
        self.assertEqual(c1.close(), 0)
        self.assertEqual(c2.close(), 0)
        self.session.drop(uri)
def test_log_ts(self):
    """Commit with and without a timestamp on a write_timestamp-configured table.

    Fix: removed a stray self.session.breakpoint() debugging call that was
    left in the committed test.
    """
    if wiredtiger.diagnostic_build():
        self.skipTest('requires a non-diagnostic build')

    # Create an object that's never written, it's just used to generate valid k/v pairs.
    ds = SimpleDataSet(
        self, 'file:notused', 10, key_format=self.key_format, value_format=self.value_format)

    # Open the object, configuring write_timestamp usage.
    uri = 'table:ts'
    config = ',write_timestamp_usage='
    config += 'always' if self.always else 'never'
    self.session.create(uri, 'key_format={},value_format={}'.format(
        self.key_format, self.value_format) + config)
    c = self.session.open_cursor(uri)

    # Commit with a timestamp.
    self.session.begin_transaction()
    c[ds.key(1)] = ds.value(1)
    self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(10))

    # Commit without a timestamp.
    self.session.begin_transaction()
    c[ds.key(2)] = ds.value(2)
    self.session.commit_transaction()
def test_modify_many(self):
    """Apply many successive modify operations to one key and verify the result.

    Fixes: deprecated assertEquals replaced with assertEqual; the inner
    comprehension variable shadowed the outer loop variable `i` and is now `_`.
    """
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    c = self.session.open_cursor(self.uri, None)
    self.session.begin_transaction("isolation=snapshot")
    c.set_key(ds.key(10))
    orig = self.make_value('abcdefghijklmnopqrstuvwxyz')
    c.set_value(orig)
    self.assertEqual(c.update(), 0)
    for i in range(0, 50000):
        # Replace bytes [10, 15) with five random digits, tracking the
        # expected value in `orig` alongside the cursor modifies.
        new = self.make_value("".join([random.choice(string.digits) \
            for _ in range(5)]))
        orig = orig[:10] + new + orig[15:]
        mods = []
        mod = wiredtiger.Modify(new, 10, 5)
        mods.append(mod)
        mods = self.fix_mods(mods)
        self.assertEqual(c.modify(mods), 0)
    self.session.commit_transaction()

    # Confirm the accumulated modifies produced the tracked value.
    c.set_key(ds.key(10))
    self.assertEqual(c.search(), 0)
    self.assertEqual(c.get_value(), orig)
def test_missing(self):
    """Search forward and backward through gaps of missing records.

    Fix: removed the redundant list() wrapper around an already-list
    concatenation, and deduplicated the missing-range expression.
    """
    ds = SimpleDataSet(self, self.uri, self.nentries,
        config=self.config, key_format=self.keyfmt)
    ds.populate()

    # Append records past the end, leaving a gap of missing records.
    c = self.session.open_cursor(self.uri, None)
    for i in range(self.nentries + 3000, self.nentries + 5001):
        c[ds.key(i)] = ds.value(i)
    self.reopen_conn()

    c = self.session.open_cursor(self.uri, None)
    missing = list(range(self.nentries + 1, self.nentries + 3000))
    self.forward(c, ds, self.nentries + 5000, missing)
    self.backward(c, ds, self.nentries + 5000, missing)

    # Insert into the empty space so we test searching inserted items.
    for i in range(self.nentries + 1000, self.nentries + 2001):
        c[ds.key(i)] = ds.value(i)
    # The remaining missing ranges now surround the newly inserted block.
    missing = list(range(self.nentries + 1, self.nentries + 1000)) + \
        list(range(self.nentries + 2001, self.nentries + 3000))
    self.forward(c, ds, self.nentries + 5000, missing)
    self.backward(c, ds, self.nentries + 5000, missing)
def test_insert_over_delete_replace(self):
    """Fill the cache, remove a quarter of the rows, and verify an insert
    eventually succeeds once eviction reclaims space."""
    msg = '/WT_CACHE_FULL.*/'
    ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
        value_format=self.valuefmt, config=self.table_config)
    # Populating ten million rows must fail with a cache-full error.
    self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, ds.populate, msg)

    # Find the last key that actually made it in before the cache filled.
    cursor = self.session.open_cursor(self.uri, None)
    cursor.prev()
    last_key = int(cursor.get_key())

    # Now that the database contains as much data as will fit into
    # the configured cache, verify removes succeed.
    cursor = self.session.open_cursor(self.uri, None)
    for key_num in range(1, last_key // 4, 1):
        cursor.set_key(ds.key(key_num))
        cursor.remove()
    cursor.reset()

    # Spin inserting to give eviction a chance to reclaim space, but give
    # up (fail the test) after roughly five minutes of sleeping.
    sleep_count = 0
    succeeded = False
    for _ in range(1, 1000):
        try:
            cursor[ds.key(1)] = ds.value(1)
        except wiredtiger.WiredTigerError:
            cursor.reset()
            sleep_count += 1
            self.assertLess(sleep_count, 60 * 5)
            sleep(1)
            continue
        succeeded = True
        break
    self.assertTrue(succeeded)
def test_insert_over_delete_replace(self):
    """Fill the cache, remove rows, and verify an insert eventually succeeds.

    Fix: `last_key / 4` is float division in Python 3 and raises TypeError
    inside range(); use integer division `//` instead.
    """
    msg = '/WT_CACHE_FULL.*/'
    ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
        value_format=self.valuefmt, config=self.table_config)
    # Populating must fail once the cache fills up.
    self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, ds.populate, msg)

    # Find the last key inserted before the cache filled.
    cursor = self.session.open_cursor(self.uri, None)
    cursor.prev()
    last_key = int(cursor.get_key())

    # Now that the database contains as much data as will fit into
    # the configured cache, verify removes succeed.
    cursor = self.session.open_cursor(self.uri, None)
    for i in range(1, last_key // 4, 1):
        cursor.set_key(ds.key(i))
        cursor.remove()
    cursor.reset()

    # Spin inserting to give eviction a chance to reclaim space.
    inserted = False
    for i in range(1, 1000):
        try:
            cursor[ds.key(1)] = ds.value(1)
        except wiredtiger.WiredTigerError:
            cursor.reset()
            sleep(1)
            continue
        inserted = True
        break
    self.assertTrue(inserted)
def test_search_invisible_two(self):
    """Search and search-near for records another transaction has not committed."""
    # Populate the tree and reopen the connection, forcing it to disk
    # and moving the records to an on-page format.
    ds = SimpleDataSet(self, self.uri, 100, key_format=self.key_format, value_format=self.value_format)
    ds.populate()
    self.reopen_conn()

    # Add some additional visible records.
    cursor = self.session.open_cursor(self.uri, None)
    for i in range(100, 120):
        cursor[ds.key(i)] = ds.value(i)
    cursor.close()

    # Begin a transaction, and add some additional records.
    # The transaction is left open so these records stay invisible to others.
    self.session.begin_transaction()
    cursor = self.session.open_cursor(self.uri, None)
    for i in range(120, 140):
        cursor[ds.key(i)] = ds.value(i)

    # Open a separate session and cursor.
    s = self.conn.open_session()
    cursor = s.open_cursor(self.uri, None)

    # Search for an invisible record.
    cursor.set_key(ds.key(130))
    if self.empty:
        # Invisible updates to fixed-length column-store objects are
        # invisible to the reader, but the fact that they exist past
        # the end of the initial records causes the instantiation of
        # empty records: confirm successful return of an empty row.
        cursor.search()
        self.assertEqual(cursor.get_key(), 130)
        self.assertEqual(cursor.get_value(), 0)
    else:
        # Otherwise, we should not find any matching records.
        self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

    # Search-near for an invisible record, which should succeed, returning
    # the last visible record.
    cursor.set_key(ds.key(130))
    cursor.search_near()
    if self.empty:
        # Invisible updates to fixed-length column-store objects are
        # invisible to the reader, but the fact that they exist past
        # the end of the initial records causes the instantiation of
        # empty records: confirm successful return of an empty row.
        cursor.search()
        self.assertEqual(cursor.get_key(), 130)
        self.assertEqual(cursor.get_value(), 0)
    else:
        # Otherwise, we should find the closest record for which we can see
        # the value (key 119 is the last committed, visible record).
        self.assertEqual(cursor.get_key(), ds.key(119))
        self.assertEqual(cursor.get_value(), ds.value(119))
def test_timestamp_inconsistent_update(self):
    """Verify ordered write_timestamp usage rejects out-of-order commits per key."""
    if wiredtiger.diagnostic_build():
        self.skipTest('requires a non-diagnostic build')

    # Create an object that's never written, it's just used to generate valid k/v pairs.
    ds = SimpleDataSet(
        self, 'file:notused', 10, key_format=self.key_format, value_format=self.value_format)

    # Create the table with the key consistency checking turned on. That checking will verify
    # any individual key is always or never used with a timestamp. And if it is used with a
    # timestamp that the timestamps are in increasing order for that key.
    uri = 'table:ts'
    self.session.create(uri, 'key_format={},value_format={}'.format(
        self.key_format, self.value_format) + ',write_timestamp_usage=ordered')
    c = self.session.open_cursor(uri)

    key = ds.key(1)
    # Insert an item at timestamp 2.
    self.session.begin_transaction()
    c[key] = ds.value(1)
    self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(2))

    # Update the data item at timestamp 1, which should fail.
    self.session.begin_transaction()
    self.session.timestamp_transaction('commit_timestamp=' + self.timestamp_str(1))
    c[key] = ds.value(2)
    msg = '/updates a value with an older timestamp/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.session.commit_transaction(), msg)

    # Make sure we can successfully add a different key at timestamp 1.
    self.session.begin_transaction()
    self.session.timestamp_transaction('commit_timestamp=' + self.timestamp_str(1))
    c[ds.key(2)] = ds.value(3)
    self.session.commit_transaction()

    # Insert key1 at timestamp 10 and key2 at 15. Then update both keys in one transaction at
    # timestamp 13, and we should get a complaint about usage.
    key1 = ds.key(3)
    key2 = ds.key(4)
    self.session.begin_transaction()
    c[key1] = ds.value(3)
    self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(10))
    self.session.begin_transaction()
    c[key2] = ds.value(4)
    self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(15))

    # Timestamp 13 is older than key2's timestamp 15, so the commit must fail.
    self.session.begin_transaction()
    self.session.timestamp_transaction('commit_timestamp=' + self.timestamp_str(13))
    c[key1] = ds.value(5)
    c[key2] = ds.value(6)
    msg = '/updates a value with an older timestamp/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.session.commit_transaction(), msg)
def test_rollback_to_stable38(self): nrows = 1000000 # Create a table. uri = "table:rollback_to_stable38" ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format=self.value_format, config=self.extraconfig) ds.populate() if self.value_format == '8t': value_a = 97 else: value_a = "aaaaa" * 100 # Pin a transaction session2 = self.conn.open_session() session2.begin_transaction() # Write a value to table. cursor1 = self.session.open_cursor(ds.uri) for i in range(1, nrows + 1): self.session.begin_transaction() cursor1[ds.key(i)] = value_a self.session.commit_transaction() # Write another value to table. cursor1 = self.session.open_cursor(ds.uri) for i in range(1, nrows + 1): self.session.begin_transaction() cursor1[ds.key(i)] = value_a self.session.commit_transaction() # Do a checkpoint self.session.checkpoint() session2.rollback_transaction() session2.close() # Roll back via crashing. simulate_crash_restart(self, ".", "RESTART") stat_cursor = self.session.open_cursor('statistics:', None, None) hs_btree_truncate = stat_cursor[stat.conn.cache_hs_btree_truncate][2] fastdelete_pages = stat_cursor[stat.conn.rec_page_delete_fast][2] self.assertGreater(hs_btree_truncate, 0) self.assertGreater(fastdelete_pages, 0) stat_cursor.close()
def test_in_memory_ts(self):
    """Exercise write_timestamp usage checking for in-memory objects.

    Fixes: removed a stray self.session.breakpoint() debugging call; replaced
    non-idiomatic `== True` / `== False` boolean comparisons.
    """
    if wiredtiger.diagnostic_build():
        self.skipTest('requires a non-diagnostic build')

    # Create an object that's never written, it's just used to generate valid k/v pairs.
    ds = SimpleDataSet(self, 'file:notused', 10,
        key_format=self.key_format, value_format=self.value_format)

    # Open the object, configuring write_timestamp usage.
    uri = 'table:ts'
    config = ',' + self.obj_config
    config += ',write_timestamp_usage='
    config += 'ordered' if self.always else 'never'
    self.session.create(
        uri, 'key_format={},value_format={}'.format(
        self.key_format, self.value_format) + config)
    c = self.session.open_cursor(uri)

    # Commit with a timestamp.
    self.session.begin_transaction()
    c[ds.key(1)] = ds.value(1)
    if self.always or self.obj_ignore:
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(1))
    else:
        # Timestamps are disallowed, so the commit must fail.
        msg = '/unexpected timestamp usage/'
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError,
            lambda: self.session.commit_transaction(
            'commit_timestamp=' + self.timestamp_str(1)), msg)

    # Commit without a timestamp (but first with a timestamp if in ordered mode so we get
    # a failure).
    if self.always:
        self.session.begin_transaction()
        c[ds.key(2)] = ds.value(2)
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(2))
    self.session.begin_transaction()
    c[ds.key(2)] = ds.value(2)
    if not self.always or self.obj_ignore:
        self.session.commit_transaction()
    else:
        # Ordered mode requires a timestamp once the key has used one.
        msg = '/no timestamp provided/'
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError,
            lambda: self.session.commit_transaction(), msg)
def test_rollback_reason(self):
    """Verify session.get_rollback_reason reports and clears rollback causes.

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    uri = "table:txn27"
    # Create a very basic table.
    ds = SimpleDataSet(self, uri, 10, key_format='S', value_format='S')
    ds.populate()

    # Update key 5 in the first session.
    session1 = self.session
    cursor1 = session1.open_cursor(uri)
    session1.begin_transaction()
    cursor1[ds.key(5)] = "aaa"

    # Update the same key in the second session, expect a conflict error to be produced.
    session2 = self.conn.open_session()
    cursor2 = session2.open_cursor(uri)
    session2.begin_transaction()
    cursor2.set_key(ds.key(5))
    cursor2.set_value("bbb")
    msg1 = '/conflict between concurrent operations/'
    self.assertRaisesException(wiredtiger.WiredTigerError, lambda: cursor2.update(), msg1)
    self.assertEqual('/' + session2.get_rollback_reason() + '/', msg1)

    # Rollback the transactions, check that session2's rollback error was cleared.
    session2.rollback_transaction()
    self.assertEqual(session2.get_rollback_reason(), None)
    session1.rollback_transaction()

    # Start a new transaction and insert a value far too large for cache.
    session1.begin_transaction()
    cursor1.set_key(ds.key(1))
    cursor1.set_value("a" * 1024 * 5000)
    self.assertEqual(0, cursor1.update())

    # Let WiredTiger's accounting catch up.
    time.sleep(2)

    # Attempt to insert another value with the same transaction. This will result in the
    # application thread being pulled into eviction and getting rolled back.
    cursor1.set_key(ds.key(2))
    cursor1.set_value("b" * 1024)

    # This is the message that we expect to be raised when a thread is rolled back due to
    # cache pressure.
    msg2 = 'oldest pinned transaction ID rolled back for eviction'

    # Expect stdout to give us the true reason for the rollback.
    with self.expectedStdoutPattern(msg2):
        # This reason is the default reason for WT_ROLLBACK errors so we need to catch it.
        self.assertRaisesException(wiredtiger.WiredTigerError, lambda: cursor1.update(), msg1)

    # Expect the rollback reason to give us the true reason for the rollback.
    self.assertEqual(session1.get_rollback_reason(), msg2)
def test_truncate_cursor_order(self):
    """Truncating with a start cursor positioned after the stop cursor fails;
    truncating an equal-key range succeeds."""
    uri = self.type + self.name
    ds = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
    ds.populate()

    start_cursor = self.session.open_cursor(uri, None)
    stop_cursor = self.session.open_cursor(uri, None)

    # Position the cursors out of order: start (20) after stop (10).
    start_cursor.set_key(ds.key(20))
    stop_cursor.set_key(ds.key(10))
    msg = "/the start cursor position is after the stop cursor position/"
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.session.truncate(None, start_cursor, stop_cursor, None), msg)

    # Re-position the stop cursor on the same key; truncate must succeed.
    stop_cursor.set_key(ds.key(20))
    self.session.truncate(None, start_cursor, stop_cursor, None)
def test_modify_abort(self):
    """Modify behavior around uncommitted and aborted inserts.

    Fix: the one deprecated assertEquals is replaced with assertEqual,
    matching the rest of the block.
    """
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    # Start a transaction.
    self.session.begin_transaction("isolation=snapshot")

    # Insert a new record.
    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(30))
    c.set_value(ds.value(30))
    self.assertEqual(c.insert(), 0)

    # Test that we can successfully modify our own record.
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    c.set_key(ds.key(30))
    mods = self.fix_mods(mods)
    self.assertEqual(c.modify(mods), 0)

    # Test that another transaction cannot modify our uncommitted record.
    xs = self.conn.open_session()
    xc = xs.open_cursor(self.uri, None)
    xs.begin_transaction("isolation=snapshot")
    xc.set_key(ds.key(30))
    xc.set_value(ds.value(30))
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    mods = self.fix_mods(mods)
    xc.set_key(ds.key(30))
    self.assertEqual(xc.modify(mods), wiredtiger.WT_NOTFOUND)
    xs.rollback_transaction()

    # Rollback our transaction.
    self.session.rollback_transaction()

    # Test that we can't modify our aborted insert.
    self.session.begin_transaction("isolation=snapshot")
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    mods = self.fix_mods(mods)
    c.set_key(ds.key(30))
    self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
    self.session.rollback_transaction()
def test_search_empty(self):
    """Search and search-near in an empty object both return WT_NOTFOUND."""
    # Create the object (zero rows) and open a cursor on it.
    ds = SimpleDataSet(self, self.uri, 0,
        key_format=self.key_format, value_format=self.value_format)
    ds.create()
    cursor = self.session.open_cursor(self.uri, None)

    # Exact search for a record past the end of the (empty) table fails.
    cursor.set_key(ds.key(100))
    self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)

    # Search-near fails too: there is no record on either side to land on.
    cursor.set_key(ds.key(100))
    self.assertEqual(cursor.search_near(), wiredtiger.WT_NOTFOUND)
def test_smoke(self):
    """Populate, reopen the connection, and spot-check a few keys via search."""
    ds = SimpleDataSet(self, self.uri, self.nentries,
        config=self.config, key_format=self.keyfmt)
    ds.populate()
    # Force the object through the disk/reopen path.
    self.reopen_conn()

    c = self.session.open_cursor(self.uri, None)
    # Check keys near the start and at the end of the populated range.
    for keyno in (100, 101, 9999):
        c.set_key(ds.key(keyno))
        self.assertEqual(c.search(), 0)
        self.assertEqual(c.get_value(), ds.value(keyno))
def test_modify_delete(self):
    """Modify of a removed record returns WT_NOTFOUND.

    Fix: deprecated assertEquals replaced with assertEqual, matching the
    other assertion in the block.
    """
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    # Remove the record.
    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(10))
    self.assertEqual(c.remove(), 0)

    # Modifying the removed record must fail.
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    c.set_key(ds.key(10))
    self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
def test_truncate_cursor_order(self):
    """Truncate rejects a start cursor positioned after the stop cursor."""
    uri = self.type + self.name
    ds = SimpleDataSet(self, uri, 100, key_format=self.keyfmt)
    ds.populate()

    lo_cursor = self.session.open_cursor(uri, None)
    hi_cursor = self.session.open_cursor(uri, None)

    # Out-of-order positioning: start at key 20, stop at key 10.
    lo_cursor.set_key(ds.key(20))
    hi_cursor.set_key(ds.key(10))
    msg = '/the start cursor position is after the stop cursor position/'
    self.assertRaisesWithMessage(
        wiredtiger.WiredTigerError,
        lambda: self.session.truncate(None, lo_cursor, hi_cursor, None), msg)

    # With both cursors on key 20, the truncate succeeds.
    hi_cursor.set_key(ds.key(20))
    self.session.truncate(None, lo_cursor, hi_cursor, None)
def test_prepare18(self):
    """Preparing a transaction that touched a logged table must fail."""
    uri = "table:prepare18"
    ds = SimpleDataSet(self, uri, 100, key_format='S', value_format='S')
    ds.populate()
    cursor = self.session.open_cursor(uri, None)

    # A plain committed update on the logged table works fine.
    self.session.begin_transaction()
    cursor[ds.key(10)] = ds.value(20)
    self.session.commit_transaction()

    # The same update followed by prepare_transaction is rejected.
    self.session.begin_transaction()
    cursor[ds.key(10)] = ds.value(20)
    msg = '/a prepared transaction cannot include a logged table/'
    self.assertRaisesWithMessage(
        wiredtiger.WiredTigerError,
        lambda: self.session.prepare_transaction('prepare_timestamp=1'), msg)
def test_modify_delete(self):
    """Modify of a removed record (value_format 'u') returns WT_NOTFOUND.

    Fix: deprecated assertEquals replaced with assertEqual, matching the
    other assertion in the block.
    """
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format='u')
    ds.populate()

    # Remove the record.
    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(10))
    self.assertEqual(c.remove(), 0)

    # Modifying the removed record must fail.
    mods = []
    mod = wiredtiger.Modify('ABCD', 3, 3)
    mods.append(mod)
    c.set_key(ds.key(10))
    self.assertEqual(c.modify(mods), wiredtiger.WT_NOTFOUND)
def test_wtu_never(self):
    """With write_timestamp_usage=never, committing with a timestamp fails."""
    if wiredtiger.diagnostic_build():
        self.skipTest('requires a non-diagnostic build')

    # Create an object that's never written, it's just used to generate valid k/v pairs.
    ds = SimpleDataSet(
        self, 'file:notused', 10, key_format=self.key_format, value_format=self.value_format)

    # Open the object, configuring write_timestamp usage.
    uri = 'table:ts'
    self.session.create(uri, 'key_format={},value_format={}'.format(
        self.key_format, self.value_format) + ',write_timestamp_usage=never')
    cursor = self.session.open_cursor(uri)

    self.session.begin_transaction()
    cursor[ds.key(7)] = ds.value(8)

    if self.with_ts:
        # Commit with a timestamp: check both an explicit
        # timestamp_transaction set and a set at commit time.
        ts_config = 'commit_timestamp=' + self.timestamp_str(10)
        if not self.commit_ts:
            self.session.timestamp_transaction(ts_config)
            ts_config = ''
        msg = '/set when disallowed/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.commit_transaction(ts_config), msg)
    else:
        # Commit without a timestamp.
        self.session.commit_transaction()
def test_timestamp_ts_then_nots(self):
    """Once a key has been committed with a timestamp under ordered usage,
    a later non-timestamped commit of that key must fail."""
    if wiredtiger.diagnostic_build():
        self.skipTest('requires a non-diagnostic build')

    # Dataset is never written; it only generates valid key/value pairs.
    ds = SimpleDataSet(
        self, 'file:notused', 10, key_format=self.key_format, value_format=self.value_format)

    # Create the table with the key consistency checking turned on. That checking will verify
    # any individual key is always or never used with a timestamp. And if it is used with a
    # timestamp that the timestamps are in increasing order for that key.
    uri = 'table:ts'
    self.session.create(uri, 'key_format={},value_format={}'.format(
        self.key_format, self.value_format) + ',write_timestamp_usage=ordered')
    cursor = self.session.open_cursor(uri)
    key = ds.key(5)

    # First commit the key with a timestamp.
    self.session.begin_transaction()
    cursor[key] = ds.value(11)
    self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(20))

    # Now try to commit the same key without a timestamp: rejected.
    self.session.begin_transaction()
    cursor[key] = ds.value(12)
    msg = '/configured to always use timestamps once they are first used/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.session.commit_transaction(), msg)
def test_hs(self):
    """Verify history-store reads across eviction with a long-running reader."""
    # Create a file that contains active history (content newer than the oldest timestamp).
    table_uri = 'table:hs27'
    ds = SimpleDataSet(
        self, table_uri, 0, key_format='r', value_format='S', config='log=(enabled=false)')
    ds.populate()
    self.session.checkpoint()

    # Write the initial values, if requested.
    if self.doinit:
        self.initialize(ds.uri, ds)

    # Pin oldest and stable to timestamp 1.
    self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1) +
        ',stable_timestamp=' + self.timestamp_str(1))

    # Create a long running read transaction in a separate session.
    session_read = self.conn.open_session()
    session_read.begin_transaction('read_timestamp=' + self.timestamp_str(2))

    # Check that the initial writes (at timestamp 1) are seen (at timestamp 2).
    self.check(session_read, ds.uri, ds, 2, make_own_txn=False)

    # Write more values at assorted timestamps.
    self.updateall(ds.uri, ds)

    # Check that the new updates are appropriately visible.
    self.checkall(self.session, ds.uri, ds)

    # Now forcibly evict, so that all the pages are RLE-encoded and then read back in.
    # There doesn't seem to be any way to just forcibly evict an entire table, so what
    # I'm going to do is assume that what we care about is evicting the updates (the
    # initial values are not so interesting) and they are on a maximum of two pages,
    # so we can evict the first and last key. If this evicts the same page twice, it
    # won't really hurt anything. (This also avoids having to worry about whether we
    # wrote initial values or not.)
    evict_cursor = self.session.open_cursor(ds.uri, None, "debug=(release_evict)")
    self.session.begin_transaction()
    firstkey = self.get_key(0, 0)
    lastkey = self.get_key(self.nkeys - 1, self.ntimes - 1)
    for k in [firstkey, lastkey]:
        # Search the key to evict it.
        v = evict_cursor[ds.key(k)]
        self.assertEqual(v, self.value_2)
    self.assertEqual(evict_cursor.reset(), 0)
    self.session.rollback_transaction()

    # Check that the long-running read transaction still reads the correct data.
    self.check(session_read, ds.uri, ds, 2, make_own_txn=False)

    # Check that our main session reads the correct data.
    self.checkall(self.session, ds.uri, ds)

    # Drop the long running read transaction.
    session_read.rollback_transaction()

    # Check that our main session can still read the latest data.
    self.check(self.session, ds.uri, ds, 100)
def test_prepare_hs(self):
    """Load a large dataset, checkpoint, then run prepared-update checks.

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    ds = SimpleDataSet(self, self.uri, self.nrows,
        key_format=self.key_format, value_format=self.value_format)
    ds.populate()

    # Fixed-length column store uses a single byte value.
    if self.value_format == '8t':
        bigvalue = 97
    else:
        bigvalue = b"aaaaa" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(self.uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(self.nrows + i))
        cursor.set_value(bigvalue)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # We put prepared updates in multiple sessions so that we do not hang
    # because of cache being full with uncommitted updates.
    self.prepare_updates(ds)
def test_hs(self):
    """Exercise the history store across reads, modifies, and timestamps."""
    # Create a small table.
    uri = "table:test_hs01"
    nrows = 100
    ds = SimpleDataSet(self, uri, nrows, key_format=self.key_format, value_format='u')
    ds.populate()
    bigvalue = b"aaaaa" * 100

    # Initially load huge data.
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(nrows + i))
        cursor.set_value(bigvalue)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # Scenario: 1
    # Check to see if the history store is working with the old reader.
    bigvalue2 = b"ccccc" * 100
    session2 = self.conn.open_session()
    session2.begin_transaction('isolation=snapshot')
    self.large_updates(self.session, uri, bigvalue2, ds, nrows)

    # Check to see the value after recovery.
    self.durable_check(bigvalue2, uri, ds, nrows)
    session2.rollback_transaction()
    session2.close()

    # Scenario: 2
    # Check to see the history store working with modify operations.
    bigvalue3 = b"ccccc" * 100
    bigvalue3 = b'AA' + bigvalue3[2:]
    session2 = self.conn.open_session()
    session2.begin_transaction('isolation=snapshot')

    # Apply two modify operations - replacing the first two items with 'A'.
    self.session.begin_transaction()
    self.large_modifies(self.session, uri, 0, ds, nrows)
    self.large_modifies(self.session, uri, 1, ds, nrows)
    self.session.commit_transaction()

    # Check to see the value after recovery.
    self.durable_check(bigvalue3, uri, ds, nrows)
    session2.rollback_transaction()
    session2.close()

    # FIXME-WT-7120: Rollback to stable support for column store is not implemented, and it
    # fails only when it is used with timestamps.
    if self.key_format == 'r':
        return

    # Scenario: 3
    # Check to see if the history store is working with the old timestamp.
    bigvalue4 = b"ddddd" * 100
    self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
    self.large_updates(self.session, uri, bigvalue4, ds, nrows, timestamp=True)

    # Check to see data can be see only till the stable_timestamp
    self.durable_check(bigvalue3, uri, ds, nrows)

    # NOTE(review): `i` here is the stale index from the load loop above
    # (final value 9999), so this advances stable to timestamp 10000 —
    # presumably intended to be past all update timestamps; confirm.
    self.conn.set_timestamp('stable_timestamp=' + timestamp_str(i + 1))
    # Check that the latest data can be seen.
    self.durable_check(bigvalue4, uri, ds, nrows)
def test_eviction(self):
    """Open many tables, keep cursors on all of them, and exercise the cache.

    Fix: the inner loop reused the outer loop's variable name `i`, shadowing
    it; the outer loop now uses a distinct name.
    """
    cursors = []
    datasets = []
    for i in range(0, self.ntables):
        this_uri = 'table:%s-%05d' % (self.table_name, i)
        ds = SimpleDataSet(self, this_uri, self.nrows,
            config='allocation_size=1KB,leaf_page_max=1KB')
        ds.populate()
        datasets.append(ds)

    # Switch over to on-disk trees with multiple leaf pages
    self.reopen_conn()

    # Make sure we have a cursor for every table so it stays in cache.
    for i in range(0, self.ntables):
        this_uri = 'table:%s-%05d' % (self.table_name, i)
        cursors.append(self.session.open_cursor(this_uri, None))

    # Make use of the cache: random searches across every table.
    for op in range(0, self.nops):
        for i in range(0, self.ntables):
            cursors[i].set_key(ds.key(random.randint(0, self.nrows - 1)))
            cursors[i].search()
            cursors[i].reset()
def test_prepare_lookaside(self):
    """Verify lookaside handling of prepared transactions (single session).

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    if not wiredtiger.timestamp_build():
        self.skipTest('requires a timestamp build')

    # Create a small table.
    uri = "table:test_prepare_lookaside01"
    nrows = 100
    ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
    ds.populate()
    bigvalue = "aaaaa" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(nrows + i))
        cursor.set_value(bigvalue)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # Check if lookaside is working properly with prepare transactions.
    # We put prepared updates in multiple sessions so that we do not hang
    # because of cache being full with uncommitted updates.
    # TODO: Increase the nsessions below to start testing lookaside eviction
    # of prepared updates.
    nsessions = 1
    nkeys = 4000
    self.prepare_updates(uri, ds, nrows, nsessions, nkeys)
def test_checkpoint_snapshot(self):
    """Checkpoint with uncommitted sessions, then simulate a crash restart.

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    ds = SimpleDataSet(self, self.uri, self.nrows, key_format="S", value_format='u')
    ds.populate()
    value = b"aaaaa" * 100

    # Leave one uncommitted transaction per session, each writing its own
    # disjoint key range.
    sessions = [0] * self.nsessions
    cursors = [0] * self.nsessions
    for j in range(0, self.nsessions):
        sessions[j] = self.conn.open_session()
        cursors[j] = sessions[j].open_cursor(self.uri)
        sessions[j].begin_transaction('isolation=snapshot')
        start = (j * self.nkeys)
        end = start + self.nkeys
        for i in range(start, end):
            cursors[j].set_key(ds.key(self.nrows + i))
            cursors[j].set_value(value)
            self.assertEqual(cursors[j].insert(), 0)

    # Checkpoint from a separate session while the writes are uncommitted.
    session_p2 = self.conn.open_session()
    session_p2.checkpoint()

    # Simulate a crash by copying to a new directory (RESTART).
    # NOTE(review): called with two args here but three (self, ...) in a
    # sibling test — confirm which copy_wiredtiger_home signature this
    # file's helper module exports.
    copy_wiredtiger_home(".", "RESTART")

    # Open the new directory.
    self.conn = self.setUpConnectionOpen("RESTART")
    self.session = self.setUpSessionOpen(self.conn)
def test_modify_txn_api(self):
    """Modify is rejected under read-uncommitted and read-committed isolation."""
    ds = SimpleDataSet(self, self.uri, 100,
        key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(10))
    msg = '/not supported/'

    # The same modify must fail identically under both isolation levels.
    for isolation in ("isolation=read-uncommitted", "isolation=read-committed"):
        self.session.begin_transaction(isolation)
        mods = [wiredtiger.Modify('-', 1, 1)]
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: c.modify(mods), msg)
        self.session.rollback_transaction()
def test_prepare_hs(self):
    """Verify history-store handling of prepared transactions.

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    # Create a small table.
    uri = "table:test_prepare_hs01"
    nrows = 100
    ds = SimpleDataSet(
        self, uri, nrows, key_format=self.key_format, value_format=self.value_format)
    ds.populate()

    # Fixed-length column store uses a single byte value.
    if self.value_format == '8t':
        bigvalue = 97
    else:
        bigvalue = b"aaaaa" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(nrows + i))
        cursor.set_value(bigvalue)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # Check if the history store is working properly with prepare transactions.
    # We put prepared updates in multiple sessions so that we do not hang
    # because of cache being full with uncommitted updates.
    nsessions = 3
    nkeys = 4000
    self.prepare_updates(uri, ds, nrows, nsessions, nkeys)
def test_prepare_lookaside(self):
    """Verify lookaside handling of prepared transactions (multi-session).

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    if not wiredtiger.timestamp_build():
        self.skipTest('requires a timestamp build')

    # Create a small table.
    uri = "table:test_prepare_lookaside01"
    nrows = 100
    ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
    ds.populate()
    bigvalue = "aaaaa" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(nrows + i))
        cursor.set_value(bigvalue)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # Check if lookaside is working properly with prepare transactions.
    # We put prepared updates in multiple sessions so that we do not hang
    # because of cache being full with uncommitted updates.
    nsessions = 3
    nkeys = 4000
    self.prepare_updates(uri, ds, nrows, nsessions, nkeys)
def test_older_prepare_updates(self):
    """Check txn_visible_all when the oldest active transaction is prepared.

    Fix: deprecated assertEquals replaced with assertEqual.
    """
    # Create a small table.
    uri = "table:test"
    nrows = 100
    ds = SimpleDataSet(self, uri, nrows,
        key_format=self.key_format, value_format=self.value_format)
    ds.populate()

    # Fixed-length column store uses single byte values.
    if self.value_format == '8t':
        value_a = 97
        value_b = 98
    else:
        value_a = b"aaaaa" * 100
        value_b = b"bbbbb" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor.set_key(ds.key(nrows + i))
        cursor.set_value(value_a)
        self.assertEqual(cursor.insert(), 0)
    cursor.close()
    self.session.checkpoint()

    # Check if txn_visible_all is working properly, when an active oldest
    # transaction is a prepared transaction and the oldest timestamp
    # advances beyond the prepared timestamp.
    self.older_prepare_updates(uri, ds, nrows, value_a, value_b)
def test_checkpoint(self):
    """Checkpoint cursors ignore prepared data: reads see only committed values."""
    uri = 'table:checkpoint20'
    nrows = 10000

    # Create a table.
    ds = SimpleDataSet(self, uri, 0, key_format=self.key_format,
        value_format=self.value_format, config=self.extraconfig)
    ds.populate()

    # Fixed-length column store uses single byte values.
    # NOTE(review): value_c is never used in this method — possibly a
    # leftover from a parallel test; confirm before removing.
    if self.value_format == '8t':
        value_a = 97
        value_b = 98
        value_c = 99
    else:
        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100

    # Pin oldest and stable timestamps to 5.
    self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5) +
        ',stable_timestamp=' + self.timestamp_str(5))

    # Write some data at time 10.
    self.large_updates(uri, ds, nrows, value_a, 10)

    # Prepare some more data at time 20.
    session2 = self.conn.open_session()
    cursor2 = session2.open_cursor(uri)
    session2.begin_transaction()
    for i in range(nrows // 2 + 1, nrows):
        cursor2[ds.key(i)] = value_b
    session2.prepare_transaction('prepare_timestamp=' + self.timestamp_str(20))

    # Evict the lot. Otherwise the checkpoint won't write the prepared data.
    # Read at 10 to do the eviction to avoid tripping on the prepared transaction.
    self.evict(ds, 1, nrows + 1, value_a, 10)

    # Checkpoint.
    self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(self.stable_ts))
    self.do_checkpoint(self.first_checkpoint)

    # Commit the prepared transaction so it isn't in the way.
    session2.timestamp_transaction('commit_timestamp=' + self.timestamp_str(20))
    session2.commit_transaction('durable_timestamp=' + self.timestamp_str(30))

    # Read the checkpoint.
    # We decided that checkpoint cursors should always use ignore_prepare, so we
    # should always see value_a.
    self.check(ds, self.first_checkpoint, nrows, value_a, 10)
    self.check(ds, self.first_checkpoint, nrows, value_a, 20)
    self.check(ds, self.first_checkpoint, nrows, value_a, None)
def test_checkpoint_snapshot(self):
    # Verify that a transaction committed while a checkpoint is in progress
    # is rolled back on restart (the checkpoint snapshot excludes it), and
    # that rollback-to-stable reports the inconsistent checkpoint.
    ds = SimpleDataSet(self, self.uri, 0, key_format="S", value_format="S",config='log=(enabled=false)')
    ds.populate()
    valuea = "aaaaa" * 100
    valueb = "bbbbb" * 100
    valuec = "ccccc" * 100
    valued = "ddddd" * 100
    cursor = self.session.open_cursor(self.uri)
    self.large_updates(self.uri, valuea, ds, self.nrows)
    self.check(valuea, self.uri, self.nrows)

    # Start a second transaction that will still be uncommitted when the
    # checkpoint begins.
    session1 = self.conn.open_session()
    session1.begin_transaction()
    cursor1 = session1.open_cursor(self.uri)
    for i in range(self.nrows, self.nrows*2):
        cursor1.set_key(ds.key(i))
        cursor1.set_value(valuea)
        self.assertEqual(cursor1.insert(), 0)

    # Create a checkpoint thread
    done = threading.Event()
    ckpt = checkpoint_thread(self.conn, done)
    try:
        ckpt.start()
        # Sleep for sometime so that checkpoint starts before committing last transaction.
        time.sleep(2)
        session1.commit_transaction()
    finally:
        done.set()
        ckpt.join()

    #Simulate a crash by copying to a new directory(RESTART).
    copy_wiredtiger_home(self, ".", "RESTART")

    # Open the new directory.
    self.conn = self.setUpConnectionOpen("RESTART")
    self.session = self.setUpSessionOpen(self.conn)

    # Check the table contains the last checkpointed value.
    self.check(valuea, self.uri, self.nrows)

    # Validate the rollback-to-stable statistics: the inconsistent checkpoint
    # must be detected, but the concurrent insert produced no keys to restore.
    stat_cursor = self.session.open_cursor('statistics:', None, None)
    inconsistent_ckpt = stat_cursor[stat.conn.txn_rts_inconsistent_ckpt][2]
    keys_removed = stat_cursor[stat.conn.txn_rts_keys_removed][2]
    keys_restored = stat_cursor[stat.conn.txn_rts_keys_restored][2]
    pages_visited = stat_cursor[stat.conn.txn_rts_pages_visited][2]
    upd_aborted = stat_cursor[stat.conn.txn_rts_upd_aborted][2]
    stat_cursor.close()

    self.assertGreater(inconsistent_ckpt, 0)
    self.assertEqual(upd_aborted, 0)
    self.assertGreaterEqual(keys_removed, 0)
    self.assertEqual(keys_restored, 0)
    self.assertGreaterEqual(pages_visited, 0)
def test_bug(self):
    # Verify that updates from transactions left hanging (never committed)
    # are invisible both before and after a simulated crash/restart.
    uri = "table:bug026"
    nrows = 1000
    ntxns = 500
    ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format=self.value_format, config='log=(enabled=false)')
    ds.populate()
    value_a = "aaaaa" * 100
    value_b = "bbbbb" * 100
    value_c = "ccccc" * 100

    # Write some data.
    cursor = self.session.open_cursor(uri)
    self.session.begin_transaction()
    for i in range(1, nrows + 1):
        cursor[ds.key(i)] = value_a
    self.session.commit_transaction()
    self.session.checkpoint()

    # Create a bunch of transactions and leave all but one hanging.
    # Keep sessions and cursors referenced so they stay open for the
    # remainder of the test.
    sessions = {}
    cursors = {}
    for i in range(1, ntxns + 1):
        sessions[i] = self.conn.open_session()
        cursors[i] = sessions[i].open_cursor(uri)
        sessions[i].begin_transaction()
        cursors[i][ds.key(i)] = value_b
    # The one transaction that does commit, at the last row.
    self.session.begin_transaction()
    cursor[ds.key(nrows)] = value_c
    self.session.commit_transaction()
    self.session.checkpoint()

    # Should not see value_b.
    self.check(ds, nrows, value_a, value_c)

    # Now crash.
    simulate_crash_restart(self, ".", "RESTART")

    # Should still not see value_b.
    self.check(ds, nrows, value_a, value_c)
def test_checkpoint(self):
    # Verify that reading an already-open checkpoint cursor fails with
    # EINVAL once a prepared transaction exists on the tree.
    uri = 'table:checkpoint12'
    nrows = 1000

    # Create a table.
    ds = SimpleDataSet(
        self, uri, 0, key_format=self.key_format, value_format=self.value_format)
    ds.populate()

    # Fixed-length column stores ('8t') hold 8-bit integers, not strings.
    if self.value_format == '8t':
        value_a = 97
        value_b = 98
        value_c = 99
    else:
        value_a = "aaaaa" * 100
        value_b = "bbbbb" * 100
        value_c = "ccccc" * 100

    # Pin oldest and stable timestamps to 5.
    self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(5) +
        ',stable_timestamp=' + self.timestamp_str(5))

    # Write some data at time 10.
    self.large_updates(uri, ds, nrows, value_a, 10)

    # Make a checkpoint.
    self.session.checkpoint()

    # Write some more data at time 20.
    self.large_updates(uri, ds, nrows, value_a, 20)

    # Open the checkpoint.
    ckpt_cursor = self.session.open_cursor(uri, None, 'checkpoint=WiredTigerCheckpoint')
    ckpt_cursor.set_key(ds.key(1))

    # Write some further data, and prepare it at time 30.
    cursor = self.session.open_cursor(uri)
    self.session.begin_transaction()
    for i in range(1, nrows // 2):
        cursor[ds.key(i)] = value_b
    self.session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(30))

    # Now try reading the checkpoint.
    msg = '/Invalid argument/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.operate(ckpt_cursor), msg)
def test_search_eot(self):
    # Build a 100-record table and reopen the connection so the rows move
    # to an on-page disk format rather than in-memory insert lists.
    ds = SimpleDataSet(self, self.uri, 100,
        key_format=self.key_format, value_format=self.value_format)
    ds.populate()
    self.reopen_conn()

    c = self.session.open_cursor(self.uri, None)
    last_key = ds.key(100)
    last_value = ds.value(100)

    # An exact search for the final record succeeds.
    c.set_key(last_key)
    self.assertEqual(c.search(), 0)
    self.assertEqual(c.get_key(), last_key)
    self.assertEqual(c.get_value(), last_value)

    # A search-near for the final record matches it exactly.
    c.set_key(last_key)
    self.assertEqual(c.search_near(), 0)
    self.assertEqual(c.get_key(), last_key)
    self.assertEqual(c.get_value(), last_value)

    # An exact search past the end of the table fails.
    c.set_key(ds.key(200))
    self.assertEqual(c.search(), wiredtiger.WT_NOTFOUND)

    # A search-near past the end of the table succeeds, positioning the
    # cursor on the last record (return value -1: smaller key found).
    c.set_key(ds.key(200))
    self.assertEqual(c.search_near(), -1)
    self.assertEqual(c.get_key(), last_key)
    self.assertEqual(c.get_value(), last_value)
def test_modify_abort(self):
    # A modify of an uncommitted insert must succeed within the owning
    # transaction, fail (WT_NOTFOUND) from another transaction, and fail
    # again after the owning transaction rolls back.
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format=self.valuefmt)
    ds.populate()

    def one_mod():
        # Every modify call needs a fresh list holding a single Modify.
        return [wiredtiger.Modify('ABCD', 3, 3)]

    # Start a transaction and insert a new record.
    self.session.begin_transaction()
    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(30))
    c.set_value(ds.value(30))
    self.assertEquals(c.insert(), 0)

    # We can modify our own uncommitted record.
    c.set_key(ds.key(30))
    self.assertEqual(c.modify(one_mod()), 0)

    # Another transaction cannot modify our uncommitted record.
    xs = self.conn.open_session()
    xc = xs.open_cursor(self.uri, None)
    xs.begin_transaction()
    xc.set_key(ds.key(30))
    xc.set_value(ds.value(30))
    xc.set_key(ds.key(30))
    self.assertEqual(xc.modify(one_mod()), wiredtiger.WT_NOTFOUND)
    xs.rollback_transaction()

    # Abort our insert; modifying the vanished record now fails as well.
    self.session.rollback_transaction()
    self.session.begin_transaction()
    c.set_key(ds.key(30))
    self.assertEqual(c.modify(one_mod()), wiredtiger.WT_NOTFOUND)
    self.session.rollback_transaction()
def address_deleted(self):
    # Drive the tree into a state containing address-deleted cells:
    # truncate never-read leaf pages, checkpoint, crash, then dirty and
    # walk the tree and checkpoint again. Returns the data set so callers
    # can do further validation.

    # Create the object, force it to disk, and verify the object.
    ds = SimpleDataSet(self, self.uri, self.nentries, config=self.config)
    ds.populate()
    self.reopen_conn()
    self.session.verify(self.uri)

    # Create a new session and start a transaction to force the upcoming
    # checkpoint operation to write address-deleted cells to disk.
    tmp_session = self.conn.open_session(None)
    tmp_session.begin_transaction("isolation=snapshot")

    # Truncate a big range of rows; the leaf pages aren't in memory, so
    # leaf page references will be deleted without being read.
    start = self.session.open_cursor(self.uri, None)
    start.set_key(ds.key(10))
    end = self.session.open_cursor(self.uri, None)
    end.set_key(ds.key(self.nentries - 10))
    self.session.truncate(None, start, end, None)
    self.assertEqual(start.close(), 0)
    self.assertEqual(end.close(), 0)

    # Checkpoint, forcing address-deleted cells to be written.
    self.session.checkpoint()

    # Crash/reopen the connection and verify the object.
    self.reopen_conn()
    self.session.verify(self.uri)

    # Open a cursor and update a record (to dirty the tree, else we won't
    # mark pages with address-deleted cells dirty), then walk the tree so
    # we get a good look at all the internal pages and the address-deleted
    # cells.
    cursor = self.session.open_cursor(self.uri, None)
    cursor.set_key(ds.key(5))
    cursor.set_value("changed value")
    self.assertEqual(cursor.update(), 0)
    cursor.reset()
    # Full scan purely for its page-visiting side effect.
    for key,val in cursor:
        continue
    self.assertEqual(cursor.close(), 0)

    # Checkpoint, freeing the pages.
    self.session.checkpoint()
    return ds
def test_modify_many(self):
    # Apply a long chain of modify operations to a single raw-byte ('u')
    # record, tracking the expected value locally, then verify the
    # accumulated modifies produce exactly that value.
    ds = SimpleDataSet(self, self.uri, 20,
        key_format=self.keyfmt, value_format='u')
    ds.populate()

    c = self.session.open_cursor(self.uri, None)
    c.set_key(ds.key(10))
    orig = 'abcdefghijklmnopqrstuvwxyz'
    c.set_value(orig)
    self.assertEquals(c.update(), 0)
    for i in range(0, 50000):
        # BUG FIX: xrange() does not exist in Python 3 (NameError); use
        # range(), matching the other test_modify_many in this file. Also
        # use "_" so the comprehension doesn't shadow the loop index.
        new = "".join([random.choice(string.digits) for _ in range(5)])
        # Each modify replaces 5 bytes at offset 10.
        orig = orig[:10] + new + orig[15:]
        mods = []
        mod = wiredtiger.Modify(new, 10, 5)
        mods.append(mod)
        self.assertEquals(c.modify(mods), 0)

    c.set_key(ds.key(10))
    self.assertEquals(c.search(), 0)
    self.assertEquals(c.get_value(), orig)
def test_insert_over_delete(self):
    # Fill the cache until populate fails with WT_CACHE_FULL, then verify
    # removes still succeed on a full cache.
    msg = '/WT_CACHE_FULL.*/'
    # 10M rows is deliberately more than the configured cache can hold.
    ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt,
        value_format=self.valuefmt, config=self.table_config)
    self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, ds.populate, msg)

    # Now that the database contains as much data as will fit into
    # the configured cache, verify removes succeed.
    cursor = self.session.open_cursor(self.uri, None)
    for i in range(1, 100):
        cursor.set_key(ds.key(i))
        cursor.remove()
def test_bug014(self):
    # Verify that an uncommitted fast-delete (truncate) captured by a
    # checkpoint is rolled back on crash recovery: all records survive.

    # Populate a table with 1000 keys on small pages.
    uri = 'table:test_bug014'
    ds = SimpleDataSet(self, uri, 1000,
        config='allocation_size=512,leaf_page_max=512')
    ds.populate()

    # Reopen it so we can fast-delete pages.
    self.reopen_conn()

    # Truncate a chunk of the key/value pairs inside a transaction.
    self.session.begin_transaction(None)
    start = self.session.open_cursor(uri, None)
    start.set_key(ds.key(250))
    end = self.session.open_cursor(uri, None)
    end.set_key(ds.key(500))
    self.session.truncate(None, start, end, None)
    start.close()
    end.close()

    # With the truncation uncommitted, checkpoint the database.
    ckpt_session = self.conn.open_session()
    ckpt_session.checkpoint(None)
    ckpt_session.close()

    # Simulate a crash by copying to a new directory.
    # NOTE(review): called without a leading "self" argument here, while
    # test_checkpoint_snapshot calls copy_wiredtiger_home(self, ".", "RESTART").
    # Confirm which signature the helper in this tree actually provides.
    copy_wiredtiger_home(".", "RESTART")

    # Open the new directory.
    conn = self.setUpConnectionOpen("RESTART")
    session = self.setUpSessionOpen(conn)
    cursor = session.open_cursor(uri)

    # Confirm all of the records are there.
    for i in range(1, 1001):
        cursor.set_key(ds.key(i))
        self.assertEqual(cursor.search(), 0)
    conn.close()
def test_checkpoint_last(self):
    # Repeatedly update one easily recognizable record, checkpoint, and
    # read it back through a "last checkpoint" cursor: every open must pick
    # up the newest checkpoint, not a cached older one.
    uri = self.uri
    ds = SimpleDataSet(self, uri, 100, key_format=self.fmt)
    ds.populate()

    for value in ('FIRST', 'SECOND', 'THIRD', 'FOURTH', 'FIFTH'):
        # Rewrite the record with this iteration's marker value.
        upd = self.session.open_cursor(uri, None, "overwrite")
        upd[ds.key(10)] = value
        upd.close()

        # Checkpoint, then confirm the last checkpoint reflects the update.
        self.session.checkpoint()
        ckpt = self.session.open_cursor(
            uri, None, "checkpoint=WiredTigerCheckpoint")
        self.assertEquals(ckpt[ds.key(10)], value)
def test_las(self):
    # Exercise lookaside (history) storage with old timestamps, then
    # truncate half the records and verify visibility at each timestamp.
    if not wiredtiger.timestamp_build():
        self.skipTest('requires a timestamp build')
    nrows = 10000

    # Create a table without logging to ensure we get "skew_newest" lookaside eviction behavior.
    uri = "table:las02_main"
    ds = SimpleDataSet(
        self, uri, 0, key_format="S", value_format="S",
        config='log=(enabled=false)')
    ds.populate()
    uri2 = "table:las02_extra"
    ds2 = SimpleDataSet(self, uri2, 0, key_format="S", value_format="S")
    ds2.populate()

    # Pin oldest and stable to timestamp 1.
    self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1) +
        ',stable_timestamp=' + timestamp_str(1))

    bigvalue = "aaaaa" * 100
    # BUG FIX: use floor division (//) throughout; under Python 3 "/"
    # produces a float, breaking key generation and row counts.
    self.large_updates(uri, bigvalue, ds, nrows // 3, 1)

    # Check that all updates are seen
    self.check(bigvalue, uri, nrows // 3, 1)

    # Check to see lookaside working with old timestamp
    bigvalue2 = "ddddd" * 100
    self.large_updates(uri, bigvalue2, ds, nrows, 100)

    # Check that the new updates are only seen after the update timestamp
    self.check(bigvalue, uri, nrows // 3, 1)
    self.check(bigvalue2, uri, nrows, 100)

    # Force out most of the pages by updating a different tree
    self.large_updates(uri2, bigvalue, ds2, nrows, 100)

    # Now truncate half of the records
    self.session.begin_transaction()
    end = self.session.open_cursor(uri)
    end.set_key(ds.key(nrows // 2))
    # BUG FIX: session.truncate takes four arguments (name, start, stop,
    # config); the trailing config argument (None) was missing.
    self.session.truncate(None, None, end, None)
    end.close()
    self.session.commit_transaction('commit_timestamp=' + timestamp_str(200))

    # Check that the truncate is visible after commit
    self.check(bigvalue2, uri, nrows // 2, 200)

    # Repeat earlier checks
    self.check(bigvalue, uri, nrows // 3, 1)
    self.check(bigvalue2, uri, nrows, 100)
def test_column_store_gap_traverse(self): uri = 'table:gap' # Initially just create tables. ds = SimpleDataSet(self, uri, 0, key_format='r') ds.populate() cursor = self.session.open_cursor(uri, None, None) self.nentries = 0 # Create a column store with key gaps. The particular values aren't # important, we just want some gaps. v = [ 1000, 1001, 2000, 2001] for i in v: cursor[ds.key(i)] = ds.value(i) self.nentries += 1 # In-memory cursor forward, backward. self.forward(cursor, v) self.backward(cursor, list(reversed(v))) self.reopen_conn() cursor = self.session.open_cursor(uri, None, None) # Disk page cursor forward, backward. self.forward(cursor, v) self.backward(cursor, list(reversed(v))) # Insert some new records, so there are in-memory updates and an # on disk image. Put them in the middle of the existing values # so the traversal walks to them. v2 = [ 1500, 1501 ] for i in v2: cursor[ds.key(i)] = ds.value(i) self.nentries += 1 # Tell the validation what to expect. v = [ 1000, 1001, 1500, 1501, 2000, 2001 ] self.forward(cursor, v) self.backward(cursor, list(reversed(v)))
def test_truncate_cursor_notset(self):
    # Truncate must reject a cursor endpoint that has no key set, whether
    # it is the start or the stop cursor.
    uri = self.type + self.name
    msg = "/requires key be set/"

    ds = SimpleDataSet(self, uri, 100)
    ds.populate()

    keyless = self.session.open_cursor(uri, None)
    keyed = self.session.open_cursor(uri, None)
    keyed.set_key(ds.key(10))

    # Try the unset cursor in each endpoint position.
    for lo, hi in ((keyless, keyed), (keyed, keyless)):
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda lo=lo, hi=hi: self.session.truncate(None, lo, hi, None), msg)

    keyless.close()
    keyed.close()
def test_las(self): # Create a small table. uri = "table:test_las" nrows = 100 ds = SimpleDataSet(self, uri, nrows, key_format="S") ds.populate() bigvalue = "aaaaa" * 100 # Initially load huge data cursor = self.session.open_cursor(uri) for i in range(1, 10000): cursor.set_key(ds.key(nrows + i)) cursor.set_value(bigvalue) self.assertEquals(cursor.insert(), 0) cursor.close() self.session.checkpoint() # Scenario: 1 # Check to see LAS working with old snapshot bigvalue1 = "bbbbb" * 100 self.session.snapshot("name=xxx") # Update the values in different session after snapshot self.large_updates(self.session, uri, bigvalue1, ds, nrows) # Check to see the value after recovery self.durable_check(bigvalue1, uri, ds, nrows) self.session.snapshot("drop=(all)") # Scenario: 2 # Check to see LAS working with old reader bigvalue2 = "ccccc" * 100 session2 = self.conn.open_session() session2.begin_transaction('isolation=snapshot') self.large_updates(self.session, uri, bigvalue2, ds, nrows) # Check to see the value after recovery self.durable_check(bigvalue2, uri, ds, nrows) session2.rollback_transaction() session2.close() # Scenario: 3 # Check to see LAS working with old timestamp bigvalue3 = "ddddd" * 100 self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1)) self.large_updates(self.session, uri, bigvalue3, ds, nrows, timestamp=True) # Check to see data can be see only till the stable_timestamp self.durable_check(bigvalue2, uri, ds, nrows) self.conn.set_timestamp('stable_timestamp=' + timestamp_str(i + 1)) # Check to see latest data can be seen self.durable_check(bigvalue3, uri, ds, nrows)
def test_reconfig_fail(self):
    # A compatibility reconfiguration requires a quiescent system, so it
    # must fail while a transaction with a pending update is active.
    uri = 'table:reconfig_fail'
    ds = SimpleDataSet(self, uri, 100, key_format='S')
    ds.populate()

    # Leave a snapshot transaction open with an uncommitted update.
    self.session.begin_transaction("isolation=snapshot")
    cur = self.session.open_cursor(uri, None)
    cur.set_key(ds.key(20))
    cur.set_value("abcde")
    self.assertEquals(cur.update(), 0)

    # The compatibility change is rejected with the quiescent-system error.
    compat_str = 'compatibility=(release="3.0.0")'
    msg = '/system must be quiescent/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.conn.reconfigure(compat_str), msg)
def test_checkpoint_cursor_update(self):
    # Checkpoint cursors are read-only: insert, remove and update must all
    # fail with the "Unsupported cursor" error.
    ds = SimpleDataSet(self, self.uri, 100, key_format=self.fmt)
    ds.populate()
    self.session.checkpoint("name=ckpt")

    cursor = self.session.open_cursor(self.uri, None, "checkpoint=ckpt")
    cursor.set_key(ds.key(10))
    cursor.set_value("XXX")

    msg = "/Unsupported cursor/"
    for operation in (cursor.insert, cursor.remove, cursor.update):
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, operation, msg)
    cursor.close()
def test_hazard(self):
    # Pin a single page with a very large number of positioned cursors,
    # each holding a hazard pointer, then release them all.
    uri = "table:hazard"
    ds = SimpleDataSet(self, uri, 1000)
    ds.populate()

    # Open 10,000 cursors all positioned on the same key.
    cursors = []
    for _ in range(0, 10000):
        cursor = self.session.open_cursor(uri, None)
        cursor.set_key(ds.key(10))
        cursor.search()
        cursors.append(cursor)

    # Closing each cursor clears its hazard pointer.
    for cursor in cursors:
        cursor.close()
def test_las(self): # Create a small table. uri = "table:test_las" nrows = 100 ds = SimpleDataSet(self, uri, nrows, key_format="S") ds.populate() # Take a snapshot. self.session.snapshot("name=xxx") # Insert a large number of records, we'll hang if the lookaside table # isn't doing its thing. c = self.session.open_cursor(uri) bigvalue = "abcde" * 100 for i in range(1, 1000000): c.set_key(ds.key(nrows + i)) c.set_value(bigvalue) self.assertEquals(c.insert(), 0)
def test_checkpoint_las_reads(self):
    # Verify checkpoints interact sanely with lookaside storage: a bulk
    # history load should produce lookaside writes, and subsequent
    # single-record checkpoints should need only bounded lookaside reads.
    if not wiredtiger.timestamp_build():
        self.skipTest('requires a timestamp build')

    # Create a small table.
    uri = "table:test_las03"
    nrows = 100
    ds = SimpleDataSet(self, uri, nrows, key_format="S", value_format='u')
    ds.populate()
    bigvalue = "aaaaa" * 100

    # Initially load huge data
    cursor = self.session.open_cursor(uri)
    for i in range(1, 10000):
        cursor[ds.key(nrows + i)] = bigvalue
    cursor.close()
    self.session.checkpoint()

    # Check to see LAS working with old timestamp
    bigvalue2 = "ddddd" * 100
    self.conn.set_timestamp('stable_timestamp=' + timestamp_str(1))
    # Measure lookaside writes as a delta around the update + checkpoint.
    las_writes_start = self.get_stat(stat.conn.cache_write_lookaside)
    self.large_updates(self.session, uri, bigvalue2, ds, nrows, 10000)

    # If the test sizing is correct, the history will overflow the cache
    self.session.checkpoint()
    las_writes = self.get_stat(stat.conn.cache_write_lookaside) - las_writes_start
    self.assertGreaterEqual(las_writes, 0)

    for ts in range(2, 4):
        self.conn.set_timestamp('stable_timestamp=' + timestamp_str(ts))

        # Now just update one record and checkpoint again
        self.large_updates(self.session, uri, bigvalue2, ds, nrows, 1)

        las_reads_start = self.get_stat(stat.conn.cache_read_lookaside)
        self.session.checkpoint()
        las_reads = self.get_stat(stat.conn.cache_read_lookaside) - las_reads_start

        # Since we're dealing with eviction concurrent with checkpoints
        # and skewing is controlled by a heuristic, we can't put too tight
        # a bound on this.
        self.assertLessEqual(las_reads, 100)
def test_reconfig_fail(self):
    # While downgraded and holding an active transaction, unrelated
    # reconfiguration must succeed, but a compatibility upgrade must fail
    # because the system is not quiescent.
    uri = 'table:reconfig_fail'
    ds = SimpleDataSet(self, uri, 100, key_format='S')
    ds.populate()

    # Reconfigure to an older version.
    self.conn.reconfigure('compatibility=(release="2.6")')

    # Leave a snapshot transaction open with an uncommitted update.
    self.session.begin_transaction("isolation=snapshot")
    cur = self.session.open_cursor(uri, None)
    cur.set_key(ds.key(20))
    cur.set_value("abcde")
    self.assertEquals(cur.update(), 0)

    # Unrelated settings may still be reconfigured while downgraded with
    # an active transaction.
    self.conn.reconfigure("cache_size=100M")

    # Upgrading compatibility, however, requires a quiescent system.
    compat_str = 'compatibility=(release="3.0.0")'
    msg = '/system must be quiescent/'
    self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
        lambda: self.conn.reconfigure(compat_str), msg)
def test_search_duplicate(self):
    # Verify search/search-near around a run of identical values flanked
    # by deleted records (variable-length column store only).
    if self.colvar == 0:
        return

    # Populate the tree.
    ds = SimpleDataSet(self, self.uri, 105,
        key_format=self.key_format, value_format=self.value_format)
    ds.populate()

    # Set up deleted records before and after a set of duplicate records,
    # and make sure search/search-near returns the correct record.
    cursor = self.session.open_cursor(self.uri, None)
    # Rows 20-99 all share one value so they can be stored as duplicates.
    for i in range(20, 100):
        cursor[ds.key(i)] = '=== IDENTICAL VALUE ==='
    # Delete rows 15-24 (overlapping the front of the duplicate run).
    for i in range(15, 25):
        cursor.set_key(ds.key(i))
        self.assertEqual(cursor.remove(), 0)
    # Delete rows 95-105 (overlapping the back of the duplicate run).
    for i in range(95, 106):
        cursor.set_key(ds.key(i))
        self.assertEqual(cursor.remove(), 0)
    cursor.close()

    # Reopen the connection, forcing it to disk and moving the records to
    # an on-page format.
    self.reopen_conn()

    # Open a cursor.
    cursor = self.session.open_cursor(self.uri, None)

    # Search-near for a record in the deleted set before the duplicate set,
    # which should succeed, returning the first record in the duplicate set.
    cursor.set_key(ds.key(18))
    self.assertEqual(cursor.search_near(), 1)
    self.assertEqual(cursor.get_key(), ds.key(25))

    # Search-near for a record in the deleted set after the duplicate set,
    # which should succeed, returning the last record in the duplicate set.
    cursor.set_key(ds.key(98))
    self.assertEqual(cursor.search_near(), -1)
    self.assertEqual(cursor.get_key(), ds.key(94))
def test_column_store_gap(self):
    # Traverse a column store whose record numbers leave enormous gaps in
    # the key space, both in memory and after flushing to disk.
    uri = 'table:gap'
    ds = SimpleDataSet(self, uri, 0, key_format='r')
    ds.populate()
    cursor = self.session.open_cursor(uri, None, None)
    # nentries is read by the forward/backward traversal helpers.
    self.nentries = 0

    # Insert records with huge gaps in the record-number name-space.
    v = [ 1000, 2000000000000, 30000000000000 ]
    for recno in v:
        cursor[ds.key(recno)] = ds.value(recno)
        self.nentries += 1

    # Walk the in-memory insert list in both directions.
    self.forward(cursor, v)
    self.backward(cursor, list(reversed(v)))

    # Force the rows to disk, then walk the on-page form both ways.
    self.reopen_conn()
    cursor = self.session.open_cursor(uri, None, None)
    self.forward(cursor, v)
    self.backward(cursor, list(reversed(v)))
def test_truncate_simple(self):
    # Drive truncateRangeAndCheck across a matrix of object layouts
    # (skipped/inserted records at each end) and truncation ranges
    # (None/positioned cursors at begin, middle and end).
    uri = self.type + self.name

    # layout:
    #    the number of initial skipped records
    #    the number of initial inserted records
    #    the number of trailing skipped records
    #    the number of trailing inserted records
    layout = [
        # simple set of rows
        (0, 0, 0, 0),

        # trailing append list, no delete point overlap
        (0, 0, 0, self.skip - 3),

        # trailing append list, delete point overlap
        (0, 0, 0, self.skip + 3),

        # trailing skipped list, no delete point overlap
        (0, 0, self.skip - 3, 1),

        # trailing skipped list, delete point overlap
        (0, 0, self.skip + 3, 1),

        # leading insert list, no delete point overlap
        (0, self.skip - 3, 0, 0),

        # leading insert list, delete point overlap
        (0, self.skip + 3, 0, 0),

        # leading skipped list, no delete point overlap
        (self.skip - 3, 1, 0, 0),

        # leading skipped list, delete point overlap
        (self.skip + 3, 1, 0, 0),
    ]

    # list: truncation patterns applied on top of the layout.
    #
    # begin and end: -1 means pass None for the cursor arg to truncate. An
    # integer N, with 1 <= N < self.nentries, truncates from/to a cursor
    # positioned at that row.
    list = [
        (-1, self.nentries),                # begin to end, begin = None
        (1, -1),                            # begin to end, end = None
        (1, self.nentries),                 # begin to end
        (-1, self.nentries - self.skip),    # begin to middle, begin = None
        (1, self.nentries - self.skip),     # begin to middle
        (self.skip, -1),                    # middle to end, end = None
        (self.skip, self.nentries),         # middle to end
        (self.skip,                         # middle to different middle
            self.nentries - self.skip),
        (1, 1),                             # begin to begin
        (self.nentries, self.nentries),     # end to end
        (self.skip, self.skip)              # middle to same middle
    ]

    # Using this data set to compare only, it doesn't create or populate.
    ds = SimpleDataSet(self, uri, 0, key_format=self.keyfmt,
        value_format=self.valuefmt, config=self.config)

    # Build the layout we're going to test
    total = self.nentries
    for begin_skipped,begin_insert,end_skipped,end_insert in layout:

        # skipped records require insert/append records
        if begin_skipped and not begin_insert or \
            end_skipped and not end_insert:
            raise AssertionError('test error: skipped set without insert')

        for begin,end in list:
            # Disabled debug trace of the current test configuration.
            '''
            print '===== run:'
            print 'key:', self.keyfmt, 'begin:', begin, 'end:', end
            print 'total: ', total, \
                'begin_skipped:', begin_skipped, \
                'begin_insert:', begin_insert, \
                'end_skipped:', end_skipped, \
                'end_insert:', end_insert
            '''

            # Build a dictionary of what the object should look like for
            # later comparison
            expected = {}

            # Create the object.
            self.session.create(
                uri, self.config + ',key_format=' + self.keyfmt +
                ',value_format=' + self.valuefmt)

            # Insert the records that aren't skipped or inserted.
            start = begin_skipped + begin_insert
            stop = self.nentries - (end_skipped + end_insert)
            cursor = self.session.open_cursor(uri, None)
            for i in range(start + 1, stop + 1):
                k = ds.key(i)
                v = ds.value(i)
                cursor[k] = v
                expected[k] = [v]
            cursor.close()

            # Optionally close and re-open the object to get a disk image
            # instead of a big insert list.
            if self.reopen:
                self.reopen_conn()

            # Optionally insert initial skipped records.
            # Skipped slots are recorded in "expected" as [0] markers
            # without being written to the object.
            cursor = self.session.open_cursor(uri, None, "overwrite")
            start = 0
            for i in range(0, begin_skipped):
                start += 1
                k = ds.key(start)
                expected[k] = [0]

            # Optionally insert leading records.
            for i in range(0, begin_insert):
                start += 1
                k = ds.key(start)
                v = ds.value(start)
                cursor[k] = v
                expected[k] = [v]

            # Optionally insert trailing skipped records.
            for i in range(0, end_skipped):
                stop += 1
                k = ds.key(stop)
                expected[k] = [0]

            # Optionally insert trailing records.
            for i in range(0, end_insert):
                stop += 1
                k = ds.key(stop)
                v = ds.value(stop)
                cursor[k] = v
                expected[k] = [v]
            cursor.close()

            # Truncate the configured range and compare against "expected",
            # then drop the object before the next configuration.
            self.truncateRangeAndCheck(ds, uri, begin, end, expected)
            self.session.drop(uri, None)