def test_insert(self):
    # Basic smoke test: populate a table and verify every record
    # comes back intact.
    dataset = SimpleDataSet(self, self.uri, 1000,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)
    dataset.populate()
    dataset.check()
def test_insert_over_capacity(self):
    # Inserting far more records than the cache can hold must fail
    # with a cache-full error rather than succeed silently.
    err_pattern = '/WT_CACHE_FULL.*/'
    dataset = SimpleDataSet(self, self.uri, 10000000,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)
    self.assertRaisesHavingMessage(
        wiredtiger.WiredTigerError, dataset.populate, err_pattern)

    # Figure out the last key we successfully inserted, and check all
    # previous inserts are still there.
    cursor = self.session.open_cursor(self.uri, None)
    cursor.prev()
    last_key = int(cursor.get_key())
    dataset = SimpleDataSet(self, self.uri, last_key,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)
    dataset.check()
def test_wedge(self):
    # Try to really wedge the cache full
    dataset = SimpleDataSet(self, self.uri, 0,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)
    dataset.populate()
    cursor = self.session.open_cursor(self.uri, None)
    iteration = 0
    start, last_key = -1000, 0
    # Keep filling until a pass makes fewer than 100 keys of progress.
    while last_key - start > 100:
        err_pattern = '/WT_CACHE_FULL.*/'
        start = last_key
        # Bind 'start' as a default argument so the lambda captures the
        # value for this iteration rather than the (late-bound) variable.
        self.assertRaisesHavingMessage(
            wiredtiger.WiredTigerError,
            lambda s=start: self.fill(cursor, dataset, s, 10000000),
            err_pattern)
        cursor.reset()
        sleep(1)
        # Figure out the last key we successfully inserted, and check all
        # previous inserts are still there.
        cursor.prev()
        last_key = int(cursor.get_key())
        iteration += 1
        self.pr('Finished iteration ' + str(iteration) + ', last_key = ' + str(last_key))

    self.pr('Checking ' + str(last_key) + ' keys')
    dataset = SimpleDataSet(self, self.uri, last_key,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)

    # This test is *much* slower for fixed-length column stores: we fit
    # many more records into the cache, so don't do as many passes through
    # the data.
    checks = 10 if self.valuefmt.endswith('t') else 100
    for pass_num in range(checks):
        dataset.check()
        self.pr('Finished check ' + str(pass_num))
        sleep(1)
def test_sharing(self):
    """Share one table's checkpoint with a second, read-only database.

    Populates a table, checkpoints, then clones its metadata into a
    secondary database directory and verifies the secondary can read
    first the old checkpoint and then, after an alter, the new one.
    """
    # FIXME: WT-8235 Enable the test once file containing transaction ids is supported.
    # NOTE: skipTest raises, so everything below is currently not executed.
    self.skipTest('Sharing the checkpoint file containing transaction ids is not supported')

    ds = SimpleDataSet(self, self.uri, 10)
    ds.populate()
    ds.check()
    self.session.checkpoint()
    ds.check()

    # Create a secondary database
    dir2 = os.path.join(self.home, 'SECONDARY')
    os.mkdir(dir2)
    conn2 = self.setUpConnectionOpen(dir2)
    session2 = conn2.open_session()

    # Reference the tree from the secondary: copy the primary's metadata
    # entry for the table, pointing back at the primary's file, read-only.
    metac = self.session.open_cursor('metadata:')
    metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
    uri2 = self.uri[:5] + '../' + self.uri[5:]
    metac2[uri2] = metac[self.uri] + ",readonly=1"

    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    newds = SimpleDataSet(self, self.uri, 10000)
    newds.populate()
    newds.check()
    self.session.checkpoint()
    newds.check()

    # Check we can still read from the last checkpoint
    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    # Bump to new checkpoint: pull the checkpoint config out of the
    # primary's current metadata and alter the secondary to use it.
    origmeta = metac[self.uri]
    checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
    self.pr('Orig checkpoint: ' + checkpoint)
    session2.alter(uri2, checkpoint)
    # Fixed typo in the log message ('secondaery' -> 'secondary').
    self.pr('New metadata on secondary: ' + metac2[uri2])

    # Check that we can see the new data
    cursor2 = session2.open_cursor(uri2)
    newds.check_cursor(cursor2)
def test_sharing(self):
    """Share a log-structured table's checkpoint with a read-only secondary.

    Same flow as the base sharing test, but the table is created with
    log-structured block allocation.
    """
    args = 'block_allocation=log-structured'
    self.verbose(3,
        'Test log-structured allocation with config: ' + args + ' count: ' + str(self.nrecs))
    ds = SimpleDataSet(self, self.uri, 10, config=args)
    ds.populate()
    ds.check()
    self.session.checkpoint()
    ds.check()

    # Create a secondary database
    dir2 = os.path.join(self.home, 'SECONDARY')
    os.mkdir(dir2)
    conn2 = self.setUpConnectionOpen(dir2)
    session2 = conn2.open_session()

    # Reference the tree from the secondary: copy the primary's metadata
    # entry for the table, pointing back at the primary's file, read-only.
    metac = self.session.open_cursor('metadata:')
    metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
    uri2 = self.uri[:5] + '../' + self.uri[5:]
    metac2[uri2] = metac[self.uri] + ",readonly=1"

    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    newds = SimpleDataSet(self, self.uri, 10000, config=args)
    newds.populate()
    newds.check()
    self.session.checkpoint()
    newds.check()

    # Check we can still read from the last checkpoint
    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    # Bump to new checkpoint: pull the checkpoint config out of the
    # primary's current metadata and alter the secondary to use it.
    origmeta = metac[self.uri]
    checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
    self.pr('Orig checkpoint: ' + checkpoint)
    session2.alter(uri2, checkpoint)
    # Fixed typo in the log message ('secondaery' -> 'secondary').
    self.pr('New metadata on secondary: ' + metac2[uri2])

    # Check that we can see the new data
    cursor2 = session2.open_cursor(uri2)
    newds.check_cursor(cursor2)
def test_wedge(self):
    """Repeatedly fill the cache to wedge it full, then verify the data.

    Fixed Python 2-only ``xrange`` -> ``range`` (the sibling copy of this
    test already uses ``range``); also binds the lambda's ``start``
    argument as a default to avoid the late-binding closure hazard.
    """
    # Try to really wedge the cache full
    ds = SimpleDataSet(self, self.uri, 0,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)
    ds.populate()
    cursor = self.session.open_cursor(self.uri, None)
    run = 0
    start, last_key = -1000, 0
    # Keep filling until a pass makes fewer than 100 keys of progress.
    while last_key - start > 100:
        msg = '/WT_CACHE_FULL.*/'
        start = last_key
        # Bind 'start' as a default argument so the lambda captures the
        # current value rather than the late-bound variable.
        self.assertRaisesHavingMessage(
            wiredtiger.WiredTigerError,
            lambda s=start: self.fill(cursor, ds, s, 10000000),
            msg)
        cursor.reset()
        sleep(1)
        # Figure out the last key we successfully inserted, and check all
        # previous inserts are still there.
        cursor.prev()
        last_key = int(cursor.get_key())
        run += 1
        self.pr('Finished iteration ' + str(run) + ', last_key = ' + str(last_key))

    self.pr('Checking ' + str(last_key) + ' keys')
    ds = SimpleDataSet(self, self.uri, last_key,
        key_format=self.keyfmt, value_format=self.valuefmt,
        config=self.table_config)

    # This test is *much* slower for fixed-length column stores: we fit
    # many more records into the cache, so don't do as many passes through
    # the data.
    checks = 10 if self.valuefmt.endswith('t') else 100
    for run in range(checks):
        ds.check()
        self.pr('Finished check ' + str(run))
        sleep(1)
def test_tiered(self):
    """Exercise tiered storage: populate, checkpoint, flush_tier, reopen.

    Grows the table in stages (10, 50, 100, 200, 300 records), flushing
    to the shared tier after most stages and confirming a flush happened
    (or, at the end, that none did). Removed dead commented-out
    ``session.create`` call and the two locals (``intl_page``,
    ``base_create``) that existed only to feed it.
    """
    self.flushed_objects = 0
    args = 'key_format=S'
    self.pr("create sys")

    self.progress('Create simple data set (10)')
    ds = SimpleDataSet(self, self.uri, 10, config=args)
    self.progress('populate')
    ds.populate()
    ds.check()
    self.progress('checkpoint')
    self.session.checkpoint()
    self.progress('flush_tier')
    self.session.flush_tier(None)
    self.confirm_flush()
    ds.check()

    # Reopen the connection and make sure the flushed data survives.
    self.close_conn()
    self.progress('reopen_conn')
    self.reopen_conn()
    # Check what was there before
    ds = SimpleDataSet(self, self.uri, 10, config=args)
    ds.check()

    self.progress('Create simple data set (50)')
    ds = SimpleDataSet(self, self.uri, 50, config=args)
    self.progress('populate')
    ds.populate()
    ds.check()
    # Hold an extra cursor open across the next checkpoint/flush cycle.
    self.progress('open extra cursor on ' + self.uri)
    cursor = self.session.open_cursor(self.uri, None, None)
    self.progress('checkpoint')
    self.session.checkpoint()
    self.progress('flush_tier')
    self.session.flush_tier(None)
    self.progress('flush_tier complete')
    self.confirm_flush()

    self.progress('Create simple data set (100)')
    ds = SimpleDataSet(self, self.uri, 100, config=args)
    self.progress('populate')
    ds.populate()
    ds.check()
    self.progress('checkpoint')
    self.session.checkpoint()
    self.progress('flush_tier')
    self.session.flush_tier(None)
    self.confirm_flush()

    self.progress('Create simple data set (200)')
    ds = SimpleDataSet(self, self.uri, 200, config=args)
    self.progress('populate')
    ds.populate()
    ds.check()
    cursor.close()
    self.progress('close_conn')
    self.close_conn()

    self.progress('reopen_conn')
    self.reopen_conn()
    # Check what was there before
    ds = SimpleDataSet(self, self.uri, 200, config=args)
    ds.check()

    # Now add some more.
    self.progress('Create simple data set (300)')
    ds = SimpleDataSet(self, self.uri, 300, config=args)
    self.progress('populate')
    ds.populate()
    ds.check()

    # We haven't done a flush so there should be
    # nothing extra on the shared tier.
    self.confirm_flush(increase=False)
    self.progress('checkpoint')
    self.session.checkpoint()
    # A checkpoint alone must not push anything to the shared tier either.
    self.confirm_flush(increase=False)
    self.progress('END TEST')
def test_tiered(self):
    # Grow a log-structured table through several checkpoints, confirming
    # that checkpoints flush objects to the shared tier, and that data
    # survives a connection close/reopen.
    self.flushed_objects = 0
    config = 'key_format=S,block_allocation=log-structured'
    self.verbose(3, 'Test log-structured allocation with config: ' + config)

    dataset = SimpleDataSet(self, self.uri, 10, config=config)
    dataset.populate()
    dataset.check()
    self.session.checkpoint()
    # For some reason, every checkpoint does not cause a flush.
    # As we're about to move to a new model of flushing, we're not going to chase this error.
    #self.confirm_flush()

    dataset = SimpleDataSet(self, self.uri, 50, config=config)
    dataset.populate()
    dataset.check()
    self.session.checkpoint()
    self.confirm_flush()

    dataset = SimpleDataSet(self, self.uri, 100, config=config)
    dataset.populate()
    dataset.check()
    self.session.checkpoint()
    self.confirm_flush()

    dataset = SimpleDataSet(self, self.uri, 200, config=config)
    dataset.populate()
    dataset.check()
    self.close_conn()
    self.confirm_flush()  # closing the connection does a checkpoint

    self.reopen_conn()
    # Check what was there before
    dataset = SimpleDataSet(self, self.uri, 200, config=config)
    dataset.check()

    # Now add some more.
    dataset = SimpleDataSet(self, self.uri, 300, config=config)
    dataset.populate()
    dataset.check()
    # We haven't done a checkpoint/flush so there should be
    # nothing extra on the shared tier.
    self.confirm_flush(increase=False)