class test_stat_cursor_dsrc_clear(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_dsrc_clear'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))
    conn_config = 'statistics=(all)'

    def test_stat_cursor_dsrc_clear(self):
        self.pop(self, self.uri, 'key_format=S', 100)

        # cursor_insert should clear.
        #
        # We can't easily test data-source items that shouldn't clear: as I
        # write this, session_cursor_open is the only such item, and it will
        # change to account for the statistics cursors we open here.
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all,clear)')
        self.assertGreater(cursor[stat.dsrc.cursor_insert][2], 0)
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all,clear)')
        self.assertEqual(cursor[stat.dsrc.cursor_insert][2], 0)
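# The statistics-cursor access pattern above (cursor[stat.dsrc.X][2]) relies on
# each statistics entry being a (description, value-string, value) triple.  A
# minimal standalone sketch of that pattern, assuming the standard 'wiredtiger'
# Python API; the 'table:example' object and this script are illustrative only,
# not part of the test suite.
from wiredtiger import wiredtiger_open, stat

conn = wiredtiger_open('.', 'create,statistics=(all)')
session = conn.open_session()
session.create('table:example', 'key_format=S,value_format=S')

statcursor = session.open_cursor(
    'statistics:table:example', None, 'statistics=(all,clear)')
# Indexing by a statistics key returns the triple; [2] is the integer value,
# and 'clear' resets the counters once the cursor has read them.
desc, valstr, val = statcursor[stat.dsrc.cursor_insert]
print desc, valstr, val
statcursor.close()
conn.close()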
class test_stat_cursor_fast(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_fast'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))

    # Override WiredTigerTestCase so the connection is opened with statistics
    # enabled.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(
            dir, 'create,statistics=(all),' +
            'error_prefix="%s: "' % self.shortid())
        return conn

    def test_stat_cursor_fast(self):
        self.pop(self, self.uri, 'key_format=S', 100)

        # A "fast" cursor shouldn't see the underlying btree statistics.
        # Check "fast" first, otherwise we get a copy of the statistics
        # we generated in the "all" call, they just aren't updated.
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(fast)')
        self.assertEqual(cursor[stat.dsrc.btree_entries][2], 0)
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all)')
        self.assertGreater(cursor[stat.dsrc.btree_entries][2], 0)
class test_stat_cursor_dsrc_error(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_dsrc_error'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))

    # Override WiredTigerTestCase so the connection is opened with statistics
    # enabled.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(
            dir, 'create,statistics=(all),' +
            'error_prefix="%s: "' % self.shortid())
        return conn

    def test_stat_cursor_dsrc_error(self):
        self.pop(self, self.uri, 'key_format=S', 100)
        args = ['all', 'fast']
        for i in list(itertools.permutations(args, 2)):
            config = 'statistics=(' + i[0] + ',' + i[1] + ')'
            msg = '/only one statistics configuration value/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.session.open_cursor(
                'statistics:' + self.uri, None, config), msg)
class test_bulk_load(wttest.WiredTigerTestCase):
    name = 'test_bulk'

    types = [('file', dict(type='file:')), ('table', dict(type='table:'))]
    keyfmt = [
        ('integer', dict(keyfmt='i')),
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]
    valfmt = [
        ('fixed', dict(valfmt='8t')),
        ('integer', dict(valfmt='i')),
        ('string', dict(valfmt='S')),
    ]
    scenarios = number_scenarios(
        multiply_scenarios('.', types, keyfmt, valfmt))

    # Test a simple bulk-load.
    def test_bulk_load(self):
        uri = self.type + self.name
        self.session.create(uri,
            'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
        cursor = self.session.open_cursor(uri, None, "bulk")
        for i in range(1, 100):
            cursor.set_key(key_populate(cursor, i))
            cursor.set_value(value_populate(cursor, i))
            cursor.insert()
        cursor.close()
class test_stat_cursor_dsrc_clear(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_dsrc_clear'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))

    # Override WiredTigerTestCase so the connection is opened with statistics
    # enabled.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(
            dir, 'create,statistics=(all),' +
            'error_prefix="%s: "' % self.shortid())
        return conn

    def test_stat_cursor_dsrc_clear(self):
        self.pop(self, self.uri, 'key_format=S', 100)

        # cursor_insert should clear.
        #
        # We can't easily test data-source items that shouldn't clear: as I
        # write this, session_cursor_open is the only such item, and it will
        # change to account for the statistics cursors we open here.
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all,clear)')
        self.assertGreater(cursor[stat.dsrc.cursor_insert][2], 0)
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all,clear)')
        self.assertEqual(cursor[stat.dsrc.cursor_insert][2], 0)
class test_huffman02(wttest.WiredTigerTestCase, suite_subprocess):
    """
    Test basic operations
    """
    table_name = 'table:test_huff'

    huffkey = [
        ('none', dict(huffkey='huffman_key=none')),
        ('english', dict(huffkey='huffman_key=english')),
        ('bad', dict(huffkey='huffman_key=bad')),
    ]
    huffval = [
        ('bad', dict(huffval=',huffman_value=bad')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', huffkey, huffval))

    def test_huffman(self):
        gotException = False
        expectMessage = 'illegal Huffman'
        config = self.huffkey + self.huffval
        with self.expectedStderrPattern(expectMessage):
            try:
                self.pr('expect an error message...')
                self.session.create(self.table_name, config)
            except wiredtiger.WiredTigerError as e:
                gotException = True
                self.pr('got expected exception: ' + str(e))
                self.assertTrue(str(e).find('nvalid argument') >= 0)
        self.assertTrue(gotException, 'expected exception')
class test_truncate_cursor_order(wttest.WiredTigerTestCase):
    name = 'test_truncate'

    types = [('file', dict(type='file:')), ('table', dict(type='table:'))]
    keyfmt = [
        ('integer', dict(keyfmt='i')),
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, keyfmt))

    # Test an illegal order, then confirm that equal cursors work.
    def test_truncate_cursor_order(self):
        uri = self.type + self.name
        simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)
        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)

        c1.set_key(key_populate(c1, 20))
        c2.set_key(key_populate(c2, 10))
        msg = '/the start cursor position is after the stop cursor position/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.session.truncate(None, c1, c2, None), msg)
        c2.set_key(key_populate(c2, 20))
        self.session.truncate(None, c1, c2, None)
class test_huffman02(wttest.WiredTigerTestCase, suite_subprocess):
    huffkey = [
        ('bad', dict(keybad=1, huffkey=',huffman_key=bad')),
        ('english', dict(keybad=0, huffkey=',huffman_key=english')),
        ('none', dict(keybad=0, huffkey=',huffman_key=none')),
    ]
    huffval = [
        ('bad', dict(valbad=1, huffval=',huffman_value=bad')),
        ('english', dict(valbad=0, huffval=',huffman_value=english')),
        ('none', dict(valbad=0, huffval=',huffman_value=none')),
    ]
    type = [
        ('file', dict(uri='file:huff')),
        ('table', dict(uri='table:huff')),
    ]
    scenarios = number_scenarios(
        multiply_scenarios('.', type, huffkey, huffval))

    def test_huffman(self):
        if self.keybad or self.valbad:
            msg = '/Invalid argument/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.session.create(
                self.uri, self.huffkey + self.huffval), msg)
        else:
            self.session.create(self.uri, self.huffkey + self.huffval)
class test_stat_cursor_reset(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_reset'
    uri = [
        ('file-simple', dict(uri='file:' + pfx, pop=simple_populate)),
        ('table-simple', dict(uri='table:' + pfx, pop=simple_populate)),
        ('table-complex', dict(uri='table:' + pfx, pop=complex_populate)),
        ('table-complex-lsm',
            dict(uri='table:' + pfx, pop=complex_populate_lsm)),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', uri))
    conn_config = 'statistics=(all)'

    def stat_cursor(self, uri):
        return self.session.open_cursor(
            'statistics:' + uri, None, 'statistics=(all)')

    def test_stat_cursor_reset(self):
        # The number of btree_entries reported is influenced by the
        # number of column groups and indices.  Each insert will have
        # a multiplied effect.
        if self.pop == simple_populate:
            # No declared colgroup is like one big colgroup.
            multiplier = 1
        else:
            multiplier = complex_populate_colgroup_count() + \
                complex_populate_index_count()
        n = 100
        self.pop(self, self.uri, 'key_format=S', n)
        statc = self.stat_cursor(self.uri)
        self.assertEqual(statc[stat.dsrc.btree_entries][2], n * multiplier)

        c = self.session.open_cursor(self.uri)
        c.set_key(key_populate(c, 200))
        if self.pop == simple_populate:
            c.set_value(value_populate(c, 200))
        else:
            c.set_value(tuple(complex_value_populate(c, 200)))
        c.insert()

        # Test that cursor reset re-loads the values.
        self.assertEqual(statc[stat.dsrc.btree_entries][2], n * multiplier)
        statc.reset()
        n += 1
        self.assertEqual(statc[stat.dsrc.btree_entries][2], n * multiplier)

        # For applications with indices and/or column groups, verify
        # that there is a way to count the base number of entries.
        if self.pop != simple_populate:
            statc.close()
            statc = self.stat_cursor(
                complex_populate_index_name(self, self.uri, 0))
            self.assertEqual(statc[stat.dsrc.btree_entries][2], n)
            statc.close()
            statc = self.stat_cursor(
                complex_populate_colgroup_name(self, self.uri, 0))
            self.assertEqual(statc[stat.dsrc.btree_entries][2], n)
        statc.close()
class test_jsondump01(wttest.WiredTigerTestCase, suite_subprocess):
    name = 'test_jsondump01'
    nentries = 2500

    keyfmt = [('integer', dict(keyfmt='i')), ('recno', dict(keyfmt='r')),
              ('string', dict(keyfmt='S'))]
    types = [
        ('file', dict(type='file:', name='file',
            populate=simple_populate,
            populate_check=simple_populate_check_cursor)),
        ('table-simple', dict(type='table:', name='table-simple',
            populate=simple_populate,
            populate_check=simple_populate_check_cursor)),
        ('table-complex', dict(type='table:', name='table-complex',
            populate=complex_populate,
            populate_check=complex_populate_check_cursor))
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, keyfmt))

    # Dump using util, re-load using python's JSON, and do a content
    # comparison.
    def test_jsondump_util(self):
        # Create the object.
        uri = self.type + self.name
        self.populate(self, uri, 'key_format=' + self.keyfmt, self.nentries)

        # Dump the object.
        self.runWt(['dump', '-j', uri], outfilename='jsondump.out')

        # Load it using python's built-in JSON.
        dumpin = open('jsondump.out')
        tables = json.load(dumpin)
        dumpin.close()

        # Spot check.
        configs = tables[uri][0]
        data = tables[uri][1]
        d = data[24]
        if 'column5' in d:
            self.assertEqual(d['column5'], '25: abcde')
        else:
            self.assertEqual(d['value0'], '25: abcdefghijklmnopqrstuvwxyz')

        # Check the contents of the data we read.
        # We only use a wt cursor to get the key_format/value_format.
        cursor = self.session.open_cursor(uri, None)
        fake = FakeCursor(cursor.key_format, cursor.value_format, data)
        cursor.close()
        self.populate_check(self, fake, self.nentries)
class test_bulkload_backup(wttest.WiredTigerTestCase, suite_subprocess):
    types = [
        ('file', dict(uri='file:data')),
        ('table', dict(uri='table:data')),
    ]
    ckpt_type = [
        ('named', dict(ckpt_type='named')),
        ('none', dict(ckpt_type='none')),
        ('unnamed', dict(ckpt_type='unnamed')),
    ]
    session_type = [
        ('different', dict(session_type='different')),
        ('same', dict(session_type='same')),
    ]
    scenarios = number_scenarios(
        multiply_scenarios('.', types, ckpt_type, session_type))

    # Backup a set of chosen tables/files using the wt backup command.
    # The only files are bulk-load files, so they shouldn't be copied.
    def check_backup(self, session):
        backupdir = 'backup.dir'
        self.backup(backupdir, session)

        # Open the target directory, and confirm the object has no contents.
        conn = wiredtiger.wiredtiger_open(backupdir)
        session = conn.open_session()
        cursor = session.open_cursor(self.uri, None, None)
        self.assertEqual(cursor.next(), wiredtiger.WT_NOTFOUND)
        conn.close()

    def test_bulk_backup(self):
        # Open a bulk cursor and insert a few records.
        self.session.create(self.uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(self.uri, None, 'bulk')
        for i in range(1, 10):
            cursor.set_key(key_populate(cursor, i))
            cursor.set_value(value_populate(cursor, i))
            cursor.insert()

        # Test without a checkpoint, with an unnamed checkpoint, with a named
        # checkpoint.
        if self.ckpt_type == 'named':
            self.session.checkpoint('name=myckpt')
        elif self.ckpt_type == 'unnamed':
            self.session.checkpoint()

        # Test with the same and different sessions than the bulk-load call,
        # test both the database handle and session handle caches.
        if self.session_type == 'same':
            self.check_backup(self.session)
        else:
            self.check_backup(self.conn.open_session())
class test_lsm01(wttest.WiredTigerTestCase):
    K = 1024
    M = 1024 * K
    G = 1024 * M
    uri = "lsm:test_lsm01"

    chunk_size_scenarios = wtscenario.quick_scenarios('s_chunk_size',
        [1 * M, 20 * M, None], [0.6, 0.6, 0.6])
    merge_max_scenarios = wtscenario.quick_scenarios('s_merge_max',
        [2, 10, 20, None], None)
    bloom_scenarios = wtscenario.quick_scenarios('s_bloom',
        [True, False, None], None)
    bloom_bit_scenarios = wtscenario.quick_scenarios('s_bloom_bit_count',
        [2, 8, 20, None], None)
    bloom_hash_scenarios = wtscenario.quick_scenarios('s_bloom_hash_count',
        [2, 10, 20, None], None)
    # Occasionally add a lot of records, so that merges (and bloom) happen.
    record_count_scenarios = wtscenario.quick_scenarios('nrecs',
        [10, 10000], [0.9, 0.1])

    config_vars = [ 'chunk_size', 'merge_max', 'bloom',
        'bloom_bit_count', 'bloom_hash_count' ]

    all_scenarios = wtscenario.multiply_scenarios('_',
        chunk_size_scenarios, merge_max_scenarios, bloom_scenarios,
        bloom_bit_scenarios, bloom_hash_scenarios, record_count_scenarios)

    scenarios = wtscenario.prune_scenarios(all_scenarios, 500)
    scenarios = wtscenario.number_scenarios(scenarios)

    # Create and populate an LSM object with the scenario's configuration.
    def test_lsm(self):
        args = 'key_format=S'
        args += ',lsm=('   # Start the LSM configuration options.
        # Add the scenario's configuration variables to args.
        for var in self.config_vars:
            value = getattr(self, 's_' + var)
            if value != None:
                if var == 'verbose':
                    value = '[' + str(value) + ']'
                if value == True:
                    value = 'true'
                if value == False:
                    value = 'false'
                args += ',' + var + '=' + str(value)
        args += ')'   # Close the LSM configuration option group.
        self.verbose(3,
            'Test LSM with config: ' + args + ' count: ' + str(self.nrecs))
        simple_populate(self, self.uri, args, self.nrecs)
class test_stat_cursor_config(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_config'
    uri = [
        ('file', dict(uri='file:' + pfx, pop=simple_populate, cfg='')),
        ('table', dict(uri='table:' + pfx, pop=simple_populate, cfg='')),
        ('table-lsm',
            dict(uri='table:' + pfx, pop=simple_populate, cfg=',type=lsm')),
        ('complex', dict(uri='table:' + pfx, pop=complex_populate, cfg='')),
        ('complex-lsm',
            dict(uri='table:' + pfx, pop=complex_populate_lsm, cfg=''))
    ]
    data_config = [
        ('none', dict(data_config='none', ok=[])),
        ('all', dict(data_config='all', ok=['empty', 'fast', 'all', 'size'])),
        ('fast', dict(data_config='fast', ok=['empty', 'fast', 'size']))
    ]
    cursor_config = [
        ('empty', dict(cursor_config='empty')),
        ('all', dict(cursor_config='all')),
        ('fast', dict(cursor_config='fast')),
        ('size', dict(cursor_config='size'))
    ]
    scenarios = number_scenarios(
        multiply_scenarios('.', uri, data_config, cursor_config))

    # Override WiredTigerTestCase so the connection is opened with the
    # scenario's statistics configuration.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(dir,
            'create,' +
            'statistics=(' + self.data_config + '),' +
            'error_prefix="%s: "' % self.shortid())
        return conn

    # For each database/cursor configuration, confirm the right combinations
    # succeed or fail.
    def test_stat_cursor_config(self):
        self.pop(self, self.uri, 'key_format=S' + self.cfg, 100)
        config = 'statistics=('
        if self.cursor_config != 'empty':
            config = config + self.cursor_config
        config = config + ')'
        if self.ok and self.cursor_config in self.ok:
            self.session.open_cursor('statistics:', None, config)
        else:
            msg = '/database statistics configuration/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.session.open_cursor('statistics:', None, config),
                msg)
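# All of these classes build their scenario lists the same way: each entry is
# a (name, dict) pair, multiply_scenarios() takes the cross product of the
# lists and merges the dicts, and number_scenarios() prefixes each name with an
# index so every combination runs as its own test case.  A hand-rolled sketch
# of the cross-product idea follows; it is illustrative only, not the
# wtscenario module's implementation, and the example names are made up.
import itertools

def cross_scenarios(sep, *lists):
    result = []
    for combo in itertools.product(*lists):
        name = sep.join(n for n, _ in combo)
        merged = {}
        for _, d in combo:
            merged.update(d)
        result.append((name, merged))
    return result

# Example: 2 uris x 2 cursor configs yields 4 scenarios; the test framework
# applies each merged dict as instance attributes (self.uri, self.cursor_config).
uri = [('file', dict(uri='file:example')), ('table', dict(uri='table:example'))]
cursor_config = [('all', dict(cursor_config='all')),
                 ('fast', dict(cursor_config='fast'))]
for name, attrs in cross_scenarios('.', uri, cursor_config):
    print name, attrs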
class test_bug003(wttest.WiredTigerTestCase):
    types = [
        ('file', dict(uri='file:data')),
        ('table', dict(uri='table:data')),
    ]
    ckpt = [
        ('no', dict(name=0)),
        ('yes', dict(name=1)),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, ckpt))

    # Confirm bulk-load isn't stopped by checkpoints.
    def test_bug003(self):
        self.session.create(self.uri, "key_format=S,value_format=S")
        if self.name == 1:
            self.session.checkpoint("name=ckpt")
        else:
            self.session.checkpoint()
        cursor = self.session.open_cursor(self.uri, None, "bulk")
class test_stat_cursor_dsrc_error(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_dsrc_error'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))
    conn_config = 'statistics=(all)'

    def test_stat_cursor_dsrc_error(self):
        self.pop(self, self.uri, 'key_format=S', 100)
        args = ['all', 'fast']
        for i in list(itertools.permutations(args, 2)):
            config = 'statistics=(' + i[0] + ',' + i[1] + ')'
            msg = '/only one statistics configuration value/'
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.session.open_cursor(
                'statistics:' + self.uri, None, config), msg)
class test_bulkload_checkpoint(wttest.WiredTigerTestCase, suite_subprocess):
    types = [
        ('file', dict(uri='file:data')),
        ('table', dict(uri='table:data')),
    ]
    ckpt_type = [
        ('named', dict(ckpt_type='named')),
        ('unnamed', dict(ckpt_type='unnamed')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, ckpt_type))

    # Bulk-load handles return EBUSY to the checkpoint code, causing the
    # checkpoint call to find a handle anyway, and create a fake checkpoint.
    # Named and unnamed checkpoint versions.
    def test_bulkload_checkpoint(self):
        # Open a bulk cursor and insert a few records.
        self.session.create(self.uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(self.uri, None, 'bulk')
        for i in range(1, 10):
            cursor.set_key(key_populate(cursor, i))
            cursor.set_value(value_populate(cursor, i))
            cursor.insert()

        # Checkpoint a few times (to test the drop code).
        for i in range(1, 5):
            if self.ckpt_type == 'named':
                self.session.checkpoint('name=myckpt')
            else:
                self.session.checkpoint()

        # Close the bulk cursor.
        cursor.close()

        # In the case of named checkpoints, verify they're still there,
        # reflecting an empty file.
        if self.ckpt_type == 'named':
            cursor = self.session.open_cursor(
                self.uri, None, 'checkpoint=myckpt')
            self.assertEquals(cursor.next(), wiredtiger.WT_NOTFOUND)
            cursor.close()
class test_huffman01(wttest.WiredTigerTestCase, suite_subprocess):
    """
    Test basic operations
    """
    table_name = 'table:test_huff'

    huffkey = [
        ('none', dict(huffkey='huffman_key=none', kfile=None)),
        ('english', dict(huffkey='huffman_key=english', kfile=None)),
        ('utf8', dict(huffkey='huffman_key=utf8t8file', kfile='t8file')),
        ('utf16', dict(huffkey='huffman_key=utf16t16file', kfile='t16file')),
    ]
    huffval = [
        ('none', dict(huffval=',huffman_value=none', vfile=None)),
        ('english', dict(huffval=',huffman_value=english', vfile=None)),
        ('utf8', dict(huffval=',huffman_value=utf8t8file', vfile='t8file')),
        ('utf16',
            dict(huffval=',huffman_value=utf16t16file', vfile='t16file')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', huffkey, huffval))

    def test_huffman(self):
        dir = self.conn.get_home()
        if self.kfile != None:
            # For the UTF settings write some made-up frequency information.
            f = open(dir + '/' + self.kfile, 'w')
            f.write('48 546233\n49 460946\n')
            f.write('0x4a 546233\n0x4b 460946\n')
            f.close()
        # if self.vfile != None and not os.path.exists(self.vfile):
        if self.vfile != None:
            f = open(dir + '/' + self.vfile, 'w')
            # For the UTF settings write some made-up frequency information.
            f.write('48 546233\n49 460946\n')
            f.write('0x4a 546233\n0x4b 460946\n')
            f.close()
        config = self.huffkey + self.huffval
        self.session.create(self.table_name, config)
class test_stat_cursor_fast(wttest.WiredTigerTestCase):
    pfx = 'test_stat_cursor_fast'
    uri = [('1', dict(uri='file:' + pfx, pop=simple_populate)),
           ('2', dict(uri='table:' + pfx, pop=simple_populate)),
           ('3', dict(uri='table:' + pfx, pop=complex_populate)),
           ('4', dict(uri='table:' + pfx, pop=complex_populate_lsm))]
    scenarios = number_scenarios(multiply_scenarios('.', uri))
    conn_config = 'statistics=(all)'

    def test_stat_cursor_fast(self):
        self.pop(self, self.uri, 'key_format=S', 100)

        # A "fast" cursor shouldn't see the underlying btree statistics.
        # Check "fast" first, otherwise we get a copy of the statistics
        # we generated in the "all" call, they just aren't updated.
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(fast)')
        self.assertEqual(cursor[stat.dsrc.btree_entries][2], 0)
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, 'statistics=(all)')
        self.assertGreater(cursor[stat.dsrc.btree_entries][2], 0)
class test_bulkload_checkpoint(wttest.WiredTigerTestCase, suite_subprocess):
    types = [
        ('file', dict(uri='file:data')),
        ('table', dict(uri='table:data')),
    ]
    ckpt_type = [
        ('named', dict(ckpt_type='named')),
        ('unnamed', dict(ckpt_type='unnamed')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, ckpt_type))

    # Bulk-load handles are skipped by checkpoints.
    # Named and unnamed checkpoint versions.
    def test_bulkload_checkpoint(self):
        # Open a bulk cursor and insert a few records.
        self.session.create(self.uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(self.uri, None, 'bulk')
        for i in range(1, 10):
            cursor[key_populate(cursor, i)] = value_populate(cursor, i)

        # Checkpoint a few times (to test the drop code).
        for i in range(1, 5):
            if self.ckpt_type == 'named':
                self.session.checkpoint('name=myckpt')
            else:
                self.session.checkpoint()

        # Close the bulk cursor.
        cursor.close()

        # In the case of named checkpoints, verify the checkpoint doesn't
        # exist for the bulk-loaded object: its handle was skipped, so opening
        # the named checkpoint should fail.
        if self.ckpt_type == 'named':
            self.assertRaises(wiredtiger.WiredTigerError,
                lambda: self.session.open_cursor(
                self.uri, None, 'checkpoint=myckpt'))
class test_cursor_comparison(wttest.WiredTigerTestCase):
    name = 'test_compare'

    types = [('file', dict(type='file:')), ('table', dict(type='table:'))]
    keyfmt = [('integer', dict(keyfmt='i')), ('recno', dict(keyfmt='r')),
              ('string', dict(keyfmt='S'))]
    scenarios = number_scenarios(multiply_scenarios('.', types, keyfmt))

    def test_cursor_comparison(self):
        uri = self.type + 'compare'

        # Build the object.
        if self.type == 'file:':
            simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)
        else:
            complex_populate(self, uri, 'key_format=' + self.keyfmt, 100)

        c1 = self.session.open_cursor(uri, None)
        c2 = self.session.open_cursor(uri, None)

        # Confirm the method fails unless the keys are set.
        msg = '/requires key be set/'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: c1.compare(c2), msg)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: c2.compare(c1), msg)

        # Test cursors in all three orders.
        c1.set_key(key_populate(c1, 10))
        self.assertEquals(c1.search(), 0)
        c2.set_key(key_populate(c2, 20))
        self.assertEquals(c2.search(), 0)
        self.assertGreater(c2.compare(c1), 0)
        self.assertLess(c1.compare(c2), 0)
        c2.set_key(key_populate(c2, 10))
        self.assertEquals(c1.compare(c2), 0)
class test_truncate_cursor_end(wttest.WiredTigerTestCase):
    name = 'test_truncate'

    types = [('file', dict(type='file:')), ('table', dict(type='table:'))]
    keyfmt = [
        ('integer', dict(keyfmt='i')),
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, keyfmt))

    # Test truncation of cursors past the end of the object.
    def test_truncate_cursor_order(self):
        uri = self.type + self.name

        # A simple, one-file file or table object.
        simple_populate(self, uri, 'key_format=' + self.keyfmt, 100)
        c1 = self.session.open_cursor(uri, None)
        c1.set_key(key_populate(c1, 1000))
        c2 = self.session.open_cursor(uri, None)
        c2.set_key(key_populate(c2, 2000))
        self.session.truncate(None, c1, c2, None)
        self.assertEquals(c1.close(), 0)
        self.assertEquals(c2.close(), 0)
        self.session.drop(uri)

        if self.type == "table:":
            complex_populate(self, uri, 'key_format=' + self.keyfmt, 100)
            c1 = self.session.open_cursor(uri, None)
            c1.set_key(key_populate(c1, 1000))
            c2 = self.session.open_cursor(uri, None)
            c2.set_key(key_populate(c2, 2000))
            self.session.truncate(None, c1, c2, None)
            self.assertEquals(c1.close(), 0)
            self.assertEquals(c2.close(), 0)
            self.session.drop(uri)
class test_config03(test_base03.test_base03):
    K = 1024
    M = 1024 * K
    G = 1024 * M

    cache_size_scenarios = wtscenario.quick_scenarios('s_cache_size',
        [1 * M, 20 * M, 100 * M, 1 * G, None], [0.6, 0.6, 0.6, 0.6, 0.6])
    create_scenarios = wtscenario.quick_scenarios('s_create',
        [True, False, None], [1.0, 0.2, 0.3])
    error_prefix_scenarios = wtscenario.quick_scenarios('s_error_prefix',
        [None, "errpfx:"], [1.0, 0.2])
    # eviction_target < eviction_trigger -- checked later
    eviction_target_scenarios = wtscenario.quick_scenarios(
        's_eviction_target', [10, 40, 85, 98], None)
    eviction_trigger_scenarios = wtscenario.quick_scenarios(
        's_eviction_trigger', [50, 90, 95, 99], None)
    hazard_max_scenarios = wtscenario.quick_scenarios('s_hazard_max',
        [15, 50, 500], [0.4, 0.8, 0.8])
    multiprocess_scenarios = wtscenario.quick_scenarios('s_multiprocess',
        [True, False], [1.0, 1.0])
    session_max_scenarios = wtscenario.quick_scenarios('s_session_max',
        [3, 30, 300], None)
    transactional_scenarios = wtscenario.quick_scenarios('s_transactional',
        [True, False], [0.2, 1.0])

    # Note: we are not using any truly verbose scenarios until we have
    # a way to redirect verbose output to a file in Python.
    #
    #verbose_scenarios = wtscenario.quick_scenarios('s_verbose',
    #    ['block', 'evict,evictserver', 'fileops,hazard,mutex',
    #     'read,readserver,reconcile,salvage', 'verify,write', ''], None)
    verbose_scenarios = wtscenario.quick_scenarios('s_verbose', [None], None)

    config_vars = [ 'cache_size', 'create', 'error_prefix', 'eviction_target',
        'eviction_trigger', 'hazard_max', 'multiprocess', 'session_max',
        'verbose' ]

    all_scenarios = wtscenario.multiply_scenarios('_',
        cache_size_scenarios, create_scenarios, error_prefix_scenarios,
        eviction_target_scenarios, eviction_trigger_scenarios,
        hazard_max_scenarios, multiprocess_scenarios,
        session_max_scenarios, transactional_scenarios, verbose_scenarios)

    scenarios = wtscenario.prune_scenarios(all_scenarios, 1000)
    scenarios = wtscenario.number_scenarios(scenarios)

    #wttest.WiredTigerTestCase.printVerbose(2, 'test_config03: running ' + \
    #    str(len(scenarios)) + ' of ' + \
    #    str(len(all_scenarios)) + ' possible scenarios')

    def setUpConnectionOpen(self, dir):
        args = ''
        # add names to args, e.g. args += ',session_max=30'
        for var in self.config_vars:
            value = getattr(self, 's_' + var)
            if value != None:
                if var == 'verbose':
                    value = '[' + str(value) + ']'
                if value == True:
                    value = 'true'
                if value == False:
                    value = 'false'
                args += ',' + var + '=' + str(value)
        args += ','
        self.pr('wiredtiger_open with args: ' + args)

        expect_fail = False
        successargs = args
        if self.s_create == False:
            successargs = successargs.replace(',create=false,', ',create,')
            expect_fail = True
            fail_msg = \
                '/(No such file or directory|The system cannot find the file specified)/'
        elif self.s_create == None:
            successargs = successargs + 'create=true,'
            expect_fail = True
            fail_msg = \
                '/(No such file or directory|The system cannot find the file specified)/'

        if self.s_eviction_target >= self.s_eviction_trigger:
            # Construct args that guarantee that target < trigger;
            # we know that trigger >= 1.
            repfrom = ',eviction_target=' + str(self.s_eviction_target)
            repto = ',eviction_target=' + str(self.s_eviction_trigger - 1)
            successargs = successargs.replace(repfrom, repto)
            if not expect_fail:
                expect_fail = True
                fail_msg = \
                    '/eviction target must be lower than the eviction trigger/'

        if expect_fail:
            self.verbose(3,
                'wiredtiger_open (should fail) with args: ' + args)
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: wiredtiger.wiredtiger_open(dir, args), fail_msg)
            args = successargs

        self.verbose(3, 'wiredtiger_open with args: ' + args)
        conn = wiredtiger.wiredtiger_open(dir, args)
        self.pr(`conn`)
        return conn
class test_truncate_fast_delete(wttest.WiredTigerTestCase):
    name = 'test_truncate'
    nentries = 10000

    # Use a small page size and lots of keys because we want to create lots
    # of individual pages in the file.
    types = [
        ('file', dict(type='file:', config=\
            'allocation_size=512,leaf_page_max=512,' +\
            'value_format=S,key_format=')),
    ]

    # This is all about testing the btree layer, not the schema layer, test
    # files and ignore tables.
    keyfmt = [
        ('integer', dict(keyfmt='i')),
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]

    # Overflow records force pages to be instantiated, blocking fast delete.
    overflow = [
        ('ovfl1', dict(overflow=False)),
        ('ovfl2', dict(overflow=True)),
    ]

    # Random reads and writes force pages to be instantiated and potentially
    # create update structures, blocking fast delete and changing how fast
    # delete rollback works.
    reads = [
        ('read1', dict(readafter=False,readbefore=False)),
        ('read2', dict(readafter=True,readbefore=False)),
        ('read3', dict(readafter=False,readbefore=True)),
        ('read4', dict(readafter=True,readbefore=True)),
    ]
    writes = [
        ('write1', dict(writeafter=False,writebefore=False)),
        ('write2', dict(writeafter=True,writebefore=False)),
        ('write3', dict(writeafter=False,writebefore=True)),
        ('write4', dict(writeafter=True,writebefore=True)),
    ]

    # Test both commit and abort of the truncate transaction.
    txn = [
        ('txn1', dict(commit=True)),
        ('txn2', dict(commit=False)),
    ]

    scenarios = number_scenarios(
        multiply_scenarios('.', types, keyfmt, overflow, reads, writes, txn))

    # Return the number of records visible to the cursor; test both forward
    # and backward iteration, they are different code paths in this case.
    def cursor_count(self, cursor, expected):
        count = 0
        while cursor.next() == 0:
            count += 1
        self.assertEqual(count, expected)
        cursor.reset()
        count = 0
        while cursor.prev() == 0:
            count += 1
        self.assertEqual(count, expected)

    # Open a cursor in a new session and confirm how many records it sees.
    def outside_count(self, isolation, expected):
        s = self.conn.open_session()
        s.begin_transaction(isolation)
        cursor = s.open_cursor(self.type + self.name, None)
        self.cursor_count(cursor, expected)
        s.close()

    # Trigger fast delete and test cursor counts.
    def test_truncate_fast_delete(self):
        uri = self.type + self.name

        '''
        print '===== run:'
        print 'config:', self.config + self.keyfmt, \
            'overflow=', self.overflow, \
            'readafter=', self.readafter, 'readbefore=', self.readbefore, \
            'writeafter=', self.writeafter, 'writebefore=', self.writebefore, \
            'commit=', self.commit
        '''

        # Create the object.
        simple_populate(self, uri, self.config + self.keyfmt, self.nentries)

        # Optionally add a few overflow records so we block fast delete on
        # those pages.
        if self.overflow:
            cursor = self.session.open_cursor(uri, None, 'overwrite=false')
            for i in range(1, self.nentries, 3123):
                cursor.set_key(key_populate(cursor, i))
                cursor.set_value(value_populate(cursor, i))
                cursor.update()
            cursor.close()

        # Close and re-open it so we get a disk image, not an insert skiplist.
        self.reopen_conn()

        # Optionally read/write a few rows before truncation.
        if self.readbefore or self.writebefore:
            cursor = self.session.open_cursor(uri, None, 'overwrite=false')
            if self.readbefore:
                for i in range(1, self.nentries, 737):
                    cursor.set_key(key_populate(cursor, i))
                    cursor.search()
            if self.writebefore:
                for i in range(1, self.nentries, 988):
                    cursor.set_key(key_populate(cursor, i))
                    cursor.set_value(value_populate(cursor, i + 100))
                    cursor.update()
            cursor.close()

        # Begin a transaction, and truncate a big range of rows.
        self.session.begin_transaction(None)
        start = self.session.open_cursor(uri, None)
        start.set_key(key_populate(start, 10))
        end = self.session.open_cursor(uri, None)
        end.set_key(key_populate(end, self.nentries - 10))
        self.session.truncate(None, start, end, None)
        start.close()
        end.close()

        # Optionally read/write a few rows after truncation.
        if self.readafter or self.writeafter:
            cursor = self.session.open_cursor(uri, None, 'overwrite=false')
            if self.readafter:
                for i in range(1, self.nentries, 1123):
                    cursor.set_key(key_populate(cursor, i))
                    cursor.search()
            if self.writeafter:
                for i in range(1, self.nentries, 621):
                    cursor.set_key(key_populate(cursor, i))
                    cursor.set_value(value_populate(cursor, i + 100))
                    cursor.update()
            cursor.close()

        # A cursor involved in the transaction should see the deleted records.
        # The number 19 comes from deleting row 10 (inclusive), to row N - 10,
        # exclusive, or 9 + 10 == 19.
        remaining = 19
        cursor = self.session.open_cursor(uri, None)
        self.cursor_count(cursor, remaining)
        cursor.close()

        # A separate, read_committed cursor should not see the deleted records.
        self.outside_count("isolation=read-committed", self.nentries)

        # A separate, read_uncommitted cursor should see the deleted records.
        self.outside_count("isolation=read-uncommitted", remaining)

        # Commit/rollback the transaction.
        if self.commit:
            self.session.commit_transaction()
        else:
            self.session.rollback_transaction()

        # Check a read_committed cursor sees the right records.
        cursor = self.session.open_cursor(uri, None)
        if self.commit:
            self.cursor_count(cursor, remaining)
        else:
            self.cursor_count(cursor, self.nentries)
        cursor.close()
class test_txn04(wttest.WiredTigerTestCase, suite_subprocess):
    logmax = "100K"
    tablename = 'test_txn04'
    uri = 'table:' + tablename
    sync_list = [
        '(method=dsync,enabled)',
        '(method=fsync,enabled)',
        '(method=none,enabled)',
        '(enabled=false)'
    ]

    types = [
        ('row', dict(tabletype='row',
            create_params='key_format=i,value_format=i')),
        ('var', dict(tabletype='var',
            create_params='key_format=r,value_format=i')),
        ('fix', dict(tabletype='fix',
            create_params='key_format=r,value_format=8t')),
    ]
    op1s = [
        ('insert', dict(op1=('insert', 6))),
        ('update', dict(op1=('update', 2))),
        ('remove', dict(op1=('remove', 2))),
        ('trunc-stop', dict(op1=('stop', 2))),
    ]
    txn1s = [('t1c', dict(txn1='commit')), ('t1r', dict(txn1='rollback'))]

    scenarios = number_scenarios(multiply_scenarios('.', types, op1s, txn1s))

    # Overrides WiredTigerTestCase
    def setUpConnectionOpen(self, dir):
        self.home = dir
        # Cycle through the different transaction_sync values in a
        # deterministic manner.
        self.txn_sync = self.sync_list[
            self.scenario_number % len(self.sync_list)]
        self.backup_dir = os.path.join(self.home, "WT_BACKUP")
        # Set archive false on the home directory.
        conn_params = \
                'log=(archive=false,enabled,file_max=%s),' % self.logmax + \
                'create,error_prefix="%s: ",' % self.shortid() + \
                'transaction_sync="%s",' % self.txn_sync
        # print "Creating conn at '%s' with config '%s'" % (dir, conn_params)
        conn = wiredtiger_open(dir, conn_params)
        self.pr(`conn`)
        self.session2 = conn.open_session()
        return conn

    # Check that a cursor (optionally started in a new transaction), sees the
    # expected values.
    def check(self, session, txn_config, expected):
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri, None)
        actual = dict((k, v) for k, v in c if v != 0)
        # Search for the expected items as well as iterating.
        for k, v in expected.iteritems():
            self.assertEqual(c[k], v)
        c.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(actual, expected)

    # Check the state of the system with respect to the current cursor and
    # different isolation levels.
    def check_all(self, current, committed):
        # Transactions see their own changes.
        # Read-uncommitted transactions see all changes.
        # Snapshot and read-committed transactions should not see changes.
        self.check(self.session, None, current)
        self.check(self.session2, "isolation=snapshot", committed)
        self.check(self.session2, "isolation=read-committed", committed)
        self.check(self.session2, "isolation=read-uncommitted", current)

    def hot_backup(self, backup_uri, committed):
        # If we are backing up a target, assume the directory exists.
        # We just use the wt backup command.
        # A future test extension could also use a cursor.
        cmd = '-h ' + self.home + ' backup '
        if backup_uri != None:
            cmd += '-t ' + backup_uri + ' '
        else:
            shutil.rmtree(self.backup_dir, ignore_errors=True)
            os.mkdir(self.backup_dir)
        cmd += self.backup_dir
        self.runWt(cmd.split())
        self.exception = 'false'
        backup_conn_params = 'log=(enabled,file_max=%s)' % self.logmax
        backup_conn = wiredtiger_open(self.backup_dir, backup_conn_params)
        try:
            self.check(backup_conn.open_session(), None, committed)
        except:
            self.exception = 'true'
        finally:
            backup_conn.close()

    def test_ops(self):
        self.session.create(self.uri, self.create_params)
        c = self.session.open_cursor(self.uri, None, 'overwrite')
        # Set up the table with entries for 1-5.
        # We then truncate starting or ending in various places.
        # We use the overwrite config so insert can update as needed.
        current = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1}
        c.set_value(1)
        for k in current:
            c.set_key(k)
            c.insert()
        committed = current.copy()

        ops = (self.op1, )
        txns = (self.txn1, )
        for i, ot in enumerate(zip(ops, txns)):
            # Perform a full hot backup of the original tables.
            # The runWt command closes our connection and sessions so
            # we need to reopen them here.
            self.hot_backup(None, committed)
            self.assertEqual(True, self.exception == 'false')
            c = self.session.open_cursor(self.uri, None, 'overwrite')
            c.set_value(1)

            # Then do the given modification.
            # Perform a targeted hot backup.
            self.session.begin_transaction()
            ok, txn = ot
            op, k = ok

            # print '%d: %s(%d)[%s]' % (i, ok[0], ok[1], txn)
            if op == 'insert' or op == 'update':
                c.set_value(i + 2)
                c.set_key(k)
                c.insert()
                current[k] = i + 2
            elif op == 'remove':
                c.set_key(k)
                c.remove()
                if k in current:
                    del current[k]
            elif op == 'stop':
                # For both, the key given is the start key.  Add 2
                # for the stop key.
                c.set_key(k)
                kstart = 1
                kstop = k
                self.session.truncate(None, None, c, None)
                while (kstart <= kstop):
                    del current[kstart]
                    kstart += 1

            # print current
            # Check the state after each operation.
            self.check_all(current, committed)

            if txn == 'commit':
                committed = current.copy()
                self.session.commit_transaction()
            elif txn == 'rollback':
                current = committed.copy()
                self.session.rollback_transaction()

            # Check the state after each commit/rollback.
            self.check_all(current, committed)

            # Backup the target we modified.  We expect that running
            # recovery now will generate an exception if we committed.
            # print 'Call hot_backup with ' + self.uri
            self.hot_backup(self.uri, committed)
            if txn == 'commit':
                self.assertEqual(True, self.exception == 'true')
            else:
                self.assertEqual(True, self.exception == 'false')
class test_txn02(wttest.WiredTigerTestCase, suite_subprocess):
    logmax = "100K"
    tablename = 'test_txn02'
    uri = 'table:' + tablename
    archive_list = ['true', 'false']
    conn_list = ['reopen', 'stay_open']
    sync_list = [
        '(method=dsync,enabled)',
        '(method=fsync,enabled)',
        '(method=none,enabled)',
        '(enabled=false)'
    ]

    types = [
        ('row', dict(tabletype='row',
            create_params='key_format=i,value_format=i')),
        ('var', dict(tabletype='var',
            create_params='key_format=r,value_format=i')),
        ('fix', dict(tabletype='fix',
            create_params='key_format=r,value_format=8t')),
    ]
    op1s = [
        ('i4', dict(op1=('insert', 4))),
        ('r1', dict(op1=('remove', 1))),
        ('u10', dict(op1=('update', 10))),
    ]
    op2s = [
        ('i6', dict(op2=('insert', 6))),
        ('r4', dict(op2=('remove', 4))),
        ('u4', dict(op2=('update', 4))),
    ]
    op3s = [
        ('i12', dict(op3=('insert', 12))),
        ('r4', dict(op3=('remove', 4))),
        ('u4', dict(op3=('update', 4))),
    ]
    op4s = [
        ('i14', dict(op4=('insert', 14))),
        ('r12', dict(op4=('remove', 12))),
        ('u12', dict(op4=('update', 12))),
    ]
    txn1s = [('t1c', dict(txn1='commit')), ('t1r', dict(txn1='rollback'))]
    txn2s = [('t2c', dict(txn2='commit')), ('t2r', dict(txn2='rollback'))]
    txn3s = [('t3c', dict(txn3='commit')), ('t3r', dict(txn3='rollback'))]
    txn4s = [('t4c', dict(txn4='commit')), ('t4r', dict(txn4='rollback'))]

    all_scenarios = multiply_scenarios('.', types,
        op1s, txn1s, op2s, txn2s, op3s, txn3s, op4s, txn4s)

    # This test generates thousands of potential scenarios.
    # For default runs, we'll use a small subset of them, for
    # long runs (when --long is set) we'll set a much larger limit.
    scenarios = number_scenarios(prune_scenarios(all_scenarios, 20, 5000))

    # Each check_log() call takes a second, so we don't call it for
    # every scenario, we'll limit it to the value of checklog_calls.
    checklog_calls = 100 if wttest.islongtest() else 2
    checklog_mod = (len(scenarios) / checklog_calls + 1)

    # scenarios = number_scenarios(multiply_scenarios('.', types,
    #     op1s, txn1s, op2s, txn2s, op3s, txn3s, op4s, txn4s)) [:3]

    # Overrides WiredTigerTestCase
    def setUpConnectionOpen(self, dir):
        self.home = dir
        # Cycle through the different transaction_sync values in a
        # deterministic manner.
        self.txn_sync = self.sync_list[
            self.scenario_number % len(self.sync_list)]
        #
        # We don't want to run zero fill with only the same settings, such
        # as archive or sync, which are an even number of options.
        #
        freq = 3
        zerofill = 'false'
        if self.scenario_number % freq == 0:
            zerofill = 'true'
        self.backup_dir = os.path.join(self.home, "WT_BACKUP")
        conn_params = \
                'log=(archive=false,enabled,file_max=%s),' % self.logmax + \
                'log=(zero_fill=%s),' % zerofill + \
                'create,error_prefix="%s: ",' % self.shortid() + \
                'transaction_sync="%s",' % self.txn_sync
        # print "Creating conn at '%s' with config '%s'" % (dir, conn_params)
        conn = wiredtiger_open(dir, conn_params)
        self.pr(`conn`)
        self.session2 = conn.open_session()
        return conn

    # Check that a cursor (optionally started in a new transaction), sees the
    # expected values.
    def check(self, session, txn_config, expected):
        if txn_config:
            session.begin_transaction(txn_config)
        c = session.open_cursor(self.uri, None)
        actual = dict((k, v) for k, v in c if v != 0)
        # Search for the expected items as well as iterating.
        for k, v in expected.iteritems():
            self.assertEqual(c[k], v)
        c.close()
        if txn_config:
            session.commit_transaction()
        self.assertEqual(actual, expected)

    # Check the state of the system with respect to the current cursor and
    # different isolation levels.
    def check_all(self, current, committed):
        # Transactions see their own changes.
        # Read-uncommitted transactions see all changes.
        # Snapshot and read-committed transactions should not see changes.
        self.check(self.session, None, current)
        self.check(self.session2, "isolation=snapshot", committed)
        self.check(self.session2, "isolation=read-committed", committed)
        self.check(self.session2, "isolation=read-uncommitted", current)

        # Opening a clone of the database home directory should run
        # recovery and see the committed results.
        self.backup(self.backup_dir)
        backup_conn_params = 'log=(enabled,file_max=%s)' % self.logmax
        backup_conn = wiredtiger_open(self.backup_dir, backup_conn_params)
        try:
            self.check(backup_conn.open_session(), None, committed)
        finally:
            backup_conn.close()

    def check_log(self, committed):
        self.backup(self.backup_dir)
        #
        # Open and close the backup connection a few times to force
        # repeated recovery and log archiving even if later recoveries
        # are essentially no-ops.  Confirm that the backup contains
        # the committed operations after recovery.
        #
        # Cycle through the different archive values in a
        # deterministic manner.
        self.archive = self.archive_list[
            self.scenario_number % len(self.archive_list)]
        backup_conn_params = \
            'log=(enabled,file_max=%s,archive=%s)' % (self.logmax, self.archive)
        orig_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        endcount = 2
        count = 0
        while count < endcount:
            backup_conn = wiredtiger_open(self.backup_dir, backup_conn_params)
            try:
                self.check(backup_conn.open_session(), None, committed)
            finally:
                # Sleep long enough so that the archive thread is guaranteed
                # to run before we close the connection.
                time.sleep(1.0)
                backup_conn.close()
            count += 1
        #
        # Check logs after repeated openings.  The first log should
        # have been archived if configured.  Subsequent openings would not
        # archive because no checkpoint is written due to no modifications.
        #
        cur_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        for o in orig_logs:
            if self.archive == 'true':
                self.assertEqual(False, o in cur_logs)
            else:
                self.assertEqual(True, o in cur_logs)
        #
        # Run printlog and make sure it exits with zero status.
        # Printlog should not run recovery nor advance the logs.  Make sure
        # it does not.
        #
        self.runWt(['-h', self.backup_dir, 'printlog'],
            outfilename='printlog.out')
        pr_logs = fnmatch.filter(os.listdir(self.backup_dir), "*Log*")
        self.assertEqual(cur_logs, pr_logs)

    def test_ops(self):
        # print "Creating %s with config '%s'" % (self.uri, self.create_params)
        self.session.create(self.uri, self.create_params)
        # Set up the table with entries for 1, 2, 10 and 11.
        # We use the overwrite config so insert can update as needed.
        c = self.session.open_cursor(self.uri, None, 'overwrite')
        c[1] = c[2] = c[10] = c[11] = 1
        current = {1: 1, 2: 1, 10: 1, 11: 1}
        committed = current.copy()

        reopen = self.conn_list[self.scenario_number % len(self.conn_list)]
        ops = (self.op1, self.op2, self.op3, self.op4)
        txns = (self.txn1, self.txn2, self.txn3, self.txn4)
        # for ok, txn in zip(ops, txns):
        #     print ', '.join('%s(%d)[%s]' % (ok[0], ok[1], txn))
        for i, ot in enumerate(zip(ops, txns)):
            ok, txn = ot
            op, k = ok

            # Close and reopen the connection and cursor.
            if reopen == 'reopen':
                self.reopen_conn()
                c = self.session.open_cursor(self.uri, None, 'overwrite')

            self.session.begin_transaction(
                (self.scenario_number % 2) and 'sync' or None)
            # Test multiple operations per transaction by always
            # doing the same operation on key k + 1.
            k1 = k + 1
            # print '%d: %s(%d)[%s]' % (i, ok[0], ok[1], txn)
            if op == 'insert' or op == 'update':
                c[k] = c[k1] = i + 2
                current[k] = current[k1] = i + 2
            elif op == 'remove':
                c.set_key(k)
                c.remove()
                c.set_key(k1)
                c.remove()
                if k in current:
                    del current[k]
                if k1 in current:
                    del current[k1]

            # print current
            # Check the state after each operation.
            self.check_all(current, committed)

            if txn == 'commit':
                committed = current.copy()
                self.session.commit_transaction()
            elif txn == 'rollback':
                current = committed.copy()
                self.session.rollback_transaction()

            # Check the state after each commit/rollback.
            self.check_all(current, committed)

        # check_log() is slow, we don't run it on every scenario.
        if self.scenario_number % test_txn02.checklog_mod == 0:
            self.check_log(committed)
class test_compress01(wttest.WiredTigerTestCase):
    types = [
        ('file', dict(uri='file:test_compress01')),
        ('table', dict(uri='table:test_compress01')),
    ]
    compress = [
        ('bzip2', dict(compress='bzip2')),
        ('nop', dict(compress='nop')),
        ('snappy', dict(compress='snappy')),
        ('none', dict(compress=None)),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, compress))

    nrecords = 10000
    bigvalue = "abcdefghij" * 1000

    # Override WiredTigerTestCase, we have extensions.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(dir,
            'create,' +
            ('error_prefix="%s: ",' % self.shortid()) +
            self.extensionArg(self.compress))
        self.pr(`conn`)
        return conn

    # Return the wiredtiger_open extension argument for a shared library.
    def extensionArg(self, name):
        if name == None:
            return ''
        testdir = os.path.dirname(__file__)
        extdir = os.path.join(run.wt_builddir, 'ext/compressors')
        extfile = os.path.join(
            extdir, name, '.libs', 'libwiredtiger_' + name + '.so')
        if not os.path.exists(extfile):
            self.skipTest('compression extension "' + extfile + '" not built')
        return ',extensions=["' + extfile + '"]'

    # Create a table, add keys with both big and small values, then verify
    # them.
    def test_compress(self):
        # Use relatively small leaf pages to force big values to be overflow
        # items, but still large enough that we get some compression action.
        params = 'key_format=S,value_format=S,leaf_page_max=4096'
        if self.compress != None:
            params += ',block_compressor=' + self.compress
        self.session.create(self.uri, params)

        cursor = self.session.open_cursor(self.uri, None)
        for idx in xrange(1, self.nrecords):
            cursor.set_key(`idx`)
            if idx / 12 == 0:
                cursor.set_value(`idx` + self.bigvalue)
            else:
                cursor.set_value(`idx` + "abcdefg")
            cursor.insert()
        cursor.close()

        # Force the cache to disk, so we read compressed pages from disk.
        self.reopen_conn()

        cursor = self.session.open_cursor(self.uri, None)
        for idx in xrange(1, self.nrecords):
            cursor.set_key(`idx`)
            self.assertEqual(cursor.search(), 0)
            if idx / 12 == 0:
                self.assertEquals(cursor.get_value(), `idx` + self.bigvalue)
            else:
                self.assertEquals(cursor.get_value(), `idx` + "abcdefg")
        cursor.close()
class test_dump(wttest.WiredTigerTestCase, suite_subprocess):
    dir = 'dump.dir'            # Backup directory name

    name = 'test_dump'
    name2 = 'test_dumpb'
    nentries = 2500

    dumpfmt = [('hex', dict(hex=1)), ('txt', dict(hex=0))]
    keyfmt = [('integer', dict(keyfmt='i')), ('recno', dict(keyfmt='r')),
              ('string', dict(keyfmt='S'))]
    types = [
        ('file', dict(uri='file:', config='', lsm=False,
            populate=simple_populate,
            populate_check=simple_populate_check)),
        ('lsm', dict(uri='lsm:', config='', lsm=True,
            populate=simple_populate,
            populate_check=simple_populate_check)),
        ('table-simple', dict(uri='table:', config='', lsm=False,
            populate=simple_populate,
            populate_check=simple_populate_check)),
        ('table-simple-lsm', dict(uri='table:', config='type=lsm', lsm=True,
            populate=simple_populate,
            populate_check=simple_populate_check)),
        ('table-complex', dict(uri='table:', config='', lsm=False,
            populate=complex_populate,
            populate_check=complex_populate_check)),
        ('table-complex-lsm', dict(uri='table:', config='type=lsm', lsm=True,
            populate=complex_populate,
            populate_check=complex_populate_check))
    ]
    scenarios = number_scenarios(
        multiply_scenarios('.', types, keyfmt, dumpfmt))

    # Extract the values lines from the dump output.
    def value_lines(self, fname):
        # mode:
        #   0 == we are in the header
        #   1 == next line is key
        #   2 == next line is value
        mode = 0
        lines = []
        for line in open(fname).readlines():
            if mode == 0:
                if line == 'Data\n':
                    mode = 1
            elif mode == 1:
                mode = 2
            else:
                # This is a value line, keep it.
                lines.append(line)
                mode = 1
        return sorted(lines)

    def compare_dump_values(self, f1, f2):
        l1 = self.value_lines(f1)
        l2 = self.value_lines(f2)
        self.assertEqual(l1, l2)

    # Dump, re-load and do a content comparison.
    def test_dump(self):
        # LSM and column-store isn't a valid combination.
        if self.lsm and self.keyfmt == 'r':
            return

        # Create the object.
        uri = self.uri + self.name
        uri2 = self.uri + self.name2
        self.populate(self, uri,
            self.config + ',key_format=' + self.keyfmt, self.nentries)

        # Dump the object.
        os.mkdir(self.dir)
        if self.hex == 1:
            self.runWt(['dump', '-x', uri], outfilename='dump.out')
        else:
            self.runWt(['dump', uri], outfilename='dump.out')

        # Re-load the object.
        self.runWt(['-h', self.dir, 'load', '-f', 'dump.out'])

        # Check the database contents.
        self.runWt(['list'], outfilename='list.out')
        self.runWt(['-h', self.dir, 'list'], outfilename='list.out.new')
        s1 = set(open('list.out').read().split())
        s2 = set(open('list.out.new').read().split())
        self.assertEqual(not s1.symmetric_difference(s2), True)

        # Check the object's contents.
        self.reopen_conn(self.dir)
        self.populate_check(self, uri, self.nentries)

        # Re-load the object again in the original directory.
        self.reopen_conn('.')
        self.runWt(['-h', self.dir, 'load', '-f', 'dump.out'])

        # Check the contents, they shouldn't have changed.
        self.populate_check(self, uri, self.nentries)

        # Re-load the object again, but confirm -n (no overwrite) fails.
        self.runWt(['-h', self.dir, 'load', '-n', '-f', 'dump.out'],
            errfilename='errfile.out')
        self.check_non_empty_file('errfile.out')

        # If there are indices, dump one of them and check the output.
        if self.populate == complex_populate:
            indexuri = 'index:' + self.name + ':indx1'
            hexopt = ['-x'] if self.hex == 1 else []
            self.runWt(['-h', self.dir, 'dump'] + hexopt + [indexuri],
                outfilename='dumpidx.out')
            self.check_non_empty_file('dumpidx.out')
            self.compare_dump_values('dump.out', 'dumpidx.out')

        # Re-load the object into a different table uri.
        shutil.rmtree(self.dir)
        os.mkdir(self.dir)
        self.runWt(
            ['-h', self.dir, 'load', '-r', self.name2, '-f', 'dump.out'])

        # Check the contents in the new table.
        self.reopen_conn(self.dir)
        self.populate_check(self, uri2, self.nentries)
class test_cursor_random(wttest.WiredTigerTestCase):
    types = [
        ('file', dict(type='file:random')),
        ('table', dict(type='table:random'))
    ]
    config = [
        ('sample',
            dict(config='next_random=true,next_random_sample_size=35')),
        ('not-sample', dict(config='next_random=true'))
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, config))

    # Check that opening a random cursor on a row-store returns not-supported
    # for methods other than next, reconfigure and reset, and next returns
    # not-found.
    def test_cursor_random(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None, self.config)
        msg = "/Unsupported cursor/"
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.compare(cursor), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.insert(), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.prev(), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.remove(), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.search(), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.search_near(), msg)
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError, lambda: cursor.update(), msg)

        self.assertEqual(cursor.next(), wiredtiger.WT_NOTFOUND)
        self.assertEquals(cursor.reconfigure(), 0)
        self.assertEquals(cursor.reset(), 0)
        cursor.close()

    # Check that next_random works with a single value, repeatedly.
    def test_cursor_random_single_record(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)
        cursor['AAA'] = 'BBB'
        cursor.close()
        cursor = self.session.open_cursor(uri, None, self.config)
        for i in range(1, 5):
            self.assertEquals(cursor.next(), 0)
            self.assertEquals(cursor.get_key(), 'AAA')
        cursor.close()

    # Check that next_random works in the presence of a larger set of values,
    # where the values are in an insert list.
    def test_cursor_random_multiple_insert_records(self):
        uri = self.type
        if uri.startswith('file:'):
            simple_populate(self, uri,
                'allocation_size=512,leaf_page_max=512,key_format=S', 100)
        else:
            complex_populate(self, uri,
                'allocation_size=512,leaf_page_max=512,key_format=S', 100)

        # In an insert list, next_random always selects the middle key/value
        # pair, all we can do is confirm cursor.next works.
        cursor = self.session.open_cursor(uri, None, self.config)
        self.assertEqual(cursor.next(), 0)

    # Check that next_random works in the presence of a larger set of values,
    # where the values are in a disk format page.
    def cursor_random_multiple_page_records(self, reopen):
        uri = self.type
        if uri.startswith('file:'):
            simple_populate(self, uri,
                'allocation_size=512,leaf_page_max=512,key_format=S', 10000)
        else:
            complex_populate(self, uri,
                'allocation_size=512,leaf_page_max=512,key_format=S', 10000)

        # Optionally close the connection so everything is forced to disk,
        # insert lists are an entirely different path in the code.
        if reopen:
            self.reopen_conn()

        cursor = self.session.open_cursor(uri, None, self.config)
        last = ''
        match = 0
        for i in range(1, 10):
            self.assertEqual(cursor.next(), 0)
            current = cursor.get_key()
            if current == last:
                match += 1
            last = current
        self.assertLess(match, 5,
            'next_random did not return random records, too many matches found')

    def test_cursor_random_multiple_page_records_reopen(self):
        self.cursor_random_multiple_page_records(1)

    def test_cursor_random_multiple_page_records(self):
        self.cursor_random_multiple_page_records(0)
class test_cursor_random_invisible(wttest.WiredTigerTestCase):
    types = [
        ('file', dict(type='file:random')),
        ('table', dict(type='table:random'))
    ]
    config = [
        ('sample',
            dict(config='next_random=true,next_random_sample_size=35')),
        ('not-sample', dict(config='next_random=true'))
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, config))

    def test_cursor_random_invisible_all(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(1, 100):
            cursor[key_populate(cursor, i)] = value_populate(cursor, i)

        # Open another session, the updates won't yet be visible, we shouldn't
        # find anything at all.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEqual(cursor.next(), wiredtiger.WT_NOTFOUND)

    def test_cursor_random_invisible_after(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Insert a single leading record.
        cursor[key_populate(cursor, 1)] = value_populate(cursor, 1)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(2, 100):
            cursor[key_populate(cursor, i)] = value_populate(cursor, i)

        # Open another session, the updates won't yet be visible, we should
        # return the only possible record.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEquals(cursor.next(), 0)
        self.assertEqual(cursor.get_key(), key_populate(cursor, 1))

    def test_cursor_random_invisible_before(self):
        uri = self.type
        self.session.create(uri, 'key_format=S,value_format=S')
        cursor = self.session.open_cursor(uri, None)

        # Insert a single trailing record.
        cursor[key_populate(cursor, 99)] = value_populate(cursor, 99)

        # Start a transaction.
        self.session.begin_transaction()
        for i in range(2, 100):
            cursor[key_populate(cursor, i)] = value_populate(cursor, i)

        # Open another session, the updates won't yet be visible, we should
        # return the only possible record.
        s = self.conn.open_session()
        cursor = s.open_cursor(uri, None, self.config)
        self.assertEquals(cursor.next(), 0)
        self.assertEqual(cursor.get_key(), key_populate(cursor, 99))
class test_stat01(wttest.WiredTigerTestCase):
    """
    Test statistics
    """
    config = 'internal_page_max=4K,leaf_page_max=8K'
    nentries = 25

    types = [('file', dict(uri='file:test_stat01.wt')),
             ('table', dict(uri='table:test_stat01.wt'))]
    keyfmt = [
        ('recno', dict(keyfmt='r')),
        ('string', dict(keyfmt='S')),
    ]
    scenarios = number_scenarios(multiply_scenarios('.', types, keyfmt))

    # Override WiredTigerTestCase so the connection is opened with statistics
    # enabled.
    def setUpConnectionOpen(self, dir):
        conn = wiredtiger.wiredtiger_open(
            dir, 'create,statistics=(all),' +
            'error_prefix="%s: "' % self.shortid())
        return conn

    def statstr_to_int(self, str):
        """
        Convert a statistics value string, which may be in either form:
        '12345' or '33M (33604836)'
        """
        parts = str.rpartition('(')
        return int(parts[2].rstrip(')'))

    # Do a quick check of the entries in the stats cursor, the "lookfor"
    # string should appear with a minimum value of at least "min".
    def check_stats(self, statcursor, min, lookfor):
        stringclass = ''.__class__
        intclass = (0).__class__

        # Reset the cursor, we're called multiple times.
        statcursor.reset()

        found = False
        foundval = 0
        for id, desc, valstr, val in statcursor:
            self.assertEqual(type(desc), stringclass)
            self.assertEqual(type(valstr), stringclass)
            self.assertEqual(type(val), intclass)
            self.assertEqual(val, self.statstr_to_int(valstr))
            self.printVerbose(2, '  stat: \'' + desc + '\', \'' +
                valstr + '\', ' + str(val))
            if desc == lookfor:
                found = True
                foundval = val

        self.assertTrue(found, 'in stats, did not see: ' + lookfor)
        self.assertTrue(foundval >= min)

    # Test simple connection statistics.
    def test_basic_conn_stats(self):
        # Build an object and force some writes.
        config = self.config + ',key_format=' + self.keyfmt
        simple_populate(self, self.uri, config, 1000)
        self.session.checkpoint(None)

        # See that we can get a specific stat value by its key and verify its
        # entry is self-consistent.
        allstat_cursor = self.session.open_cursor('statistics:', None, None)
        self.check_stats(allstat_cursor, 10, 'block-manager: blocks written')
        values = allstat_cursor[stat.conn.block_write]
        self.assertEqual(values[0], 'block-manager: blocks written')
        val = self.statstr_to_int(values[1])
        self.assertEqual(val, values[2])
        allstat_cursor.close()

    # Test simple object statistics.
    def test_basic_data_source_stats(self):
        # Build an object.
        config = self.config + ',key_format=' + self.keyfmt
        self.session.create(self.uri, config)
        cursor = self.session.open_cursor(self.uri, None, None)
        value = ""
        for i in range(1, self.nentries):
            value = value + 1000 * "a"
            cursor[key_populate(cursor, i)] = value
        cursor.close()

        # Force the object to disk, otherwise we can't check the overflow
        # count.
        self.reopen_conn()

        # See that we can get a specific stat value by its key and verify its
        # entry is self-consistent.
        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, None)
        self.check_stats(cursor, 8192, 'btree: maximum leaf page size')
        self.check_stats(cursor, 4096, 'btree: maximum internal page size')
        self.check_stats(cursor, 10, 'btree: overflow pages')
        values = cursor[stat.dsrc.btree_overflow]
        self.assertEqual(values[0], 'btree: overflow pages')
        val = self.statstr_to_int(values[1])
        self.assertEqual(val, values[2])
        cursor.close()

        cursor = self.session.open_cursor(
            'statistics:' + self.uri, None, "statistics=(size)")
        values = cursor[stat.dsrc.block_size]
        self.assertNotEqual(values[2], 0)
        cursor.close()

    # Test simple per-checkpoint statistics.
    def test_checkpoint_stats(self):
        for name in ('first', 'second', 'third'):
            config = self.config + ',key_format=' + self.keyfmt
            helper.simple_populate(self, self.uri, config, self.nentries)
            self.session.checkpoint('name=' + name)
            cursor = self.session.open_cursor(
                'statistics:' + self.uri, None, 'checkpoint=' + name)
            self.assertEqual(
                cursor[stat.dsrc.btree_entries][2], self.nentries)
            cursor.close()

    def test_missing_file_stats(self):
        self.assertRaises(wiredtiger.WiredTigerError, lambda:
            self.session.open_cursor('statistics:file:DoesNotExist'))
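# A quick worked example of the two value-string forms statstr_to_int()
# accepts, duplicated here as a standalone function so it can be run outside
# the test class (same logic as the method above).
def statstr_to_int(s):
    parts = s.rpartition('(')
    return int(parts[2].rstrip(')'))

assert statstr_to_int('12345') == 12345                # plain integer form
assert statstr_to_int('33M (33604836)') == 33604836    # scaled form, exact value in parens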