def test_backup26(self):
    # Verify selective (partial) backup: tables placed on the remove list
    # must not be restorable from the backup directory, while every other
    # table must open and still contain its data after recovery.
    selective_remove_uri_file_list = []
    selective_remove_uri_list = []
    selective_uri_list = []
    for i in range(0, self.ntables):
        uri = "table:{0}".format(self.uri + str(i))
        dataset = SimpleDataSet(self, uri, 100, key_format="S")
        dataset.populate()
        # Append the table uri to the selective backup remove list until the
        # set percentage. These tables will not be copied over in selective
        # backup.
        if (i <= int(self.ntables * self.percentage)):
            selective_remove_uri_list.append(uri)
            selective_remove_uri_file_list.append(
                "{0}.wt".format(self.uri + str(i)))
        else:
            selective_uri_list.append(uri)
    self.session.checkpoint()

    os.mkdir(self.dir)
    # Now copy the files using full backup. This should not include the
    # tables inside the remove list. (Return value — the list of copied
    # files — is not needed here.)
    self.take_selective_backup(self.dir, selective_remove_uri_file_list)

    # Build the backup_restore_target config value: a double-quoted list of
    # the URIs that were copied, optionally reversed to vary target order.
    uris = selective_uri_list[::-1] if self.reverse else selective_uri_list
    target_uris = str(uris).replace("\'", "\"")

    starttime = time.time()
    # After the full backup, open and recover the backup database.
    backup_conn = self.wiredtiger_open(
        self.dir, "backup_restore_target={0}".format(target_uris))
    elapsed = time.time() - starttime
    self.pr("%s partial backup has taken %.2f seconds." %
            (str(self), elapsed))
    bkup_session = backup_conn.open_session()

    # Open the cursor from uris that were not part of the selective backup
    # and expect failure since file doesn't exist. Bind the loop variable as
    # a default argument so the lambda captures the current value.
    for remove_uri in selective_remove_uri_list:
        self.assertRaisesException(
            wiredtiger.WiredTigerError,
            lambda uri=remove_uri: bkup_session.open_cursor(uri, None, None))

    # Open the cursors on tables that copied over to the backup directory.
    # They should still recover properly.
    for uri in selective_uri_list:
        c = bkup_session.open_cursor(uri, None, None)
        ds = SimpleDataSet(self, uri, 100, key_format="S")
        ds.check_cursor(c)
        c.close()
    backup_conn.close()
def test_sharing(self):
    # Verify that a secondary database can share a tree with the primary by
    # copying its metadata, read from the shared checkpoint, and follow the
    # primary to a newer checkpoint via alter().
    # FIXME: WT-8235 Enable the test once file containing transaction ids is
    # supported.
    self.skipTest(
        'Sharing the checkpoint file containing transaction ids is not supported'
    )

    ds = SimpleDataSet(self, self.uri, 10)
    ds.populate()
    ds.check()
    self.session.checkpoint()
    ds.check()

    # Create a secondary database
    dir2 = os.path.join(self.home, 'SECONDARY')
    os.mkdir(dir2)
    conn2 = self.setUpConnectionOpen(dir2)
    session2 = conn2.open_session()

    # Reference the tree from the secondary: copy the primary's metadata
    # entry under a relative path, marked read-only.
    metac = self.session.open_cursor('metadata:')
    metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
    uri2 = self.uri[:5] + '../' + self.uri[5:]
    metac2[uri2] = metac[self.uri] + ",readonly=1"

    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    newds = SimpleDataSet(self, self.uri, 10000)
    newds.populate()
    newds.check()
    self.session.checkpoint()
    newds.check()

    # Check we can still read from the last checkpoint
    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    # Bump to new checkpoint: extract the checkpoint clause from the
    # primary's current metadata and apply it to the secondary via alter.
    origmeta = metac[self.uri]
    checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
    self.pr('Orig checkpoint: ' + checkpoint)
    session2.alter(uri2, checkpoint)
    # Fixed typo in log message: 'secondaery' -> 'secondary'.
    self.pr('New metadata on secondary: ' + metac2[uri2])

    # Check that we can see the new data
    cursor2 = session2.open_cursor(uri2)
    newds.check_cursor(cursor2)
def test_sharing(self):
    # Verify tree sharing between a primary and a secondary database when
    # the file uses log-structured block allocation: the secondary reads
    # the shared checkpoint and can advance to a newer one via alter().
    args = 'block_allocation=log-structured'
    self.verbose(
        3, 'Test log-structured allocation with config: ' + args +
        ' count: ' + str(self.nrecs))

    ds = SimpleDataSet(self, self.uri, 10, config=args)
    ds.populate()
    ds.check()
    self.session.checkpoint()
    ds.check()

    # Create a secondary database
    dir2 = os.path.join(self.home, 'SECONDARY')
    os.mkdir(dir2)
    conn2 = self.setUpConnectionOpen(dir2)
    session2 = conn2.open_session()

    # Reference the tree from the secondary: copy the primary's metadata
    # entry under a relative path, marked read-only.
    metac = self.session.open_cursor('metadata:')
    metac2 = session2.open_cursor('metadata:', None, 'readonly=0')
    uri2 = self.uri[:5] + '../' + self.uri[5:]
    metac2[uri2] = metac[self.uri] + ",readonly=1"

    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    newds = SimpleDataSet(self, self.uri, 10000, config=args)
    newds.populate()
    newds.check()
    self.session.checkpoint()
    newds.check()

    # Check we can still read from the last checkpoint
    cursor2 = session2.open_cursor(uri2)
    ds.check_cursor(cursor2)
    cursor2.close()

    # Bump to new checkpoint: extract the checkpoint clause from the
    # primary's current metadata and apply it to the secondary via alter.
    origmeta = metac[self.uri]
    checkpoint = re.search(r',checkpoint=\(.+?\)\)', origmeta).group(0)[1:]
    self.pr('Orig checkpoint: ' + checkpoint)
    session2.alter(uri2, checkpoint)
    # Fixed typo in log message: 'secondaery' -> 'secondary'.
    self.pr('New metadata on secondary: ' + metac2[uri2])

    # Check that we can see the new data
    cursor2 = session2.open_cursor(uri2)
    newds.check_cursor(cursor2)