def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False):
    """Run archive commit log restoration test.

    Writes three batches of rows (30k + 30k + 5k), snapshots schema and data
    after the first batch, destroys the cluster, restores the snapshots and
    replays the archived commitlogs, then checks the surviving row count.

    :param restore_point_in_time: replay commitlogs only up to the timestamp
        recorded after the second batch (expect 60000 rows).
    :param restore_archived_commitlog: if False, configure no restore command
        at all (expect only the 30000 snapshotted rows).
    :param archive_active_commitlogs: restart the node before destroying the
        cluster so the still-active commitlogs also get archived.
    """
    cluster = self.cluster
    cluster.populate(1)
    (node1, ) = cluster.nodelist()

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: " + tmp_commitlog)

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(
        os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
        [(r'^archive_command=.*$',
          'archive_command=cp %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=tmp_commitlog))])

    cluster.start()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    debug("Writing first 30,000 rows...")
    self.insert_rows(session, 0, 30000)
    # Record when this first set of inserts finished:
    insert_cutoff_times = [time.gmtime()]

    # Delete all commitlog backups so far:
    for f in glob.glob(tmp_commitlog + "/*"):
        os.remove(f)

    snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')

    # The schema tables moved from the 'system' keyspace to 'system_schema'
    # in 3.0; evaluate the version check once instead of before every call.
    has_system_schema = self.cluster.version() >= '3.0'
    if has_system_schema:
        system_ks_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
        system_col_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
        system_ut_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
        system_cfs_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
    else:
        system_ks_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')
        system_col_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')
        system_ut_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')
        system_cfs_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')

    try:
        # Write more data:
        debug("Writing second 30,000 rows...")
        self.insert_rows(session, 30000, 60000)
        node1.flush()
        time.sleep(10)
        # Record when this second set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        debug("Writing final 5,000 rows...")
        self.insert_rows(session, 60000, 65000)
        # Record when the third set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 65000)

        # Check that there is at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        self.assertTrue(
            len(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir))) > 0)

        cluster.flush()
        cluster.compact()
        node1.drain()
        if archive_active_commitlogs:
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)

        # Destroy the cluster
        cluster.stop()
        self.copy_logs(name=self.id().split(".")[0] + "_pre-restore")
        self._cleanup_cluster()
        cluster = self.cluster = self._get_cluster()
        cluster.populate(1)
        node1, = cluster.nodelist()

        # Restore schema from snapshots:
        if has_system_schema:
            self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
            self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
            self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
        else:
            self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
            self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')
            self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)
        node1.nodetool('refresh ks cf')

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 30000)

        # Edit commitlog_archiving.properties. Remove the archive
        # command and set a restore command and restore_directories:
        if restore_archived_commitlog:
            replace_in_file(
                os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                [(r'^archive_command=.*$', 'archive_command='),
                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                 (r'^restore_directories=.*$',
                  'restore_directories={tmp_commitlog}'.format(
                      tmp_commitlog=tmp_commitlog))])

            if restore_point_in_time:
                restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                replace_in_file(
                    os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                    [(r'^restore_point_in_time=.*$',
                      'restore_point_in_time={restore_time}'.format(**locals()))])

        debug("Restarting node1..")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        node1.nodetool('flush')
        node1.nodetool('compact')

        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        # Now we should have 30000 rows from the snapshot + 30000 rows
        # from the commitlog backups:
        if not restore_archived_commitlog:
            self.assertEqual(rows[0][0], 30000)
        elif restore_point_in_time:
            self.assertEqual(rows[0][0], 60000)
        else:
            self.assertEqual(rows[0][0], 65000)
    finally:
        # clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        debug("removing snapshot_dir: " + system_ks_snapshot_dir)
        shutil.rmtree(system_ks_snapshot_dir)
        debug("removing snapshot_dir: " + system_cfs_snapshot_dir)
        shutil.rmtree(system_cfs_snapshot_dir)
        debug("removing snapshot_dir: " + system_ut_snapshot_dir)
        shutil.rmtree(system_ut_snapshot_dir)
        debug("removing snapshot_dir: " + system_col_snapshot_dir)
        shutil.rmtree(system_col_snapshot_dir)
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False):
    """Run archive commit log restoration test.

    Writes three batches of rows (30k + 30k + 5k), snapshots schema and data
    after the first batch, destroys the cluster, restores the snapshots and
    replays the archived commitlogs, then checks the surviving row count:
    30000 if no restore command is configured, 60000 for point-in-time
    restore up to the second batch, 65000 otherwise.
    """
    cluster = self.cluster
    cluster.populate(1)
    (node1,) = cluster.nodelist()
    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = tempfile.mkdtemp()
    debug("tmp_commitlog: " + tmp_commitlog)
    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                    [(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
                        tmp_commitlog=tmp_commitlog))])
    cluster.start()
    cursor = self.patient_cql_connection(node1)
    self.create_ks(cursor, 'ks', 1)
    cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
    debug("Writing first 30,000 rows...")
    self.insert_rows(cursor, 0, 30000)
    # Record when this first set of inserts finished:
    insert_cutoff_times = [time.gmtime()]
    # Delete all commitlog backups so far:
    for f in glob.glob(tmp_commitlog + "/*"):
        os.remove(f)
    # Snapshot the data plus the schema tables needed to recreate it.
    snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
    system_ks_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')
    system_col_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')
    # User-defined types (and their schema table) only exist from 2.1 on.
    if self.cluster.version() >= '2.1':
        system_ut_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')
    system_cfs_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')
    try:
        # Write more data:
        debug("Writing second 30,000 rows...")
        self.insert_rows(cursor, 30000, 60000)
        node1.flush()
        time.sleep(10)
        # Record when this second set of inserts finished:
        insert_cutoff_times.append(time.gmtime())
        debug("Writing final 5,000 rows...")
        self.insert_rows(cursor, 60000, 65000)
        # Record when the third set of inserts finished:
        insert_cutoff_times.append(time.gmtime())
        rows = cursor.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 65000)
        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        self.assertTrue(len(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir))) > 0)
        cluster.flush()
        cluster.compact()
        node1.drain()
        if archive_active_commitlogs:
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)
        # Destroy the cluster
        cluster.stop()
        self.copy_logs(name=self.id().split(".")[0] + "_pre-restore")
        self._cleanup_cluster()
        cluster = self.cluster = self._get_cluster()
        cluster.populate(1)
        node1, = cluster.nodelist()
        # Restore schema from snapshots:
        self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
        self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
        if self.cluster.version() >= '2.1':
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')
        self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')
        cluster.start(wait_for_binary_proto=True)
        cursor = self.patient_cql_connection(node1)
        node1.nodetool('refresh ks cf')
        rows = cursor.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 30000)
        # Edit commitlog_archiving.properties. Remove the archive
        # command and set a restore command and restore_directories:
        if restore_archived_commitlog:
            replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                            [(r'^archive_command=.*$', 'archive_command='),
                             (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                             (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                                 tmp_commitlog=tmp_commitlog))])
            if restore_point_in_time:
                # Only replay archived commitlogs up to the end of the second batch.
                restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(**locals()))])
        debug("Restarting node1..")
        node1.stop()
        node1.start(wait_for_binary_proto=True)
        node1.nodetool('flush')
        node1.nodetool('compact')
        cursor = self.patient_cql_connection(node1)
        rows = cursor.execute('SELECT count(*) from ks.cf')
        # Now we should have 30000 rows from the snapshot + 30000 rows
        # from the commitlog backups:
        if not restore_archived_commitlog:
            self.assertEqual(rows[0][0], 30000)
        elif restore_point_in_time:
            self.assertEqual(rows[0][0], 60000)
        else:
            self.assertEqual(rows[0][0], 65000)
    finally:
        # clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        debug("removing snapshot_dir: " + system_ks_snapshot_dir)
        shutil.rmtree(system_ks_snapshot_dir)
        debug("removing snapshot_dir: " + system_cfs_snapshot_dir)
        shutil.rmtree(system_cfs_snapshot_dir)
        if self.cluster.version() >= '2.1':
            debug("removing snapshot_dir: " + system_ut_snapshot_dir)
            shutil.rmtree(system_ut_snapshot_dir)
        debug("removing snapshot_dir: " + system_col_snapshot_dir)
        shutil.rmtree(system_col_snapshot_dir)
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
# --- Generation ------------------------------------------------------------
print('Generation')
print('=================')
print('')

# All per-certificate artifacts live under server-certificates/<signing-ca>/.
execute_command(
    'Create folders',
    'mkdir -p server-certificates/{0}/configs server-certificates/{0}/private-keys server-certificates/{0}/certificates server-certificates/{0}/certificate-chains server-certificates/{0}/certificate-signing-requests'
    .format(signing_ca_name))
execute_command(
    'Copy configuration template',
    'cp {0}/templates/server-certificate.conf server-certificates/{1}/configs/{2}.conf'
    .format(script_dir, signing_ca_name, server_certificate_name))

# Fill in the placeholders of the copied template.  The config path is the
# same for every substitution, so compute it (and the DC lines) once.
_server_cert_config = 'server-certificates/{0}/configs/{1}.conf'.format(
    signing_ca_name, server_certificate_name)
_domain_component_lines = '\n'.join(
    '{0}.domainComponent = "{1}"'.format(index, domain_component)
    for index, domain_component in enumerate(domain_components))
for _description, _placeholder, _value in (
        ('Replace organization name', 'ORGANIZATION_NAME', organization_name),
        ('Replace common name', 'COMMON_NAME', common_name),
        ('Replace domain components', 'DOMAIN_COMPONENTS', _domain_component_lines)):
    replace_in_file(_description, _server_cert_config, _placeholder, _value)
# Confirm before creating anything on disk.
# NOTE(review): the prompt mentions the *root* CA but this section generates
# the *signing* CA — confirm the wording against the sibling script.
response = input('Do you want to create the root certificate authority? [y,N] ')
print('')
if response != 'y' and response != 'Y':
    quit(-1)

# Generation
print('Generation')
print('=================')
print('')

# Directory layout plus the OpenSSL CA database files (index + serials).
execute_command('Create folders', 'mkdir -p ca/{0} ca/{0}/archive ca/{0}/db'.format(signing_ca_name))
execute_command('Create database structure (index.db)', 'cp /dev/null ca/{0}/db/index.db'.format(signing_ca_name))
execute_command('Create database structure (index.db.attr)', 'cp /dev/null ca/{0}/db/index.db.attr'.format(signing_ca_name))
execute_command('Create database structure (crt.srl)', 'echo 01 > ca/{0}/db/crt.srl'.format(signing_ca_name))
execute_command('Create database structure (crl.srl)', 'echo 01 > ca/{0}/db/crl.srl'.format(signing_ca_name))

# Instantiate the signing-CA OpenSSL config from its template.
execute_command('Copy configuration template', 'cp {0}/templates/signing-ca.conf ca/{1}/{1}.conf'.format(script_dir, signing_ca_name))
replace_in_file('Replace signing ca name', 'ca/{0}/{0}.conf'.format(signing_ca_name), 'SIGNING_CA_NAME', signing_ca_name)
replace_in_file('Replace organization name', 'ca/{0}/{0}.conf'.format(signing_ca_name), 'ORGANIZATION_NAME', organization_name)
replace_in_file('Replace common name', 'ca/{0}/{0}.conf'.format(signing_ca_name), 'COMMON_NAME', common_name)
replace_in_file('Replace domain components', 'ca/{0}/{0}.conf'.format(signing_ca_name), 'DOMAIN_COMPONENTS',
                '\n'.join(['{0}.domainComponent = "{1}"'.format(index, domain_component)
                           for index, domain_component in enumerate(domain_components)]))

# Generate the signing CA's key + CSR.  The passphrase is delivered via
# stdin only — it was previously also passed as an unused .format() argument,
# which was dead code and one edit away from leaking onto the command line.
execute_command('Generate private key and certificate signing request',
                'openssl req -new -config ca/{0}/{0}.conf -out ca/{0}/certificate-signing-request.csr -keyout ca/{0}/private-key.key -passout stdin'.format(signing_ca_name),
                stdin=signing_ca_passphrase)
# Have the root CA sign the signing CA's CSR.
execute_command('Generate and sign certificate',
                'openssl ca -batch -config ca/{0}/{0}.conf -in ca/{1}/certificate-signing-request.csr -out ca/{1}/certificate.crt -extensions signing_ca_ext -passin stdin'.format(root_ca_name, signing_ca_name),
                stdin=root_ca_passphrase)
# The CSR is no longer needed once the certificate has been issued.
execute_command('Delete certificate signing request', 'rm -rf ca/{0}/certificate-signing-request.csr'.format(signing_ca_name))
print('')
def test_archive_and_restore_commitlog_repeatedly(self):
    """
    @jira_ticket CASSANDRA-10593
    Run archive commit log restoration test repeatedly to make
    sure it is idempotent and doesn't fail if done repeatedly

    Inserts 60,000 rows, archives commitlogs via hardlink, restarts the
    node twice (each restart replays the same archived logs) and checks
    the row count has not changed.
    """
    cluster = self.cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: {}".format(tmp_commitlog))

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                    [(r'^archive_command=.*$', 'archive_command=ln %path {tmp_commitlog}/%name'.format(
                        tmp_commitlog=tmp_commitlog)),
                     (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                     (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                         tmp_commitlog=tmp_commitlog))])

    cluster.start(wait_for_binary_proto=True)

    debug("Creating initial connection")
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    # Fixed: message previously said "30,000" while 60,000 rows are written
    # (and asserted below).
    debug("Writing 60,000 rows...")
    self.insert_rows(session, 0, 60000)

    try:
        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        cluster.flush()
        self.assertGreater(len(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir))), 0)

        debug("Flushing and doing first restart")
        cluster.compact()
        node1.drain()
        # restart the node which causes the active commitlogs to be archived
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        debug("Stopping and second restart")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        # Shouldn't be any additional data since it's replaying the same stuff repeatedly
        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 60000)
    finally:
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
def test_archive_and_restore_commitlog_repeatedly(self):
    """
    @jira_ticket CASSANDRA-10593
    Run archive commit log restoration test repeatedly to make
    sure it is idempotent and doesn't fail if done repeatedly

    Inserts 60,000 rows, archives commitlogs via hardlink, restarts the
    node twice (each restart replays the same archived logs) and checks
    the row count has not changed.
    """
    cluster = self.cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: {}".format(tmp_commitlog))

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(
        os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
        [(r'^archive_command=.*$',
          'archive_command=ln %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=tmp_commitlog)),
         (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
         (r'^restore_directories=.*$',
          'restore_directories={tmp_commitlog}'.format(
              tmp_commitlog=tmp_commitlog))])

    cluster.start(wait_for_binary_proto=True)

    debug("Creating initial connection")
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    # Fixed: message previously said "30,000" while 60,000 rows are written
    # (and asserted below).
    debug("Writing 60,000 rows...")
    self.insert_rows(session, 0, 60000)

    try:
        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        cluster.flush()
        self.assertNotEqual(
            set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
            set())

        debug("Flushing and doing first restart")
        cluster.compact()
        node1.drain()
        # restart the node which causes the active commitlogs to be archived
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        debug("Stopping and second restart")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        # Shouldn't be any additional data since it's replaying the same stuff repeatedly
        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 60000)
    finally:
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False):
    """Run archive commit log restoration test.

    Writes three batches of rows (30k + 30k + 5k), snapshots the data after
    the first batch, destroys the cluster, restores the snapshot and replays
    the archived commitlogs, then checks the surviving row count: 30000 with
    no restore command, 60000 for point-in-time restore up to the second
    batch, 65000 otherwise.
    """
    cluster = self.cluster
    cluster.populate(1)
    (node1, ) = cluster.nodelist()

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = tempfile.mkdtemp()
    debug("tmp_commitlog: " + tmp_commitlog)

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(
        os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
        [(r'^archive_command=.*$',
          'archive_command=/bin/cp %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=tmp_commitlog))])

    cluster.start()

    cursor = self.patient_cql_connection(node1).cursor()
    self.create_ks(cursor, 'ks', 1)
    cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    debug("Writing first 30,000 rows...")
    self.insert_rows(cursor, 0, 30000)
    # Record when this first set of inserts finished:
    insert_cutoff_times = [time.gmtime()]

    # Delete all commitlog backups so far.  (Was os.system('rm .../*'),
    # which spawned a shell and silently ignored failures.)
    for f in glob.glob(tmp_commitlog + "/*"):
        os.remove(f)

    snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')
    # Clean up the snapshot and commitlog archive even if an assertion fails;
    # previously the rmtree only ran on success and tmp_commitlog was never
    # removed at all.
    try:
        # Write more data:
        debug("Writing second 30,000 rows...")
        self.insert_rows(cursor, 30000, 60000)
        node1.flush()
        time.sleep(10)
        # Record when this second set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        debug("Writing final 5,000 rows...")
        self.insert_rows(cursor, 60000, 65000)
        # Record when the third set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        cursor.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(cursor.fetchone()[0], 65000)

        # Check that there is at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        self.assertTrue(
            len(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir))) > 0)

        cluster.flush()
        cluster.compact()
        node1.drain()
        if archive_active_commitlogs:
            # Copy the active commitlogs to the backup directory:
            for f in glob.glob(commitlog_dir + "/*"):
                shutil.copy2(f, tmp_commitlog)

        # Destroy the cluster
        cluster.stop()
        self.copy_logs(name=self.id().split(".")[0] + "_pre-restore")
        cluster = self.cluster = self._get_cluster()
        cluster.populate(1)
        (node1, ) = cluster.nodelist()
        cluster.start()

        # Recreate the schema by hand, then load the snapshotted data:
        cursor = self.patient_cql_connection(node1).cursor()
        self.create_ks(cursor, 'ks', 1)
        cursor.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

        # Restore from snapshot:
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf')

        cursor.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(cursor.fetchone()[0], 30000)

        # Edit commitlog_archiving.properties. Remove the archive
        # command and set a restore command and restore_directories:
        if restore_archived_commitlog:
            replace_in_file(
                os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                [(r'^archive_command=.*$', 'archive_command='),
                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                 (r'^restore_directories=.*$',
                  'restore_directories={tmp_commitlog}'.format(
                      tmp_commitlog=tmp_commitlog))])

            if restore_point_in_time:
                restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                replace_in_file(
                    os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                    [(r'^restore_point_in_time=.*$',
                      'restore_point_in_time={restore_time}'.format(**locals()))])

        debug("Restarting node1..")
        node1.stop()
        node1.start()

        node1.nodetool('flush')
        node1.nodetool('compact')

        cursor = self.patient_cql_connection(node1).cursor()
        cursor.execute('SELECT count(*) from ks.cf')
        # Now we should have 30000 rows from the snapshot + 30000 rows
        # from the commitlog backups:
        if not restore_archived_commitlog:
            self.assertEqual(cursor.fetchone()[0], 30000)
        elif restore_point_in_time:
            self.assertEqual(cursor.fetchone()[0], 60000)
        else:
            self.assertEqual(cursor.fetchone()[0], 65000)
    finally:
        # clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)