def make_snapshot(self, node, ks, cf, name):
    """Flush and snapshot table ``cf`` in keyspace ``ks`` on ``node``, then
    copy every data directory's snapshot into a fresh temp dir.

    Returns the temp dir path; copies land at <tmpdir>/<copy-index>/<ks>/<cf>.
    """
    debug("Making snapshot....")
    node.flush()
    # Explicit kwargs instead of the fragile ``.format(**locals())`` idiom,
    # which silently depends on every local name in scope.
    snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
    node.nodetool(snapshot_cmd)
    tmpdir = safe_mkdtemp()
    # NOTE(review): these two dirs are created at the tmpdir root but the
    # copies below go to <tmpdir>/<x>/<ks>/<cf>; they look vestigial --
    # TODO confirm nothing downstream reads the empty <tmpdir>/<ks>/<cf>.
    os.mkdir(os.path.join(tmpdir, ks))
    os.mkdir(os.path.join(tmpdir, ks, cf))
    # Find the snapshot dir, it's different in various C* versions: newer
    # versions append a table-id suffix to the table dir, so fall back to glob.
    x = 0
    for data_dir in node.data_directories():
        snapshot_dir = "{data_dir}/{ks}/{cf}/snapshots/{name}".format(
            data_dir=data_dir, ks=ks, cf=cf, name=name)
        if not os.path.isdir(snapshot_dir):
            snapshot_dirs = glob.glob(
                "{data_dir}/{ks}/{cf}-*/snapshots/{name}".format(
                    data_dir=data_dir, ks=ks, cf=cf, name=name))
            if len(snapshot_dirs) > 0:
                snapshot_dir = snapshot_dirs[0]
            else:
                # No snapshot in this data directory; skip it.
                continue
        debug("snapshot_dir is : " + snapshot_dir)
        debug("snapshot copy is : " + tmpdir)
        # Copy files from the snapshot dir to existing temp dir
        distutils.dir_util.copy_tree(str(snapshot_dir),
                                     os.path.join(tmpdir, str(x), ks, cf))
        x += 1
    return tmpdir
def make_snapshot(self, node, ks, cf, name):
    """Flush and snapshot table ``cf`` in keyspace ``ks`` on ``node``, then
    copy the snapshot into a fresh temp dir laid out as <tmpdir>/<ks>/<cf>.

    Returns the temp dir path.
    """
    debug("Making snapshot....")
    node.flush()
    # Explicit kwargs instead of the fragile ``.format(**locals())`` idiom.
    snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(
        ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(
        snapshot_cmd=snapshot_cmd))
    node.nodetool(snapshot_cmd)
    tmpdir = safe_mkdtemp()
    os.mkdir(os.path.join(tmpdir, ks))
    os.mkdir(os.path.join(tmpdir, ks, cf))
    node_dir = node.get_path()
    # Find the snapshot dir, it's different in various C* versions: newer
    # versions append a table-id suffix to the table directory.
    snapshot_dir = "{node_dir}/data/{ks}/{cf}/snapshots/{name}".format(
        node_dir=node_dir, ks=ks, cf=cf, name=name)
    if not os.path.isdir(snapshot_dir):
        # An IndexError here means no snapshot directory was found at all.
        snapshot_dir = glob.glob(
            "{node_dir}/data/{ks}/{cf}-*/snapshots/{name}".format(
                node_dir=node_dir, ks=ks, cf=cf, name=name))[0]
    debug("snapshot_dir is : " + snapshot_dir)
    debug("snapshot copy is : " + tmpdir)
    # Copy files from the snapshot dir to existing temp dir
    distutils.dir_util.copy_tree(str(snapshot_dir),
                                 os.path.join(tmpdir, ks, cf))
    return tmpdir
def make_snapshot(self, node, ks, cf, name):
    """Snapshot ``ks``.``cf`` on ``node`` and gather every data directory's
    snapshot under a new temp dir (<tmpdir>/<copy-index>/<ks>/<cf>).

    Returns the temp dir path.
    """
    debug("Making snapshot....")
    node.flush()
    cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=cmd))
    node.nodetool(cmd)
    copy_root = safe_mkdtemp()
    os.mkdir(os.path.join(copy_root, ks))
    os.mkdir(os.path.join(copy_root, ks, cf))
    # The snapshot dir location differs across C* versions; newer versions
    # add a table-id suffix to the table directory, hence the glob fallback.
    copied = 0
    for data_dir in node.data_directories():
        candidate = "{data_dir}/{ks}/{cf}/snapshots/{name}".format(
            data_dir=data_dir, ks=ks, cf=cf, name=name)
        if not os.path.isdir(candidate):
            matches = glob.glob(
                "{data_dir}/{ks}/{cf}-*/snapshots/{name}".format(
                    data_dir=data_dir, ks=ks, cf=cf, name=name))
            if not matches:
                # Nothing snapshotted in this data directory.
                continue
            candidate = matches[0]
        debug("snapshot_dir is : " + candidate)
        debug("snapshot copy is : " + copy_root)
        # Pull this data directory's snapshot into its own numbered subdir.
        distutils.dir_util.copy_tree(
            str(candidate), os.path.join(copy_root, str(copied), ks, cf))
        copied += 1
    return copy_root
def make_snapshot(self, node, ks, cf, name):
    """Flush and snapshot ``ks``.``cf`` on ``node``, then copy the node's
    whole data directory for ``ks`` into a fresh temp dir.

    Returns the temp dir path.
    """
    debug("Making snapshot....")
    node.flush()
    # Explicit kwargs instead of the fragile ``.format(**locals())`` idiom.
    snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(
        ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(
        snapshot_cmd=snapshot_cmd))
    node.nodetool(snapshot_cmd)
    tmpdir = safe_mkdtemp()
    # Copy files from the snapshot dir to existing temp dir.
    # (Removed a previously assigned but unused ``node_dir`` local.)
    distutils.dir_util.copy_tree(
        os.path.join(node.get_path(), 'data', ks), tmpdir)
    return tmpdir
def make_snapshot(self, node, ks, cf, name):
    """Flush and snapshot ``ks``.``cf`` on ``node`` and copy the node's
    entire ``ks`` data directory into a new temp dir, returning its path.
    """
    debug("Making snapshot....")
    node.flush()
    # Use explicit kwargs rather than ``.format(**locals())``, which
    # silently couples the format string to every local name in scope.
    snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(
        ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(
        snapshot_cmd=snapshot_cmd))
    node.nodetool(snapshot_cmd)
    tmpdir = safe_mkdtemp()
    # Copy files from the snapshot dir to existing temp dir.
    # (Dropped the unused ``node_dir`` local that was assigned here.)
    distutils.dir_util.copy_tree(
        os.path.join(node.get_path(), 'data', ks), tmpdir)
    return tmpdir
def make_snapshot(self, node, ks, cf, name):
    """Snapshot ``ks``.``cf`` on ``node`` and copy each numbered data
    directory ('data0', 'data1', ...) for ``ks`` into its own temp subdir.

    Returns the list of per-data-directory copy paths.
    """
    debug("Making snapshot....")
    node.flush()
    cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=cmd))
    node.nodetool(cmd)
    base_tmpdir = safe_mkdtemp()
    copies = []
    for idx in xrange(0, self.cluster.data_dir_count):
        dest = os.path.join(base_tmpdir, str(idx))
        os.mkdir(dest)
        # Copy this data directory's contents for the keyspace into the
        # matching numbered temp subdir.
        distutils.dir_util.copy_tree(
            os.path.join(node.get_path(), 'data{0}'.format(idx), ks), dest)
        copies.append(dest)
    return copies
def make_snapshot(self, node, ks, cf, name):
    """Flush and snapshot ``ks``.``cf`` on ``node``; copy the snapshot into
    a fresh temp dir laid out as <tmpdir>/<ks>/<cf> and return its path.
    """
    debug("Making snapshot....")
    node.flush()
    # Explicit kwargs instead of the fragile ``.format(**locals())`` idiom.
    snapshot_cmd = 'snapshot {ks} -cf {cf} -t {name}'.format(ks=ks, cf=cf, name=name)
    debug("Running snapshot cmd: {snapshot_cmd}".format(snapshot_cmd=snapshot_cmd))
    node.nodetool(snapshot_cmd)
    tmpdir = safe_mkdtemp()
    os.mkdir(os.path.join(tmpdir, ks))
    os.mkdir(os.path.join(tmpdir, ks, cf))
    node_dir = node.get_path()
    # Find the snapshot dir, it's different in various C* versions:
    # newer versions append a table-id suffix, hence the glob fallback.
    snapshot_dir = "{node_dir}/data/{ks}/{cf}/snapshots/{name}".format(
        node_dir=node_dir, ks=ks, cf=cf, name=name)
    if not os.path.isdir(snapshot_dir):
        # An IndexError here means no snapshot directory was found at all.
        snapshot_dir = glob.glob(
            "{node_dir}/data/{ks}/{cf}-*/snapshots/{name}".format(
                node_dir=node_dir, ks=ks, cf=cf, name=name))[0]
    debug("snapshot_dir is : " + snapshot_dir)
    debug("snapshot copy is : " + tmpdir)
    # Copy files from the snapshot dir to existing temp dir
    distutils.dir_util.copy_tree(str(snapshot_dir), os.path.join(tmpdir, ks, cf))
    return tmpdir
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False):
    """Run archive commit log restoration test.

    Phase 1 snapshots a user table plus the schema tables; phase 2 writes
    data that exists only in archived commitlogs and destroys the cluster;
    phase 3 restores the snapshots on a fresh cluster and checks how many
    rows reappear depending on the restore flags.
    """
    cluster = self.cluster
    cluster.populate(1)
    (node1,) = cluster.nodelist()

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: " + tmp_commitlog)

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                    [(r'^archive_command=.*$', 'archive_command=cp %path {tmp_commitlog}/%name'.format(
                        tmp_commitlog=tmp_commitlog))])

    cluster.start()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    debug("Writing first 30,000 rows...")
    self.insert_rows(session, 0, 30000)
    # Record when this first set of inserts finished:
    insert_cutoff_times = [time.gmtime()]

    # Delete all commitlog backups so far:
    for f in glob.glob(tmp_commitlog + "/*"):
        os.remove(f)

    snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')

    # Find the snapshot dir, it's different in various C*: schema tables
    # moved from the 'system' keyspace to 'system_schema' in C* 3.0, so
    # each schema snapshot picks names by version.
    if self.cluster.version() >= '3.0':
        system_ks_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
    else:
        system_ks_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')

    if self.cluster.version() >= '3.0':
        system_col_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
    else:
        system_col_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')

    # User-defined types only exist from 2.1 on, so on older versions no
    # snapshot is taken and system_ut_snapshot_dir stays unbound (the
    # finally block below guards on the same version check).
    if self.cluster.version() >= '3.0':
        system_ut_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
    elif self.cluster.version() >= '2.1':
        system_ut_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')

    if self.cluster.version() >= '3.0':
        system_cfs_snapshot_dir = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
    else:
        system_cfs_snapshot_dir = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')

    try:
        # Write more data:
        debug("Writing second 30,000 rows...")
        self.insert_rows(session, 30000, 60000)
        node1.flush()
        time.sleep(10)
        # Record when this second set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        debug("Writing final 5,000 rows...")
        self.insert_rows(session, 60000, 65000)
        # Record when the third set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 65000)

        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)

        self.assertTrue(len(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir))) > 0)

        cluster.flush()
        cluster.compact()
        node1.drain()
        if archive_active_commitlogs:
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)

        # Destroy the cluster
        cluster.stop()
        self.copy_logs(name=self.id().split(".")[0] + "_pre-restore")
        self._cleanup_cluster()
        cluster = self.cluster = self._get_cluster()
        cluster.populate(1)
        node1, = cluster.nodelist()

        # Restore schema from snapshots:
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
        else:
            self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
        # Same >= 2.1 guard as at snapshot time (no user types before 2.1).
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
        elif self.cluster.version() >= '2.1':
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
        else:
            self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)
        # Make the restored SSTables visible before counting.
        node1.nodetool('refresh ks cf')

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 30000)

        # Edit commitlog_archiving.properties. Remove the archive
        # command and set a restore command and restore_directories:
        if restore_archived_commitlog:
            replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                            [(r'^archive_command=.*$', 'archive_command='),
                             (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                             (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                                 tmp_commitlog=tmp_commitlog))])

            if restore_point_in_time:
                # Replay only up to the second insert batch's cutoff time.
                restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(**locals()))])

        debug("Restarting node1..")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        node1.nodetool('flush')
        node1.nodetool('compact')

        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        # Now we should have 30000 rows from the snapshot + 30000 rows
        # from the commitlog backups:
        if not restore_archived_commitlog:
            self.assertEqual(rows[0][0], 30000)
        elif restore_point_in_time:
            self.assertEqual(rows[0][0], 60000)
        else:
            self.assertEqual(rows[0][0], 65000)

    finally:
        # clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        debug("removing snapshot_dir: " + system_ks_snapshot_dir)
        shutil.rmtree(system_ks_snapshot_dir)
        debug("removing snapshot_dir: " + system_cfs_snapshot_dir)
        shutil.rmtree(system_cfs_snapshot_dir)
        # system_ut_snapshot_dir is only bound on >= 2.1 (see above).
        if self.cluster.version() >= '2.1':
            debug("removing snapshot_dir: " + system_ut_snapshot_dir)
            shutil.rmtree(system_ut_snapshot_dir)
        debug("removing snapshot_dir: " + system_col_snapshot_dir)
        shutil.rmtree(system_col_snapshot_dir)
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False):
    """Run archive commit log restoration test.

    Snapshots a user table and the schema tables, writes extra data that
    lives only in archived commitlogs, destroys the cluster, restores the
    snapshots on a fresh cluster and verifies how many rows come back
    depending on whether/how the archived commitlogs are replayed.
    """
    cluster = self.cluster
    cluster.populate(1)
    (node1, ) = cluster.nodelist()

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: " + tmp_commitlog)

    # Edit commitlog_archiving.properties and set an archive
    # command:
    replace_in_file(
        os.path.join(node1.get_path(), 'conf',
                     'commitlog_archiving.properties'),
        [(r'^archive_command=.*$',
          'archive_command=cp %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=tmp_commitlog))])

    cluster.start()

    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute(
        'CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')

    debug("Writing first 30,000 rows...")
    self.insert_rows(session, 0, 30000)
    # Record when this first set of inserts finished:
    insert_cutoff_times = [time.gmtime()]

    # Delete all commitlog backups so far:
    for f in glob.glob(tmp_commitlog + "/*"):
        os.remove(f)

    snapshot_dir = self.make_snapshot(node1, 'ks', 'cf', 'basic')

    # Schema tables moved to the 'system_schema' keyspace in C* 3.0.
    if self.cluster.version() >= '3.0':
        system_ks_snapshot_dir = self.make_snapshot(
            node1, 'system_schema', 'keyspaces', 'keyspaces')
    else:
        system_ks_snapshot_dir = self.make_snapshot(
            node1, 'system', 'schema_keyspaces', 'keyspaces')

    if self.cluster.version() >= '3.0':
        system_col_snapshot_dir = self.make_snapshot(
            node1, 'system_schema', 'columns', 'columns')
    else:
        system_col_snapshot_dir = self.make_snapshot(
            node1, 'system', 'schema_columns', 'columns')

    # Bug fix: user-defined types only exist from 2.1 on, so the previous
    # bare 'else' snapshotted a non-existent system.schema_usertypes table
    # on pre-2.1 clusters. Guard with >= '2.1' like the sibling variant of
    # this method; system_ut_snapshot_dir stays unbound on older versions.
    if self.cluster.version() >= '3.0':
        system_ut_snapshot_dir = self.make_snapshot(
            node1, 'system_schema', 'types', 'usertypes')
    elif self.cluster.version() >= '2.1':
        system_ut_snapshot_dir = self.make_snapshot(
            node1, 'system', 'schema_usertypes', 'usertypes')

    if self.cluster.version() >= '3.0':
        system_cfs_snapshot_dir = self.make_snapshot(
            node1, 'system_schema', 'tables', 'cfs')
    else:
        system_cfs_snapshot_dir = self.make_snapshot(
            node1, 'system', 'schema_columnfamilies', 'cfs')

    try:
        # Write more data:
        debug("Writing second 30,000 rows...")
        self.insert_rows(session, 30000, 60000)
        node1.flush()
        time.sleep(10)
        # Record when this second set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        debug("Writing final 5,000 rows...")
        self.insert_rows(session, 60000, 65000)
        # Record when the third set of inserts finished:
        insert_cutoff_times.append(time.gmtime())

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 65000)

        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)

        self.assertTrue(
            len(
                set(os.listdir(tmp_commitlog)) -
                set(os.listdir(commitlog_dir))) > 0)

        cluster.flush()
        cluster.compact()
        node1.drain()
        if archive_active_commitlogs:
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)

        # Destroy the cluster
        cluster.stop()
        self.copy_logs(name=self.id().split(".")[0] + "_pre-restore")
        self._cleanup_cluster()
        cluster = self.cluster = self._get_cluster()
        cluster.populate(1)
        node1, = cluster.nodelist()

        # Restore schema from snapshots:
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_ks_snapshot_dir, node1,
                                  'system_schema', 'keyspaces', 'keyspaces')
        else:
            self.restore_snapshot(system_ks_snapshot_dir, node1, 'system',
                                  'schema_keyspaces', 'keyspaces')
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_col_snapshot_dir, node1,
                                  'system_schema', 'columns', 'columns')
        else:
            self.restore_snapshot(system_col_snapshot_dir, node1, 'system',
                                  'schema_columns', 'columns')
        # Mirror the snapshot guard: only restore user types on >= 2.1.
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_ut_snapshot_dir, node1,
                                  'system_schema', 'types', 'usertypes')
        elif self.cluster.version() >= '2.1':
            self.restore_snapshot(system_ut_snapshot_dir, node1, 'system',
                                  'schema_usertypes', 'usertypes')
        if self.cluster.version() >= '3.0':
            self.restore_snapshot(system_cfs_snapshot_dir, node1,
                                  'system_schema', 'tables', 'cfs')
        else:
            self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system',
                                  'schema_columnfamilies', 'cfs')
        self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)
        # Make the restored SSTables visible before counting.
        node1.nodetool('refresh ks cf')

        rows = session.execute('SELECT count(*) from ks.cf')
        # Make sure we have the same amount of rows as when we snapshotted:
        self.assertEqual(rows[0][0], 30000)

        # Edit commitlog_archiving.properties. Remove the archive
        # command and set a restore command and restore_directories:
        if restore_archived_commitlog:
            replace_in_file(
                os.path.join(node1.get_path(), 'conf',
                             'commitlog_archiving.properties'),
                [(r'^archive_command=.*$', 'archive_command='),
                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                 (r'^restore_directories=.*$',
                  'restore_directories={tmp_commitlog}'.format(
                      tmp_commitlog=tmp_commitlog))])

            if restore_point_in_time:
                # Replay only up to the second insert batch's cutoff time.
                restore_time = time.strftime("%Y:%m:%d %H:%M:%S",
                                             insert_cutoff_times[1])
                replace_in_file(
                    os.path.join(node1.get_path(), 'conf',
                                 'commitlog_archiving.properties'),
                    [(r'^restore_point_in_time=.*$',
                      'restore_point_in_time={restore_time}'.format(
                          restore_time=restore_time))])

        debug("Restarting node1..")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        node1.nodetool('flush')
        node1.nodetool('compact')

        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        # Now we should have 30000 rows from the snapshot + 30000 rows
        # from the commitlog backups:
        if not restore_archived_commitlog:
            self.assertEqual(rows[0][0], 30000)
        elif restore_point_in_time:
            self.assertEqual(rows[0][0], 60000)
        else:
            self.assertEqual(rows[0][0], 65000)
    finally:
        # clean up
        debug("removing snapshot_dir: " + snapshot_dir)
        shutil.rmtree(snapshot_dir)
        debug("removing snapshot_dir: " + system_ks_snapshot_dir)
        shutil.rmtree(system_ks_snapshot_dir)
        debug("removing snapshot_dir: " + system_cfs_snapshot_dir)
        shutil.rmtree(system_cfs_snapshot_dir)
        # system_ut_snapshot_dir is only bound on >= 2.1 (see above), so
        # guard its removal to avoid a NameError on older versions.
        if self.cluster.version() >= '2.1':
            debug("removing snapshot_dir: " + system_ut_snapshot_dir)
            shutil.rmtree(system_ut_snapshot_dir)
        debug("removing snapshot_dir: " + system_col_snapshot_dir)
        shutil.rmtree(system_col_snapshot_dir)
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)
def test_archive_and_restore_commitlog_repeatedly(self):
    """
    @jira_ticket CASSANDRA-10593

    Archive commitlogs, then restart the node twice so the same archived
    segments are replayed repeatedly; replay must be idempotent (no
    duplicate rows, no failures).
    """
    cluster = self.cluster
    cluster.populate(1)
    node = cluster.nodelist()[0]

    # Temp dir that receives the commitlog archives.
    archive_dir = safe_mkdtemp()
    debug("tmp_commitlog: {}".format(archive_dir))

    # Configure archiving (via hard links) and restoration out of the
    # same directory in commitlog_archiving.properties.
    props_path = os.path.join(node.get_path(), 'conf',
                              'commitlog_archiving.properties')
    replace_in_file(
        props_path,
        [(r'^archive_command=.*$',
          'archive_command=ln %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=archive_dir)),
         (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
         (r'^restore_directories=.*$',
          'restore_directories={tmp_commitlog}'.format(
              tmp_commitlog=archive_dir))])

    cluster.start(wait_for_binary_proto=True)

    debug("Creating initial connection")
    session = self.patient_cql_connection(node)
    self.create_ks(session, 'ks', 1)
    session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
    debug("Writing 30,000 rows...")
    self.insert_rows(session, 0, 60000)

    try:
        # There must be at least one archived segment that is not one of
        # the node's active commitlogs.
        active_dir = os.path.join(node.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + active_dir)
        cluster.flush()

        archived_only = set(os.listdir(archive_dir)) - set(os.listdir(active_dir))
        self.assertGreater(len(archived_only), 0)

        debug("Flushing and doing first restart")
        cluster.compact()
        node.drain()
        # Restarting the node archives the active commitlogs.
        node.stop()
        node.start(wait_for_binary_proto=True)

        debug("Stopping and second restart")
        node.stop()
        node.start(wait_for_binary_proto=True)

        # Replaying the same archives again must not add any data.
        session = self.patient_cql_connection(node)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 60000)
    finally:
        debug("removing tmp_commitlog: " + archive_dir)
        shutil.rmtree(archive_dir)
def test_archive_and_restore_commitlog_repeatedly(self):
    """
    @jira_ticket CASSANDRA-10593
    Run archive commit log restoration test repeatedly to make sure it is
    idempotent and doesn't fail if done repeatedly
    """
    cluster = self.cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]

    # Create a temp directory for storing commitlog archives:
    tmp_commitlog = safe_mkdtemp()
    debug("tmp_commitlog: {}".format(tmp_commitlog))

    # Edit commitlog_archiving.properties and set an archive
    # command (hard link into the temp dir) plus a restore command and
    # restore directory pointing back at the same temp dir:
    replace_in_file(
        os.path.join(node1.get_path(), 'conf',
                     'commitlog_archiving.properties'),
        [(r'^archive_command=.*$',
          'archive_command=ln %path {tmp_commitlog}/%name'.format(
              tmp_commitlog=tmp_commitlog)),
         (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
         (r'^restore_directories=.*$',
          'restore_directories={tmp_commitlog}'.format(
              tmp_commitlog=tmp_commitlog))])

    cluster.start(wait_for_binary_proto=True)

    debug("Creating initial connection")
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute(
        'CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
    # NOTE(review): the message says 30,000 but 60,000 rows are inserted
    # below -- the log text looks stale; confirm before relying on it.
    debug("Writing 30,000 rows...")
    self.insert_rows(session, 0, 60000)

    try:
        # Check that there are at least one commit log backed up that
        # is not one of the active commit logs:
        commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
        debug("node1 commitlog dir: " + commitlog_dir)
        cluster.flush()

        # A non-empty set difference means at least one archived segment
        # is not an active segment.
        self.assertNotEqual(
            set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
            set())

        debug("Flushing and doing first restart")
        cluster.compact()
        node1.drain()
        # restart the node which causes the active commitlogs to be archived
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        debug("Stopping and second restart")
        node1.stop()
        node1.start(wait_for_binary_proto=True)

        # Shouldn't be any additional data since it's replaying the same stuff repeatedly
        session = self.patient_cql_connection(node1)
        rows = session.execute('SELECT count(*) from ks.cf')
        self.assertEqual(rows[0][0], 60000)
    finally:
        debug("removing tmp_commitlog: " + tmp_commitlog)
        shutil.rmtree(tmp_commitlog)