Example #1
    def test_archive_and_restore_commitlog_repeatedly(self):
        """
        @jira_ticket CASSANDRA-10593
        Run the archive commitlog restoration test repeatedly to make sure it
        is idempotent and doesn't fail when run more than once
        """
        cluster = self.cluster
        cluster.populate(1)
        node1 = cluster.nodelist()[0]

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        logger.debug("tmp_commitlog: {}".format(tmp_commitlog))

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(
            os.path.join(node1.get_path(), 'conf',
                         'commitlog_archiving.properties'),
            [(r'^archive_command=.*$',
              'archive_command=ln %path {tmp_commitlog}/%name'.format(
                  tmp_commitlog=tmp_commitlog)),
             (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
             (r'^restore_directories=.*$',
              'restore_directories={tmp_commitlog}'.format(
                  tmp_commitlog=tmp_commitlog))])

        cluster.start()

        logger.debug("Creating initial connection")
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        session.execute(
            'CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        logger.debug("Writing 30,000 rows...")
        self.insert_rows(session, 0, 60000)

        try:
            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            logger.debug("node1 commitlog dir: " + commitlog_dir)

            cluster.flush()

            assert_directory_not_empty(tmp_commitlog, commitlog_dir)

            logger.debug("Flushing and doing first restart")
            cluster.compact()
            node1.drain()
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            logger.debug("Stopping and second restart")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            # There shouldn't be any additional data, since each restart replays the same commitlogs
            session = self.patient_cql_connection(node1)

            rows = session.execute('SELECT count(*) from ks.cf')
            assert rows[0][0] == 60000
        finally:
            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
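
All of these examples rewrite commitlog_archiving.properties through a
replace_in_file helper that is not shown on this page. Here is a minimal
sketch of what it plausibly does, inferred from how it is called (a file
path plus a list of (regex, replacement-line) pairs); the real dtest helper
may differ in detail:

    import re

    def replace_in_file(filepath, replacements):
        # Each (pattern, new_line) pair replaces any whole line matching
        # the regex with new_line; unmatched lines are kept as-is.
        with open(filepath) as f:
            lines = f.read().splitlines()
        with open(filepath, 'w') as f:
            for line in lines:
                for pattern, new_line in replacements:
                    if re.match(pattern, line):
                        line = new_line
                f.write(line + '\n')
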
Example #2
    def run_archive_commitlog(self,
                              restore_point_in_time=False,
                              restore_archived_commitlog=True,
                              archive_active_commitlogs=False,
                              archive_command='cp'):
        """
        Run archive commit log restoration test
        """

        cluster = self.cluster
        cluster.populate(1)
        (node1, ) = cluster.nodelist()

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        logger.debug("tmp_commitlog: " + tmp_commitlog)

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(
            os.path.join(node1.get_path(), 'conf',
                         'commitlog_archiving.properties'),
            [(r'^archive_command=.*$',
              'archive_command={archive_command} %path {tmp_commitlog}/%name'.
              format(tmp_commitlog=tmp_commitlog,
                     archive_command=archive_command))])

        cluster.start()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)

        # Write until we get a new CL segment. This avoids replaying
        # initialization mutations from startup into system tables when
        # restoring snapshots. See CASSANDRA-11811.
        advance_to_next_cl_segment(session=session,
                                   commitlog_dir=os.path.join(
                                       node1.get_path(), 'commitlogs'))

        session.execute(
            'CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        logger.debug("Writing first 30,000 rows...")
        self.insert_rows(session, 0, 30000)
        # Record when this first set of inserts finished:
        insert_cutoff_times = [time.gmtime()]

        # Delete all commitlog backups so far:
        for f in glob.glob(tmp_commitlog + "/*"):
            logger.debug('Removing {}'.format(f))
            os.remove(f)

        snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')

        if self.cluster.version() >= '3.0':
            system_ks_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            system_ks_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_keyspaces', 'keyspaces')

        if self.cluster.version() >= '3.0':
            system_col_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'columns', 'columns')
        else:
            system_col_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_columns', 'columns')

        if self.cluster.version() >= '3.0':
            system_ut_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'types', 'usertypes')
        else:
            system_ut_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_usertypes', 'usertypes')

        if self.cluster.version() >= '3.0':
            system_cfs_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'tables', 'cfs')
        else:
            system_cfs_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_columnfamilies', 'cfs')

        try:
            # Write more data:
            logger.debug("Writing second 30,000 rows...")
            self.insert_rows(session, 30000, 60000)
            node1.flush()
            time.sleep(10)
            # Record when this second set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            logger.debug("Writing final 5,000 rows...")
            self.insert_rows(session, 60000, 65000)
            # Record when the third set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            # Flush so we get an accurate view of commitlogs
            node1.flush()

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we see all 65,000 rows written so far:
            assert rows[0][0] == 65000

            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            logger.debug("node1 commitlog dir: " + commitlog_dir)
            logger.debug("node1 commitlog dir contents: " +
                         str(os.listdir(commitlog_dir)))
            logger.debug("tmp_commitlog contents: " +
                         str(os.listdir(tmp_commitlog)))

            assert_directory_not_empty(tmp_commitlog, commitlog_dir)

            cluster.flush()
            cluster.compact()
            node1.drain()

            # Destroy the cluster
            cluster.stop()
            logger.debug("node1 commitlog dir contents after stopping: " +
                         str(os.listdir(commitlog_dir)))
            logger.debug("tmp_commitlog contents after stopping: " +
                         str(os.listdir(tmp_commitlog)))

            self.copy_logs(name=get_current_test_name() + "_pre-restore")
            self.fixture_dtest_setup.cleanup_and_replace_cluster()
            cluster = self.cluster
            cluster.populate(1)
            nodes = cluster.nodelist()
            assert len(nodes) == 1
            node1 = nodes[0]

            # Restore schema from snapshots:
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ks_snapshot_dir, node1,
                                          'system_schema', 'keyspaces',
                                          'keyspaces')
                else:
                    self.restore_snapshot(system_ks_snapshot_dir, node1,
                                          'system', 'schema_keyspaces',
                                          'keyspaces')
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_col_snapshot_dir, node1,
                                          'system_schema', 'columns',
                                          'columns')
                else:
                    self.restore_snapshot(system_col_snapshot_dir, node1,
                                          'system', 'schema_columns',
                                          'columns')
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ut_snapshot_dir, node1,
                                          'system_schema', 'types',
                                          'usertypes')
                else:
                    self.restore_snapshot(system_ut_snapshot_dir, node1,
                                          'system', 'schema_usertypes',
                                          'usertypes')

            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_cfs_snapshot_dir, node1,
                                          'system_schema', 'tables', 'cfs')
                else:
                    self.restore_snapshot(system_cfs_snapshot_dir, node1,
                                          'system', 'schema_columnfamilies',
                                          'cfs')
            for snapshot_dir in snapshot_dirs:
                self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

            cluster.start()

            session = self.patient_cql_connection(node1)
            node1.nodetool('refresh ks cf')

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            assert rows[0][0] == 30000

            # Edit commitlog_archiving.properties. Remove the archive
            # command and set a restore command and restore_directories:
            if restore_archived_commitlog:
                replace_in_file(
                    os.path.join(node1.get_path(), 'conf',
                                 'commitlog_archiving.properties'),
                    [(r'^archive_command=.*$', 'archive_command='),
                     (r'^restore_command=.*$',
                      'restore_command=cp -f %from %to'),
                     (r'^restore_directories=.*$',
                      'restore_directories={tmp_commitlog}'.format(
                          tmp_commitlog=tmp_commitlog))])

                if restore_point_in_time:
                    restore_time = time.strftime("%Y:%m:%d %H:%M:%S",
                                                 insert_cutoff_times[1])
                    replace_in_file(
                        os.path.join(node1.get_path(), 'conf',
                                     'commitlog_archiving.properties'),
                        [(r'^restore_point_in_time=.*$',
                          'restore_point_in_time={restore_time}'.format(
                              restore_time=restore_time))])

            logger.debug("Restarting node1..")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            node1.nodetool('flush')
            node1.nodetool('compact')

            session = self.patient_cql_connection(node1)
            rows = session.execute('SELECT count(*) from ks.cf')
            # Expected counts: 30000 rows from the snapshot alone if the
            # archived commitlogs were not restored; 60000 (snapshot plus the
            # second batch) with a point-in-time restore; 65000 with a full
            # replay:
            if not restore_archived_commitlog:
                assert rows[0][0] == 30000
            elif restore_point_in_time:
                assert rows[0][0] == 60000
            else:
                assert rows[0][0] == 65000

        finally:
            # clean up
            logger.debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
            for snapshot_dir in snapshot_dirs:
                shutil.rmtree(snapshot_dir)
            logger.debug("removing snapshot_dir: " +
                         ",".join(system_ks_snapshot_dirs))
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                shutil.rmtree(system_ks_snapshot_dir)
            logger.debug("removing snapshot_dir: " +
                         ",".join(system_cfs_snapshot_dirs))
            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                shutil.rmtree(system_cfs_snapshot_dir)
            logger.debug("removing snapshot_dir: " +
                         ",".join(system_ut_snapshot_dirs))
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                shutil.rmtree(system_ut_snapshot_dir)
            logger.debug("removing snapshot_dir: " +
                         ",".join(system_col_snapshot_dirs))
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                shutil.rmtree(system_col_snapshot_dir)

            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
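
The newer examples delegate the archived-segment check to
assert_directory_not_empty, while Examples #5 and #6 below inline it. A
sketch consistent with that inline assertion; the exact signature and
message here are assumptions:

    import os

    def assert_directory_not_empty(tmp_commitlog, commitlog_dir):
        # At least one archived segment must exist in tmp_commitlog that
        # is not one of the active segments in commitlog_dir.
        archived_only = (set(os.listdir(tmp_commitlog)) -
                         set(os.listdir(commitlog_dir)))
        assert archived_only != set(), \
            'no archived segments outside {}'.format(commitlog_dir)
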
Example #3

    def test_archive_and_restore_commitlog_repeatedly(self):
        """
        @jira_ticket CASSANDRA-10593
        Run the archive commitlog restoration test repeatedly to make sure it
        is idempotent and doesn't fail when run more than once
        """
        cluster = self.cluster
        cluster.populate(1)
        node1 = cluster.nodelist()[0]

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        logger.debug("tmp_commitlog: {}".format(tmp_commitlog))

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                        [(r'^archive_command=.*$', 'archive_command=ln %path {tmp_commitlog}/%name'.format(
                            tmp_commitlog=tmp_commitlog)),
                         (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                         (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                          tmp_commitlog=tmp_commitlog))])

        cluster.start(wait_for_binary_proto=True)

        logger.debug("Creating initial connection")
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        logger.debug("Writing 30,000 rows...")
        self.insert_rows(session, 0, 60000)

        try:
            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            logger.debug("node1 commitlog dir: " + commitlog_dir)

            cluster.flush()

            assert_directory_not_empty(tmp_commitlog, commitlog_dir)

            logger.debug("Flushing and doing first restart")
            cluster.compact()
            node1.drain()
            # restart the node which causes the active commitlogs to be archived
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            logger.debug("Stopping and second restart")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            # There shouldn't be any additional data, since each restart replays the same commitlogs
            session = self.patient_cql_connection(node1)

            rows = session.execute('SELECT count(*) from ks.cf')
            assert rows[0][0] == 60000
        finally:
            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
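
For reference, after the replace_in_file edits in the example above,
commitlog_archiving.properties ends up with lines along these lines, where
/tmp/tmp_dir stands in for the directory returned by safe_mkdtemp(), and
%path, %name, %from, and %to are placeholders that Cassandra itself
substitutes at archive/restore time:

    archive_command=ln %path /tmp/tmp_dir/%name
    restore_command=cp -f %from %to
    restore_directories=/tmp/tmp_dir
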
Example #4

    def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'):
        """
        Run archive commit log restoration test
        """

        cluster = self.cluster
        cluster.populate(1)
        (node1,) = cluster.nodelist()

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        logger.debug("tmp_commitlog: " + tmp_commitlog)

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                        [(r'^archive_command=.*$', 'archive_command={archive_command} %path {tmp_commitlog}/%name'.format(
                            tmp_commitlog=tmp_commitlog, archive_command=archive_command))])

        cluster.start()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)

        # Write until we get a new CL segment. This avoids replaying
        # initialization mutations from startup into system tables when
        # restoring snapshots. See CASSANDRA-11811.
        advance_to_next_cl_segment(
            session=session,
            commitlog_dir=os.path.join(node1.get_path(), 'commitlogs')
        )

        session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        logger.debug("Writing first 30,000 rows...")
        self.insert_rows(session, 0, 30000)
        # Record when this first set of inserts finished:
        insert_cutoff_times = [time.gmtime()]

        # Delete all commitlog backups so far:
        for f in glob.glob(tmp_commitlog + "/*"):
            logger.debug('Removing {}'.format(f))
            os.remove(f)

        snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')

        if self.cluster.version() >= '3.0':
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')

        if self.cluster.version() >= '3.0':
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
        else:
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')

        if self.cluster.version() >= '3.0':
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
        else:
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')

        if self.cluster.version() >= '3.0':
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
        else:
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')

        try:
            # Write more data:
            logger.debug("Writing second 30,000 rows...")
            self.insert_rows(session, 30000, 60000)
            node1.flush()
            time.sleep(10)
            # Record when this second set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            logger.debug("Writing final 5,000 rows...")
            self.insert_rows(session, 60000, 65000)
            # Record when the third set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            # Flush so we get an accurate view of commitlogs
            node1.flush()

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we see all 65,000 rows written so far:
            assert rows[0][0] == 65000

            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            logger.debug("node1 commitlog dir: " + commitlog_dir)
            logger.debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
            logger.debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))

            assert_directory_not_empty(tmp_commitlog, commitlog_dir)

            cluster.flush()
            cluster.compact()
            node1.drain()

            # Destroy the cluster
            cluster.stop()
            logger.debug("node1 commitlog dir contents after stopping: " + str(os.listdir(commitlog_dir)))
            logger.debug("tmp_commitlog contents after stopping: " + str(os.listdir(tmp_commitlog)))

            self.copy_logs(name=get_current_test_name() + "_pre-restore")
            self.fixture_dtest_setup.cleanup_and_replace_cluster()
            cluster = self.cluster
            cluster.populate(1)
            nodes = cluster.nodelist()
            assert len(nodes) == 1
            node1 = nodes[0]

            # Restore schema from snapshots:
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
                else:
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
                else:
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
                else:
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')

            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
                else:
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
            for snapshot_dir in snapshot_dirs:
                self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

            cluster.start(wait_for_binary_proto=True)

            session = self.patient_cql_connection(node1)
            node1.nodetool('refresh ks cf')

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            assert rows[0][0] == 30000

            # Edit commitlog_archiving.properties. Remove the archive
            # command and set a restore command and restore_directories:
            if restore_archived_commitlog:
                replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                [(r'^archive_command=.*$', 'archive_command='),
                                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                                 (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                                     tmp_commitlog=tmp_commitlog))])

                if restore_point_in_time:
                    restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                    [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])

            logger.debug("Restarting node1..")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            node1.nodetool('flush')
            node1.nodetool('compact')

            session = self.patient_cql_connection(node1)
            rows = session.execute('SELECT count(*) from ks.cf')
            # Expected counts: 30000 rows from the snapshot alone if the
            # archived commitlogs were not restored; 60000 (snapshot plus the
            # second batch) with a point-in-time restore; 65000 with a full
            # replay:
            if not restore_archived_commitlog:
                assert rows[0][0] == 30000
            elif restore_point_in_time:
                assert rows[0][0] == 60000
            else:
                assert rows[0][0] == 65000

        finally:
            # clean up
            logger.debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
            for snapshot_dir in snapshot_dirs:
                shutil.rmtree(snapshot_dir)
            logger.debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                shutil.rmtree(system_ks_snapshot_dir)
            logger.debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                shutil.rmtree(system_cfs_snapshot_dir)
            logger.debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                shutil.rmtree(system_ut_snapshot_dir)
            logger.debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                shutil.rmtree(system_col_snapshot_dir)

            logger.debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
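
Examples #2 and #4 call advance_to_next_cl_segment before creating ks.cf so
that the segment holding the startup and schema mutations is no longer the
active one when snapshots are later restored (see CASSANDRA-11811). A rough
sketch of the idea, assuming a throwaway ks.junk table; the actual dtest
helper differs in detail:

    import os
    import time

    def advance_to_next_cl_segment(session, commitlog_dir, timeout=60):
        # Keep writing until a commitlog segment appears that was not
        # present when we started, i.e. the log has rolled over.
        initial_segments = set(os.listdir(commitlog_dir))
        session.execute(
            'CREATE TABLE IF NOT EXISTS ks.junk (key int PRIMARY KEY, val text)')
        deadline = time.time() + timeout
        key = 0
        while time.time() < deadline:
            session.execute(
                "INSERT INTO ks.junk (key, val) VALUES ({}, '{}')".format(
                    key, 'x' * 1024))
            key += 1
            if set(os.listdir(commitlog_dir)) - initial_segments:
                return
        raise RuntimeError(
            'commitlog did not roll over within {}s'.format(timeout))
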
Example #5

    def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'):
        """
        Run archive commit log restoration test
        """

        cluster = self.cluster
        cluster.populate(1)
        (node1,) = cluster.nodelist()

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        debug("tmp_commitlog: " + tmp_commitlog)

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                        [(r'^archive_command=.*$', 'archive_command={archive_command} %path {tmp_commitlog}/%name'.format(
                            tmp_commitlog=tmp_commitlog, archive_command=archive_command))])

        cluster.start()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        session.execute('CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        debug("Writing first 30,000 rows...")
        self.insert_rows(session, 0, 30000)
        # Record when this first set of inserts finished:
        insert_cutoff_times = [time.gmtime()]

        # Delete all commitlog backups so far:
        for f in glob.glob(tmp_commitlog + "/*"):
            os.remove(f)

        snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')

        if self.cluster.version() >= '3.0':
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            system_ks_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_keyspaces', 'keyspaces')

        if self.cluster.version() >= '3.0':
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'columns', 'columns')
        else:
            system_col_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columns', 'columns')

        if self.cluster.version() >= '3.0':
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'types', 'usertypes')
        else:
            system_ut_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_usertypes', 'usertypes')

        if self.cluster.version() >= '3.0':
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system_schema', 'tables', 'cfs')
        else:
            system_cfs_snapshot_dirs = self.make_snapshot(node1, 'system', 'schema_columnfamilies', 'cfs')

        try:
            # Write more data:
            debug("Writing second 30,000 rows...")
            self.insert_rows(session, 30000, 60000)
            node1.flush()
            time.sleep(10)
            # Record when this second set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            debug("Writing final 5,000 rows...")
            self.insert_rows(session, 60000, 65000)
            # Record when the third set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we see all 65,000 rows written so far:
            self.assertEqual(rows[0][0], 65000)

            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            debug("node1 commitlog dir: " + commitlog_dir)
            debug("node1 commitlog dir contents: " + str(os.listdir(commitlog_dir)))
            debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))

            self.assertNotEqual(set(os.listdir(tmp_commitlog)) - set(os.listdir(commitlog_dir)),
                                set())

            cluster.flush()
            cluster.compact()
            node1.drain()
            if archive_active_commitlogs:
                # restart the node which causes the active commitlogs to be archived
                node1.stop()
                node1.start(wait_for_binary_proto=True)

            # Destroy the cluster
            cluster.stop()
            self.copy_logs(self.cluster, name=self.id().split(".")[0] + "_pre-restore")
            cleanup_cluster(self.cluster, self.test_path)
            self.test_path = get_test_path()
            cluster = self.cluster = create_ccm_cluster(self.test_path, name='test')
            cluster.populate(1)
            node1, = cluster.nodelist()

            # Restore schema from snapshots:
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system_schema', 'keyspaces', 'keyspaces')
                else:
                    self.restore_snapshot(system_ks_snapshot_dir, node1, 'system', 'schema_keyspaces', 'keyspaces')
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system_schema', 'columns', 'columns')
                else:
                    self.restore_snapshot(system_col_snapshot_dir, node1, 'system', 'schema_columns', 'columns')
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system_schema', 'types', 'usertypes')
                else:
                    self.restore_snapshot(system_ut_snapshot_dir, node1, 'system', 'schema_usertypes', 'usertypes')

            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system_schema', 'tables', 'cfs')
                else:
                    self.restore_snapshot(system_cfs_snapshot_dir, node1, 'system', 'schema_columnfamilies', 'cfs')
            for snapshot_dir in snapshot_dirs:
                self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

            cluster.start(wait_for_binary_proto=True)

            session = self.patient_cql_connection(node1)
            node1.nodetool('refresh ks cf')

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            self.assertEqual(rows[0][0], 30000)

            # Edit commitlog_archiving.properties. Remove the archive
            # command and set a restore command and restore_directories:
            if restore_archived_commitlog:
                replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                [(r'^archive_command=.*$', 'archive_command='),
                                 (r'^restore_command=.*$', 'restore_command=cp -f %from %to'),
                                 (r'^restore_directories=.*$', 'restore_directories={tmp_commitlog}'.format(
                                     tmp_commitlog=tmp_commitlog))])

                if restore_point_in_time:
                    restore_time = time.strftime("%Y:%m:%d %H:%M:%S", insert_cutoff_times[1])
                    replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
                                    [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])

            debug("Restarting node1..")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            node1.nodetool('flush')
            node1.nodetool('compact')

            session = self.patient_cql_connection(node1)
            rows = session.execute('SELECT count(*) from ks.cf')
            # Expected counts: 30000 rows from the snapshot alone if the
            # archived commitlogs were not restored; 60000 (snapshot plus the
            # second batch) with a point-in-time restore; 65000 with a full
            # replay:
            if not restore_archived_commitlog:
                self.assertEqual(rows[0][0], 30000)
            elif restore_point_in_time:
                self.assertEqual(rows[0][0], 60000)
            else:
                self.assertEqual(rows[0][0], 65000)

        finally:
            # clean up
            debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
            for snapshot_dir in snapshot_dirs:
                shutil.rmtree(snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_ks_snapshot_dirs))
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                shutil.rmtree(system_ks_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_cfs_snapshot_dirs))
            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                shutil.rmtree(system_cfs_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_ut_snapshot_dirs))
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                shutil.rmtree(system_ut_snapshot_dir)
            debug("removing snapshot_dir: " + ",".join(system_col_snapshot_dirs))
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                shutil.rmtree(system_col_snapshot_dir)

            debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
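
The three assertion branches at the end of run_archive_commitlog correspond
to three ways a test might drive it. These callers are hypothetical, named
here only to illustrate the mapping from flags to expected row counts:

    def test_archive_commitlog(self):
        # Full replay: 30,000 snapshot rows plus the 35,000 written after
        # the snapshot.
        self.run_archive_commitlog(restore_archived_commitlog=True)  # 65,000

    def test_archive_commitlog_point_in_time(self):
        # Replay stops at insert_cutoff_times[1]: snapshot + second batch.
        self.run_archive_commitlog(restore_point_in_time=True)  # 60,000

    def test_dont_archive_commitlog(self):
        # No restore configured: only the snapshot's rows survive.
        self.run_archive_commitlog(restore_archived_commitlog=False)  # 30,000
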
Example #6

    def run_archive_commitlog(self,
                              restore_point_in_time=False,
                              restore_archived_commitlog=True,
                              archive_active_commitlogs=False,
                              archive_command='cp'):
        """
        Run archive commit log restoration test
        """

        cluster = self.cluster
        cluster.populate(1)
        (node1, ) = cluster.nodelist()

        # Create a temp directory for storing commitlog archives:
        tmp_commitlog = safe_mkdtemp()
        debug("tmp_commitlog: " + tmp_commitlog)

        # Edit commitlog_archiving.properties and set an archive
        # command:
        replace_in_file(
            os.path.join(node1.get_path(), 'conf',
                         'commitlog_archiving.properties'),
            [(r'^archive_command=.*$',
              'archive_command={archive_command} %path {tmp_commitlog}/%name'.
              format(tmp_commitlog=tmp_commitlog,
                     archive_command=archive_command))])

        cluster.start()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 1)
        session.execute(
            'CREATE TABLE ks.cf ( key bigint PRIMARY KEY, val text);')
        debug("Writing first 30,000 rows...")
        self.insert_rows(session, 0, 30000)
        # Record when this first set of inserts finished:
        insert_cutoff_times = [time.gmtime()]

        # Delete all commitlog backups so far:
        for f in glob.glob(tmp_commitlog + "/*"):
            os.remove(f)

        snapshot_dirs = self.make_snapshot(node1, 'ks', 'cf', 'basic')

        if self.cluster.version() >= '3.0':
            system_ks_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'keyspaces', 'keyspaces')
        else:
            system_ks_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_keyspaces', 'keyspaces')

        if self.cluster.version() >= '3.0':
            system_col_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'columns', 'columns')
        else:
            system_col_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_columns', 'columns')

        if self.cluster.version() >= '3.0':
            system_ut_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'types', 'usertypes')
        else:
            system_ut_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_usertypes', 'usertypes')

        if self.cluster.version() >= '3.0':
            system_cfs_snapshot_dirs = self.make_snapshot(
                node1, 'system_schema', 'tables', 'cfs')
        else:
            system_cfs_snapshot_dirs = self.make_snapshot(
                node1, 'system', 'schema_columnfamilies', 'cfs')

        try:
            # Write more data:
            debug("Writing second 30,000 rows...")
            self.insert_rows(session, 30000, 60000)
            node1.flush()
            time.sleep(10)
            # Record when this second set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            debug("Writing final 5,000 rows...")
            self.insert_rows(session, 60000, 65000)
            # Record when the third set of inserts finished:
            insert_cutoff_times.append(time.gmtime())

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we see all 65,000 rows written so far:
            self.assertEqual(rows[0][0], 65000)

            # Check that at least one commit log has been backed up that
            # is not one of the active commit logs:
            commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
            debug("node1 commitlog dir: " + commitlog_dir)
            debug("node1 commitlog dir contents: " +
                  str(os.listdir(commitlog_dir)))
            debug("tmp_commitlog contents: " + str(os.listdir(tmp_commitlog)))

            self.assertNotEqual(
                set(os.listdir(tmp_commitlog)) -
                set(os.listdir(commitlog_dir)), set())

            cluster.flush()
            cluster.compact()
            node1.drain()
            if archive_active_commitlogs:
                # restart the node which causes the active commitlogs to be archived
                node1.stop()
                node1.start(wait_for_binary_proto=True)

            # Destroy the cluster
            cluster.stop()
            self.copy_logs(self.cluster,
                           name=self.id().split(".")[0] + "_pre-restore")
            cleanup_cluster(self.cluster, self.test_path)
            self.test_path = get_test_path()
            cluster = self.cluster = create_ccm_cluster(self.test_path,
                                                        name='test')
            cluster.populate(1)
            node1, = cluster.nodelist()

            # Restore schema from snapshots:
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ks_snapshot_dir, node1,
                                          'system_schema', 'keyspaces',
                                          'keyspaces')
                else:
                    self.restore_snapshot(system_ks_snapshot_dir, node1,
                                          'system', 'schema_keyspaces',
                                          'keyspaces')
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_col_snapshot_dir, node1,
                                          'system_schema', 'columns',
                                          'columns')
                else:
                    self.restore_snapshot(system_col_snapshot_dir, node1,
                                          'system', 'schema_columns',
                                          'columns')
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_ut_snapshot_dir, node1,
                                          'system_schema', 'types',
                                          'usertypes')
                else:
                    self.restore_snapshot(system_ut_snapshot_dir, node1,
                                          'system', 'schema_usertypes',
                                          'usertypes')

            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                if self.cluster.version() >= '3.0':
                    self.restore_snapshot(system_cfs_snapshot_dir, node1,
                                          'system_schema', 'tables', 'cfs')
                else:
                    self.restore_snapshot(system_cfs_snapshot_dir, node1,
                                          'system', 'schema_columnfamilies',
                                          'cfs')
            for snapshot_dir in snapshot_dirs:
                self.restore_snapshot(snapshot_dir, node1, 'ks', 'cf', 'basic')

            cluster.start(wait_for_binary_proto=True)

            session = self.patient_cql_connection(node1)
            node1.nodetool('refresh ks cf')

            rows = session.execute('SELECT count(*) from ks.cf')
            # Make sure we have the same amount of rows as when we snapshotted:
            self.assertEqual(rows[0][0], 30000)

            # Edit commitlog_archiving.properties. Remove the archive
            # command and set a restore command and restore_directories:
            if restore_archived_commitlog:
                replace_in_file(
                    os.path.join(node1.get_path(), 'conf',
                                 'commitlog_archiving.properties'),
                    [(r'^archive_command=.*$', 'archive_command='),
                     (r'^restore_command=.*$',
                      'restore_command=cp -f %from %to'),
                     (r'^restore_directories=.*$',
                      'restore_directories={tmp_commitlog}'.format(
                          tmp_commitlog=tmp_commitlog))])

                if restore_point_in_time:
                    restore_time = time.strftime("%Y:%m:%d %H:%M:%S",
                                                 insert_cutoff_times[1])
                    replace_in_file(
                        os.path.join(node1.get_path(), 'conf',
                                     'commitlog_archiving.properties'),
                        [(r'^restore_point_in_time=.*$',
                          'restore_point_in_time={restore_time}'.format(
                              restore_time=restore_time))])

            debug("Restarting node1..")
            node1.stop()
            node1.start(wait_for_binary_proto=True)

            node1.nodetool('flush')
            node1.nodetool('compact')

            session = self.patient_cql_connection(node1)
            rows = session.execute('SELECT count(*) from ks.cf')
            # Expected counts: 30000 rows from the snapshot alone if the
            # archived commitlogs were not restored; 60000 (snapshot plus the
            # second batch) with a point-in-time restore; 65000 with a full
            # replay:
            if not restore_archived_commitlog:
                self.assertEqual(rows[0][0], 30000)
            elif restore_point_in_time:
                self.assertEqual(rows[0][0], 60000)
            else:
                self.assertEqual(rows[0][0], 65000)

        finally:
            # clean up
            debug("removing snapshot_dir: " + ",".join(snapshot_dirs))
            for snapshot_dir in snapshot_dirs:
                shutil.rmtree(snapshot_dir)
            debug("removing snapshot_dir: " +
                  ",".join(system_ks_snapshot_dirs))
            for system_ks_snapshot_dir in system_ks_snapshot_dirs:
                shutil.rmtree(system_ks_snapshot_dir)
            debug("removing snapshot_dir: " +
                  ",".join(system_cfs_snapshot_dirs))
            for system_cfs_snapshot_dir in system_cfs_snapshot_dirs:
                shutil.rmtree(system_cfs_snapshot_dir)
            debug("removing snapshot_dir: " +
                  ",".join(system_ut_snapshot_dirs))
            for system_ut_snapshot_dir in system_ut_snapshot_dirs:
                shutil.rmtree(system_ut_snapshot_dir)
            debug("removing snapshot_dir: " +
                  ",".join(system_col_snapshot_dirs))
            for system_col_snapshot_dir in system_col_snapshot_dirs:
                shutil.rmtree(system_col_snapshot_dir)

            debug("removing tmp_commitlog: " + tmp_commitlog)
            shutil.rmtree(tmp_commitlog)
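
Finally, every example writes its data through an insert_rows helper that is
also not shown. A minimal sketch matching the ks.cf schema used throughout
(key bigint PRIMARY KEY, val text); the value text is an arbitrary
assumption:

    def insert_rows(self, session, start, end):
        # Insert one row per key in [start, end).
        for key in range(start, end):
            session.execute(
                "INSERT INTO ks.cf (key, val) VALUES ({}, 'asdf')".format(key))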