# Example 1
    def abortedcompaction_test(self):
        """
        @jira_ticket CASSANDRA-7066
        @jira_ticket CASSANDRA-11497

        Check that we can cleanup temporary files after a compaction is aborted.
        """
        log_file = 'debug.log'
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        record_count = 250000

        # Seed the table, then confirm a clean starting state: some final
        # sstables present and zero leftover temporary files.
        self._create_data(node, KeyspaceName, TableName, record_count)
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertTrue(len(finalfiles) > 0, "Expected to find some final files")
        self.assertEqual(0, len(tmpfiles), "Expected no tmp files")

        # Kill the node part-way through compaction so temporary files are
        # (usually) left behind.
        interrupter = InterruptCompaction(node, TableName,
                                          filename=log_file, delay=2)
        interrupter.start()

        try:
            debug("Compacting...")
            node.compact()
        except ToolError:
            pass  # expected to fail

        interrupter.join()

        finalfiles = _normcase_all(
            self._invoke_sstableutil(KeyspaceName, TableName, type='final'))
        tmpfiles = _normcase_all(
            self._invoke_sstableutil(KeyspaceName, TableName, type='tmp'))

        # In most cases we should end up with some temporary files to clean up,
        # but it may happen that no temporary files are created if compaction
        # finishes too early or starts too late, see CASSANDRA-11497.
        debug("Got {} final files and {} tmp files after compaction was interrupted"
              .format(len(finalfiles), len(tmpfiles)))

        # Run the cleanup, then verify only the final files remain.
        self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)

        self._check_files(node, KeyspaceName, TableName, finalfiles, [])

        # Restart to make sure no data was lost.
        debug("Restarting node...")
        node.start(wait_for_binary_proto=True)
        # In some environments a compaction may start on its own and change the
        # sstable files, so wait for any in-flight compactions to finish.
        node.wait_for_compactions()

        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        debug("Running stress to ensure data is readable")
        self._read_data(node, record_count)
    def abortedcompaction_test(self):
        """
        @jira_ticket CASSANDRA-7066
        Check we can list the sstable files after aborted compaction (temporary sstable files)
        Then perform a cleanup and verify the temporary files are gone
        """
        log_file_name = 'debug.log'
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        numrecords = 200000

        # Seed the table and confirm there are no leftover temporary files.
        self._create_data(node, KeyspaceName, TableName, numrecords)
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        # Kill the node mid-compaction so temporary sstable files are left behind.
        t = InterruptCompaction(node, TableName, filename=log_file_name)
        t.start()

        try:
            node.compact()
            # self.fail() is not stripped under `python -O`, unlike a bare assert.
            self.fail("Compaction should have failed")
        except NodetoolError:
            pass  # expected to fail

        t.join()

        # Should compaction finish before the node is killed, this test would
        # fail, in which case try increasing numrecords.
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName, finalfiles)
        self.assertGreater(len(tmpfiles), 0)

        self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)

        # After cleanup only the final files should remain.
        self.assertEqual([], self._invoke_sstableutil(KeyspaceName, TableName, type='tmp'))
        self.assertEqual(finalfiles, self._invoke_sstableutil(KeyspaceName, TableName, type='final'))

        # Restart to make sure no data was lost.
        node.start(wait_for_binary_proto=True)
        node.watch_log_for("Compacted(.*)%s" % (TableName, ), filename=log_file_name)

        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        debug("Run stress to ensure data is readable")
        self._read_data(node, numrecords)
# Example 3
    def listfiles_onabortedcompaction_test(self):
        """
        Check we can list the sstable files after aborted compaction (temporary sstable files)
        """
        # The docstring must be the first statement of the method, otherwise it
        # is just a dead string expression; keep the skip right after it.
        self.skipTest("Feature In Development")
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        numrecords = 200000

        # Seed the table and confirm there are no leftover temporary files.
        self._create_data(node, KeyspaceName, TableName, numrecords)
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        # Kill the node mid-compaction so temporary sstable files are left behind.
        t = InterruptCompaction(node, TableName)
        t.start()

        try:
            node.compact()
            # self.fail() is not stripped under `python -O`, unlike a bare assert.
            self.fail("Compaction should have failed")
        except NodetoolError:
            pass  # expected to fail

        t.join()

        # Should compaction finish before the node is killed, this test would
        # fail, in which case try increasing numrecords.
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName, finalfiles)
        self.assertGreater(len(tmpfiles), 0)

        # Restart and make sure tmp files are gone and the data can be read.
        node.start(wait_for_binary_proto=True)
        node.watch_log_for("Compacted(.*)%s" % (TableName, ))

        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        debug("Run stress to ensure data is readable")
        self._read_data(node, numrecords)
    def abortedcompaction_test(self):
        """
        @jira_ticket CASSANDRA-7066
        @jira_ticket CASSANDRA-11497

        Check that we can cleanup temporary files after a compaction is aborted.
        """
        log_file = 'debug.log'
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        record_count = 250000

        # Seed the table, then confirm a clean starting state with no leftover
        # temporary files.
        self._create_data(node, KeyspaceName, TableName, record_count)
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        # Kill the node part-way through compaction so temporary files are
        # (usually) left behind.
        interrupter = InterruptCompaction(node, TableName,
                                          filename=log_file, delay=2)
        interrupter.start()

        try:
            debug("Compacting...")
            node.compact()
        except NodetoolError:
            pass  # expected to fail

        interrupter.join()

        finalfiles, tmpfiles = self._check_files(
            node, KeyspaceName, TableName, finalfiles)
        # In most cases we should end up with some temporary files to clean up,
        # but it may happen that no temporary files are created if compaction
        # finishes too early or starts too late, see CASSANDRA-11497.
        debug("Got {} final files and {} tmp files after compaction was interrupted"
              .format(len(finalfiles), len(tmpfiles)))

        # Run the cleanup, then verify only the final files remain.
        self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)

        self._check_files(node, KeyspaceName, TableName, finalfiles, [])

        # Restart to make sure no data was lost.
        debug("Restarting node...")
        node.start(wait_for_binary_proto=True)
        # In some environments a compaction may start on its own and change the
        # sstable files, so wait for any in-flight compactions to finish.
        node.wait_for_compactions()

        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        debug("Running stress to ensure data is readable")
        self._read_data(node, record_count)
# Example 5
    def abortedcompaction_test(self):
        """
        @jira_ticket CASSANDRA-7066
        Check we can list the sstable files after aborted compaction (temporary sstable files)
        Then perform a cleanup and verify the temporary files are gone
        """
        log_file_name = 'debug.log'
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]

        numrecords = 200000

        # Seed the table and confirm there are no leftover temporary files.
        self._create_data(node, KeyspaceName, TableName, numrecords)
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        # Kill the node mid-compaction so temporary sstable files are left behind.
        t = InterruptCompaction(node, TableName, filename=log_file_name)
        t.start()

        try:
            node.compact()
            # self.fail() is not stripped under `python -O`, unlike a bare assert.
            self.fail("Compaction should have failed")
        except NodetoolError:
            pass  # expected to fail

        t.join()

        # Should compaction finish before the node is killed, this test would
        # fail, in which case try increasing numrecords.
        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName,
                                                 finalfiles)
        self.assertGreater(len(tmpfiles), 0)

        self._invoke_sstableutil(KeyspaceName, TableName, cleanup=True)

        # After cleanup only the final files should remain.
        self.assertEqual([],
                         self._invoke_sstableutil(KeyspaceName,
                                                  TableName,
                                                  type='tmp'))
        self.assertEqual(
            finalfiles,
            self._invoke_sstableutil(KeyspaceName, TableName, type='final'))

        # Restart to make sure no data was lost.
        node.start(wait_for_binary_proto=True)
        node.watch_log_for("Compacted(.*)%s" % (TableName, ),
                           filename=log_file_name)

        finalfiles, tmpfiles = self._check_files(node, KeyspaceName, TableName)
        self.assertEqual(0, len(tmpfiles))

        debug("Run stress to ensure data is readable")
        self._read_data(node, numrecords)