def overlapping_data_folders(self):
        """
        A saved_caches_directory nested inside the data directory must not
        break cache saving or a subsequent restart.

        @jira_ticket CASSANDRA-10902
        """
        self.cluster.populate(1)
        [node1] = self.cluster.nodelist()
        saved_caches_path = os.path.join(node1.data_directories()[0],
                                         'saved_caches')
        node1.set_configuration_options(
            {'saved_caches_directory': saved_caches_path})
        remove_perf_disable_shared_mem(node1)
        self.cluster.start(wait_for_binary_proto=True)

        # Write and read one row so there is something to cache.
        session = self.patient_exclusive_cql_connection(node1)
        session.execute(
            "CREATE KEYSPACE ks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}"
        )
        session.execute("CREATE TABLE ks.tab (key int PRIMARY KEY, a int)")
        session.execute("INSERT INTO ks.tab (key, a) VALUES (%s, %s)", [0, 0])
        session.execute("SELECT * FROM ks.tab WHERE key = %s", [0])

        # Persist the caches via JMX, then bounce the node to make sure it
        # comes back up cleanly with the overlapping folders.
        with JolokiaAgent(node1) as jmx:
            jmx.execute_method(make_mbean('db', type="Caches"), 'saveCaches')

        self.cluster.stop()
        self.cluster.start(wait_for_binary_proto=True)
    def blacklisted_directory_test(self):
        """
        Mark one of three data directories unwritable through the
        BlacklistedDirectories MBean and verify all rows stay readable
        before and after ``nodetool relocatesstables``.
        """
        cluster = self.cluster
        cluster.set_datadir_count(3)
        cluster.populate(1)
        [node] = cluster.nodelist()
        remove_perf_disable_shared_mem(node)
        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node)
        self.create_ks(session, 'ks', 1)
        create_c1c2_table(self, session)
        insert_c1c2(session, n=10000)
        node.flush()

        def query_all():
            # Read back every inserted row to prove the data is reachable.
            # `range` (not the Python-2-only `xrange`) keeps this runnable
            # on both interpreter lines.
            for k in range(0, 10000):
                query_c1c2(session, k)

        query_all()

        node.compact()
        mbean = make_mbean('db', type='BlacklistedDirectories')
        with JolokiaAgent(node) as jmx:
            jmx.execute_method(mbean, 'markUnwritable',
                               [os.path.join(node.get_path(), 'data0')])

        # Reads must still succeed with one directory blacklisted ...
        query_all()

        node.nodetool('relocatesstables')

        # ... and after sstables are moved off the blacklisted directory.
        query_all()
def table_metric(node, keyspace, table, name):
    """
    Read the ``Value`` attribute of a table-level metric MBean on *node*.

    Releases up to 2.2 register table metrics under the legacy
    "ColumnFamily" type; later releases use "Table".
    """
    if node.get_cassandra_version() <= '2.2.X':
        bean_type = "ColumnFamily"
    else:
        bean_type = 'Table'
    mbean = make_mbean('metrics', type=bean_type,
                       name=name, keyspace=keyspace, scope=table)
    with JolokiaAgent(node) as jmx:
        return jmx.read_attribute(mbean, 'Value')
    def _deprecated_repair_jmx(self, method, arguments):
        """
        * Launch a two node, two DC cluster
        * Create a keyspace and table
        * Insert some data
        * Call the deprecated repair JMX API based on the arguments passed into this method
        * Check the node log to see if the correct repair was performed based on the jmx args

        @param method name of the deprecated StorageService repair method to invoke
        @param arguments positional JMX arguments for that method
        @return dict of the repair options parsed out of the node's log line
        """
        cluster = self.cluster

        debug("Starting cluster..")
        cluster.populate([1, 1])
        node1, node2 = cluster.nodelist()
        remove_perf_disable_shared_mem(node1)
        cluster.start()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 2)
        self.create_cf(session,
                       'cf',
                       read_repair=0.0,
                       columns={
                           'c1': 'text',
                           'c2': 'text'
                       })

        insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)

        # Run repair
        mbean = make_mbean('db', 'StorageService')
        with JolokiaAgent(node1) as jmx:
            # assert repair runs and returns valid cmd number
            self.assertEqual(jmx.execute_method(mbean, method, arguments), 1)
        # wait for log to start
        node1.watch_log_for("Starting repair command")
        # Get repair parameters from the log.  Raw strings keep the regex
        # escapes (\(, \w, \d) from being treated as invalid string escapes.
        matches = node1.grep_log((
            r"Starting repair command #1, repairing keyspace ks with repair options \(parallelism: (?P<parallelism>\w+), primary range: (?P<pr>\w+), "
            r"incremental: (?P<incremental>\w+), job threads: (?P<jobs>\d+), ColumnFamilies: (?P<cfs>.+), dataCenters: (?P<dc>.+), "
            r"hosts: (?P<hosts>.+), # of ranges: (?P<ranges>\d+)\)"))
        self.assertEqual(len(matches), 1)
        line, m = matches[0]
        return {
            "parallelism": m.group("parallelism"),
            "primary_range": m.group("pr"),
            "incremental": m.group("incremental"),
            "job_threads": m.group("jobs"),
            "column_families": m.group("cfs"),
            "data_centers": m.group("dc"),
            "hosts": m.group("hosts"),
            "ranges": m.group("ranges")
        }
    def table_metric_mbeans_test(self):
        """
        Test some basic table metric mbeans with simple writes.
        """
        cluster = self.cluster
        cluster.populate(3)
        node1, node2, node3 = cluster.nodelist()
        remove_perf_disable_shared_mem(node1)
        cluster.start(wait_for_binary_proto=True)

        version = cluster.version()
        if version < "2.1":
            # pre-2.1 stress uses the legacy option syntax
            node1.stress(
                ['-o', 'insert', '--num-keys=10000', '--replication-factor=3'])
        else:
            node1.stress(
                ['write', 'n=10000', '-schema', 'replication(factor=3)'])

        # Table metrics were registered under the legacy "ColumnFamily"
        # type up to 2.2.
        typeName = "ColumnFamily" if version <= '2.2.X' else 'Table'
        debug('Version {} typeName {}'.format(version, typeName))

        def metric_mbean(name):
            # The three metrics below share everything but the metric name.
            # TODO the keyspace and table name are capitalized in 2.0
            return make_mbean('metrics',
                              type=typeName,
                              keyspace='keyspace1',
                              scope='standard1',
                              name=name)

        memtable_size = metric_mbean('AllMemtablesHeapSize')
        disk_size = metric_mbean('LiveDiskSpaceUsed')
        sstable_count = metric_mbean('LiveSSTableCount')

        with JolokiaAgent(node1) as jmx:
            mem_size = jmx.read_attribute(memtable_size, "Value")
            self.assertGreater(int(mem_size), 10000)

            # Nothing flushed yet, so no live sstable data on disk.
            on_disk_size = jmx.read_attribute(disk_size, "Count")
            self.assertEqual(int(on_disk_size), 0)

            node1.flush()

            on_disk_size = jmx.read_attribute(disk_size, "Count")
            self.assertGreater(int(on_disk_size), 10000)

            sstables = jmx.read_attribute(sstable_count, "Value")
            self.assertGreaterEqual(int(sstables), 1)
    def test_closing_connections(self):
        """
        @jira_ticket CASSANDRA-6546

        Test CASSANDRA-6546 - do connections get closed when disabling / renabling thrift service?
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={
            'start_rpc': 'true',
            'rpc_server_type': 'hsha',
            'rpc_max_threads': 20
        })

        cluster.populate(1)
        (node1, ) = cluster.nodelist()
        remove_perf_disable_shared_mem(node1)
        cluster.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'test', 1)
        session.execute(
            "CREATE TABLE \"CF\" (key text PRIMARY KEY, val text) WITH COMPACT STORAGE;"
        )

        def make_connection():
            pool = pycassa.ConnectionPool('test', timeout=None)
            # Instantiating the ColumnFamily opens/validates connections on
            # the new pool; the object itself is not needed afterwards.
            pycassa.ColumnFamily(pool, 'CF')
            return pool

        pools = []
        connected_thrift_clients = make_mbean('metrics',
                                              type='Client',
                                              name='connectedThriftClients')
        # `range` instead of the Python-2-only `xrange` keeps this runnable
        # on both interpreter lines.
        for i in range(10):
            debug("Creating connection pools..")
            for x in range(3):
                pools.append(make_connection())
            debug("Disabling/Enabling thrift iteration #{i}".format(i=i))
            node1.nodetool('disablethrift')
            node1.nodetool('enablethrift')
            debug("Closing connections from the client side..")
            for pool in pools:
                pool.dispose()

            # After bouncing thrift and disposing all client pools, the
            # server-side connected-clients metric must be back to zero.
            with JolokiaAgent(node1) as jmx:
                num_clients = jmx.read_attribute(connected_thrift_clients,
                                                 "Value")
                self.assertEqual(
                    int(num_clients), 0,
                    "There are still open Thrift connections after stopping service"
                )
# Beispiel #7
# 0
    def begin_test(self):
        """
        @jira_ticket CASSANDRA-7436
        This test measures the values of MBeans before and after running a load. We expect
        the values to change a certain way, and thus deem them as 'MBeanEqual','MBeanDecrement',
        'MBeanIncrement', or a constant to express this expected change. If the value does not reflect
        this expected change, then it raises an AssertionError.

        @jira_ticket CASSANDRA-9448
        This test also makes sure to cover all metrics that were renamed by CASSANDRA-9448, in post 3.0
        we also check that the old alias names are the same as the new names.
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        remove_perf_disable_shared_mem(node)
        cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node)
        self.create_ks(session, 'keyspace1', 1)
        session.execute("""
                        CREATE TABLE keyspace1.counter1 (
                            key blob,
                            column1 ascii,
                            value counter,
                            PRIMARY KEY (key, column1)
                        ) WITH COMPACT STORAGE
                            AND CLUSTERING ORDER BY (column1 ASC)
                            AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
                            AND comment = ''
                            AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
                            AND compression = {}
                            AND dclocal_read_repair_chance = 0.1
                            AND default_time_to_live = 0
                            AND gc_grace_seconds = 864000
                            AND max_index_interval = 2048
                            AND memtable_flush_period_in_ms = 0
                            AND min_index_interval = 128
                            AND read_repair_chance = 0.0
                            AND speculative_retry = 'NONE';
                        """)

        def expectation_error(mbean, expected, b_value, a_value):
            # Translate one (expected, before, after) observation into an
            # error message, or None when the observation matches.
            if expected == 'MBeanIncrement':
                if b_value >= a_value:
                    return (mbean + " has a before value of " +
                            str(b_value) + " and after value of " +
                            str(a_value) + " and did not increment" + "\n")
            elif expected == 'MBeanDecrement':
                if b_value <= a_value:
                    return (mbean + " has a before value of " +
                            str(b_value) + " and after value of " +
                            str(a_value) + " and did not decrement" + "\n")
            elif expected == 'MBeanEqual':
                if b_value != a_value:
                    return (mbean + " has a before value of " +
                            str(b_value) + " and after value of " +
                            str(a_value) + ", which are not equal" + "\n")
            elif expected == 'MBeanZero':
                if not (b_value == 0 and a_value == 0):
                    return (mbean + " has a before value of " +
                            str(b_value) + " and after value of " +
                            str(a_value) +
                            " and they do not equal zero" + "\n")
            # If expected is none of the above, then expected should be a number.
            else:
                if a_value != expected:
                    return (mbean + " has an after value of " +
                            str(a_value) + " which does not equal " +
                            str(expected) + "\n")
            return None

        with JolokiaAgent(node) as jmx:
            debug("Cluster version {}".format(cluster.version()))
            if cluster.version() <= '2.2.X':
                mbean_values = MBEAN_VALUES_PRE('keyspace1', 'counter1')
                mbean_aliases = None
            else:
                mbean_values = MBEAN_VALUES_POST('keyspace1', 'counter1')
                mbean_aliases = MBEAN_VALUES_PRE('keyspace1', 'counter1')

            before = []
            for package, bean, bean_args, attribute, expected in mbean_values:
                mbean = make_mbean(package, type=bean, **bean_args)
                debug(mbean)
                before.append(jmx.read_attribute(mbean, attribute))

            if mbean_aliases:
                # Pre-9448 alias mbeans must read the same as the new names.
                for idx, (package, bean, bean_args, attribute,
                          expected) in enumerate(mbean_aliases):
                    mbean = make_mbean(package, type=bean, **bean_args)
                    debug(mbean)
                    self.assertEqual(before[idx],
                                     jmx.read_attribute(mbean, attribute))

            node.stress(['write', 'n=100K'])

            errors = []
            after = []
            for idx, (package, bean, bean_args, attribute,
                      expected) in enumerate(mbean_values):
                mbean = make_mbean(package, type=bean, **bean_args)
                a_value = jmx.read_attribute(mbean, attribute)
                after.append(a_value)
                error = expectation_error(mbean, expected, before[idx],
                                          a_value)
                if error:
                    errors.append(error)

            self.assertEqual(len(errors), 0, "\n" + "\n".join(errors))

            if mbean_aliases:
                for idx, (package, bean, bean_args, attribute,
                          expected) in enumerate(mbean_aliases):
                    mbean = make_mbean(package, type=bean, **bean_args)
                    self.assertEqual(after[idx],
                                     jmx.read_attribute(mbean, attribute))
# Beispiel #8
# 0
    def test_compactionstats(self):
        """
        @jira_ticket CASSANDRA-10504
        @jira_ticket CASSANDRA-10427

        Test that jmx MBean used by nodetool compactionstats
        properly updates the progress of a compaction
        """

        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        remove_perf_disable_shared_mem(node)
        cluster.start(wait_for_binary_proto=True)

        # Run a quick stress command to create the keyspace and table
        node.stress(['write', 'n=1', 'no-warmup'])
        # Disable compaction on the table
        node.nodetool('disableautocompaction keyspace1 standard1')
        # Throttle compaction hard so it is still in flight when sampled.
        node.nodetool('setcompactionthroughput 1')
        node.stress(['write', 'n=150K', 'no-warmup'])
        node.flush()
        # Run a major compaction. This will be the compaction whose
        # progress we track.
        node.nodetool_process('compact')
        # We need to sleep here to give compaction time to start
        # Why not do something smarter? Because if the bug regresses,
        # we can't rely on jmx to tell us that compaction started.
        time.sleep(5)

        # Compile once, as a raw string (the original non-raw '\d'/'\/' are
        # invalid string escapes): pulls the bytes-compacted counter for
        # standard1 out of the compaction summary line.
        progress_pattern = re.compile(r'standard1, (\d+)/')

        compaction_manager = make_mbean('db', type='CompactionManager')
        with JolokiaAgent(node) as jmx:
            progress_string = jmx.read_attribute(compaction_manager,
                                                 'CompactionSummary')[0]

            # Pause in between reads
            # to allow compaction to move forward
            time.sleep(2)

            updated_progress_string = jmx.read_attribute(
                compaction_manager, 'CompactionSummary')[0]

            progress = int(
                progress_pattern.search(progress_string).groups()[0])
            updated_progress = int(
                progress_pattern.search(updated_progress_string).groups()[0])

            debug(progress_string)
            debug(updated_progress_string)

            # We want to make sure that the progress is increasing,
            # and that values other than zero are displayed.
            self.assertGreater(updated_progress, progress)
            self.assertGreaterEqual(progress, 0)
            self.assertGreater(updated_progress, 0)

            # Block until the major compaction is complete
            # Otherwise nodetool will throw an exception
            # Give a timeout, in case compaction is broken
            # and never ends.
            start = time.time()
            max_query_timeout = 600
            debug("Waiting for compaction to finish:")
            while (len(
                    jmx.read_attribute(compaction_manager,
                                       'CompactionSummary')) >
                   0) and (time.time() - start < max_query_timeout):
                debug(
                    jmx.read_attribute(compaction_manager,
                                       'CompactionSummary'))
                time.sleep(2)
    def test_upgrade_index_summary(self):
        """
        Exercise index summary handling across an upgrade path
        (2.0 -> 2.1.3 -> current install dir).

        2.1.3 is affected by CASSANDRA-8993 and can erroneously downsample
        the index summary of an old-format (2.0) sstable; a fixed version
        must detect that on startup, rebuild the summary, and refuse to
        downsample old-format sstables again.
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
        # Remember the current install dir so the final upgrade step can
        # return to it.
        original_install_dir = node.get_install_dir()

        # start out with a 2.0 version
        cluster.set_install_dir(version='2.0.12')
        node.set_install_dir(version='2.0.12')
        node.set_log_level("INFO")
        node.stop()

        remove_perf_disable_shared_mem(node)

        cluster.start()

        # Insert enough partitions to fill a full sample's worth of entries
        # in the index summary.  The default index_interval is 128, so every
        # 128th partition will get an entry in the summary.  The minimal downsampling
        # operation will remove every 128th entry in the summary.  So, we need
        # to have 128 entries in the summary, which means 128 * 128 partitions.
        # protocol_version=2: a 2.0 node does not speak newer protocols.
        session = self.patient_cql_connection(node, protocol_version=2)
        session.execute(
            "CREATE KEYSPACE testindexsummary WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}"
        )
        session.set_keyspace("testindexsummary")
        session.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")

        insert_statement = session.prepare(
            "INSERT INTO test (k, v) VALUES (? , ?)")
        execute_concurrent_with_args(session, insert_statement,
                                     [(i, i) for i in range(128 * 128)])

        # upgrade to 2.1.3
        session.cluster.shutdown()
        node.drain()
        node.watch_log_for("DRAINED")
        node.stop()
        cluster.set_install_dir(
            version='2.1.3')  # 2.1.3 is affected by CASSANDRA-8993
        node.set_install_dir(version='2.1.3')
        debug("Set new cassandra dir for %s: %s" %
              (node.name, node.get_install_dir()))

        # setup log4j / logback again (necessary moving from 2.0 -> 2.1)
        node.set_log_level("INFO")

        remove_perf_disable_shared_mem(node)

        node.start()

        session = self.patient_cql_connection(node)

        mbean = make_mbean('db', 'IndexSummaries')
        with JolokiaAgent(node) as jmx:
            # With a full sample, the average interval equals the default
            # index_interval of 128.
            avg_interval = jmx.read_attribute(mbean, 'AverageIndexInterval')
            self.assertEqual(128.0, avg_interval)

            # force downsampling of the index summary (if it were allowed)
            jmx.write_attribute(mbean, 'MemoryPoolCapacityInMB', 0)
            jmx.execute_method(mbean, 'redistributeSummaries')

            avg_interval = jmx.read_attribute(mbean, 'AverageIndexInterval')

            # after downsampling, the average interval goes up
            self.assertGreater(avg_interval, 128.0)

        # upgrade to the latest 2.1+ by using the original install dir
        session.cluster.shutdown()
        node.drain()
        node.watch_log_for("DRAINED")
        node.stop()
        cluster.set_install_dir(original_install_dir)
        node.set_install_dir(original_install_dir)
        debug("Set new cassandra dir for %s: %s" %
              (node.name, node.get_install_dir()))

        node.set_log_level("INFO")

        remove_perf_disable_shared_mem(node)

        node.start()

        # on startup, it should detect that the old-format sstable had its
        # index summary downsampled (forcing it to be rebuilt)
        node.watch_log_for("Detected erroneously downsampled index summary")

        session = self.patient_cql_connection(node)

        mbean = make_mbean('db', 'IndexSummaries')
        with JolokiaAgent(node) as jmx:
            # The rebuilt summary should be back at the full sampling level.
            avg_interval = jmx.read_attribute(mbean, 'AverageIndexInterval')
            self.assertEqual(128.0, avg_interval)

            # force downsampling of the index summary (if it were allowed)
            jmx.write_attribute(mbean, 'MemoryPoolCapacityInMB', 0)
            jmx.execute_method(mbean, 'redistributeSummaries')

            avg_interval = jmx.read_attribute(mbean, 'AverageIndexInterval')

            # post-8993, it shouldn't allow downsampling of old-format sstables
            self.assertEqual(128.0, avg_interval)
# Beispiel #10
# 0
def commitlog_size(node):
    """Return the node's TotalCommitLogSize metric, read over JMX."""
    mbean = make_mbean('metrics', type='CommitLog',
                       name='TotalCommitLogSize')
    with JolokiaAgent(node) as jmx:
        size = jmx.read_attribute(mbean, 'Value')
    return size