def force_test(self):
        """
        forcing an incremental repair should incrementally repair any nodes
        that are up, but should not promote the sstables to repaired
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'num_tokens': 1, 'commitlog_sync_period_in_ms': 500})
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        node2.stop()

        # repair should fail because node2 is down
        with self.assertRaises(ToolError):
            node1.repair(options=['ks'])

        # run with force flag
        node1.repair(options=['ks', '--force'])

        # ... and verify nothing was promoted to repaired
        self.assertNoRepairedSSTables(node1, 'ks')
        self.assertNoRepairedSSTables(node2, 'ks')
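Several of the repair examples in this collection assert on sstable repair status via helpers (assertNoRepairedSSTables, assertAllRepairedSSTables) that aren't shown. A minimal sketch of the first one, assuming ccm's node.run_sstablemetadata API and the same "Repaired at" parsing used in Example #18 below; the real dtest helper may differ:

    def assertNoRepairedSSTables(self, node, keyspace):
        # every sstable in the keyspace should report "Repaired at: 0"
        from re import findall
        out = node.run_sstablemetadata(keyspace=keyspace).stdout
        repaired_at = [int(y.split(' ')[0]) for y in findall('(?<=Repaired at: ).*', out)]
        self.assertTrue(all(t == 0 for t in repaired_at), repaired_at)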
Example #2
    def test_retry_policy_says_retry(self):
        session = self.make_session()
        pool = session._pools.get.return_value
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETRY, ConsistencyLevel.ONE)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.QUORUM)

        rf = ResponseFuture(session, message, query)
        rf.send_request()

        rf.session._pools.get.assert_called_once_with('ip1')
        pool.borrow_connection.assert_called_once_with(timeout=ANY)
        connection = pool.borrow_connection.return_value
        connection.send_msg.assert_called_once_with(rf.message, cb=ANY)

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(result)

        session.submit.assert_called_once_with(rf._retry_task, True)
        self.assertEqual(1, rf._query_retries)

        # simulate the executor running this
        rf._retry_task(True)

        # it should try again with the same host since this was
        # an UnavailableException
        rf.session._pools.get.assert_called_with('ip1')
        pool.borrow_connection.assert_called_with(timeout=ANY)
        connection = pool.borrow_connection.return_value
        connection.send_msg.assert_called_with(rf.message, cb=ANY)
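The ResponseFuture unit tests here and in later examples rely on a make_session fixture that isn't shown. A plausible sketch, assuming unittest.mock's Mock (already used by these tests); the real driver test helper may differ in detail:

    def make_session(self):
        # a Mock session whose query plan yields 'ip1' first, so
        # send_request() targets that host, and whose pool is live
        session = Mock(row_factory=lambda *args: list(args))
        session._load_balancer.make_query_plan.return_value = ['ip1', 'ip2']
        session._pools.get.return_value.is_shutdown = False
        return session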
Example #3
    def subrange_test(self):
        """
        running an incremental repair on a subrange of the ring should incrementally
        repair that range, but should not promote the sstables to repaired
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                                  'num_tokens': 1,
                                                  'commitlog_sync_period_in_ms': 500,
                                                  'partitioner': 'org.apache.cassandra.dht.Murmur3Partitioner'})
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        for node in cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')

        # only repair the partition k=0
        token = Murmur3Token.from_key(str(bytearray([0, 0, 0, 0])))
        # repair only the range containing that token
        node1.repair(options=['ks', '-st', str(token.value - 1), '-et', str(token.value)])

        # verify we have a mix of repaired and unrepaired sstables
        self.assertRepairedAndUnrepaired(node1, 'ks')
        self.assertRepairedAndUnrepaired(node2, 'ks')
        self.assertRepairedAndUnrepaired(node3, 'ks')
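The start/end tokens above come from hashing the partition key's serialized bytes; an INT key serializes as four big-endian bytes, so k=0 is b'\x00\x00\x00\x00' (str(bytearray(...)) is a Python 2 idiom). A Python 3-safe sketch for computing the token of any key inserted above, assuming the driver's cassandra.metadata.Murmur3Token:

import struct
from cassandra.metadata import Murmur3Token

# token for partition key k=0, as used for the -st/-et bounds above
token = Murmur3Token.from_key(struct.pack('>i', 0))
print(token.value)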
Example #4
    def test_ssl_connection(self):
        """
        Test to validate that we are able to connect to a cluster using ssl.

        test_ssl_connection performs a simple sanity check to ensure that we can connect to a cluster with ssl.

        @since 2.6.0
        @jira_ticket PYTHON-332
        @expected_result we can connect and perform some basic operations

        @test_category connection:ssl
        """

        # Resolve the absolute path to the client CA certs.
        abs_path_ca_cert_path = os.path.abspath(DEFAULT_CLIENT_CA_CERTS)

        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path,
                                                                               'ssl_version': ssl.PROTOCOL_TLSv1})
        self.session = self.cluster.connect()

        # attempt a few simple commands.
        insert_keyspace = """CREATE KEYSPACE ssltest
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
            """
        statement = SimpleStatement(insert_keyspace)
        statement.consistency_level = ConsistencyLevel.THREE
        self.session.execute(statement)

        drop_keyspace = "DROP KEYSPACE ssltest"

        statement = SimpleStatement(drop_keyspace)
        statement.consistency_level = ConsistencyLevel.ANY
        self.session.execute(statement)
Example #5
def find_within_distance_and_color(session, lat, lon, distance, color, fetch_size=20):
    # Find all points of the given color within 'distance' km of (lat, lon)
    query = """SELECT * FROM %s.%s WHERE solr_query='{"q":"color:%s", "fq":"+{!geofilt pt=%s,%s sfield=location d=%s}"}';""" \
            % (KEYSPACE, COLUMN_FAMILY, color, lat, lon, distance)
    statement = SimpleStatement(query)
    statement.fetch_size = fetch_size
    return session.execute(statement)
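A hypothetical invocation, assuming KEYSPACE and COLUMN_FAMILY name a DSE Search-indexed table with 'location' and 'color' fields:

# find red points within 5 km of downtown San Francisco
rows = find_within_distance_and_color(session, 37.7752, -122.4232,
                                      distance=5, color='red')
for row in rows:
    print(row)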
Example #6
    def test_repaired_tracking_with_partition_deletes(self):
        """
        check that when tracking repaired data status following a digest mismatch,
        repaired data mismatches are marked as unconfirmed, as we may skip sstables
        after the partition delete is encountered.
        @jira_ticket CASSANDRA-14145
        """
        session, node1, node2 = self.setup_for_repaired_data_tracking()
        stmt = SimpleStatement("INSERT INTO ks.tbl (k, c, v) VALUES (%s, %s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i, i))

        for node in self.cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')

        node1.repair(options=['ks'])
        node2.stop(wait_other_notice=True)

        session.execute("delete from ks.tbl where k = 5")

        node1.flush()
        node2.start(wait_other_notice=True)

        # expect unconfirmed inconsistencies as the partition deletes cause some sstables to be skipped
        with JolokiaAgent(node1) as jmx:
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5",
                                                     expect_unconfirmed_inconsistencies=True)
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5 AND c = 5",
                                                     expect_unconfirmed_inconsistencies=True)
            # no digest reads for range queries so blocking read repair metric isn't incremented
            # *all* sstables are read for partition ranges too, and as the repaired set is still in sync there should
            # be no inconsistencies
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl", expect_read_repair=False)
Example #7
    def test_add_callbacks(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        # test errback
        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        rf.add_callbacks(
            callback=self.assertEqual, callback_args=([{'col': 'val'}],),
            errback=self.assertIsInstance, errback_args=(Exception,))

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(result)
        self.assertRaises(Exception, rf.result)

        # test callback
        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        callback = Mock()
        expected_result = [{'col': 'val'}]
        arg = "positional"
        kwargs = {'one': 1, 'two': 2}
        rf.add_callbacks(
            callback=callback, callback_args=(arg,), callback_kwargs=kwargs,
            errback=self.assertIsInstance, errback_args=(Exception,))

        rf._set_result(self.make_mock_response(expected_result))
        self.assertEqual(rf.result(), expected_result)

        callback.assert_called_once_with(expected_result, arg, **kwargs)
Example #8
    def test_routing_key_is_ignored(self):
        """
        Compares the routing key generated by a simple partition key using the model with the one generated by the
        equivalent bound statement. It also verifies that basic operations work with no routing key
        @since 3.2
        @jira_ticket PYTHON-505
        @expected_result they shouldn't match

        @test_category object_mapper
        """

        prepared = self.session.prepare(
            """
          INSERT INTO {0}.basic_model_no_routing (k, v) VALUES  (?, ?)
          """.format(DEFAULT_KEYSPACE))
        bound = prepared.bind((1, 2))

        mrk = BasicModelNoRouting._routing_key_from_values([1], self.session.cluster.protocol_version)
        simple = SimpleStatement("")
        simple.routing_key = mrk
        self.assertNotEqual(bound.routing_key, simple.routing_key)

        # Verify that basic create, update and delete work with no routing key
        t = BasicModelNoRouting.create(k=2, v=3)
        t.update(v=4).save()
        f = BasicModelNoRouting.objects.filter(k=2).first()
        self.assertEqual(t, f)

        t.delete()
        self.assertEqual(BasicModelNoRouting.objects.count(), 0)
Example #9
    def test_hosts(self):
        """
        running an incremental repair with hosts specified should incrementally repair
        the given nodes, but should not promote the sstables to repaired
        """
        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                     'num_tokens': 1,
                                                                                     'commitlog_sync_period_in_ms': 500})
        self.init_default_config()
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        # run the repair with the hosts specified
        node1.repair(options=['ks', '-hosts', ','.join([node1.address(), node2.address()])])

        # ... and verify nothing was promoted to repaired
        self.assertNoRepairedSSTables(node1, 'ks')
        self.assertNoRepairedSSTables(node2, 'ks')
Example #10
    def test_routing_key_generation_complex(self):
        """
        Compares the routing key generated by a complex composite partition key using the model with the one
        generated by the equivalent bound statement
        @since 3.2
        @jira_ticket PYTHON-535
        @expected_result they should match

        @test_category object_mapper
        """
        prepared = self.session.prepare(
            """
          INSERT INTO {0}.complex_model_routing (partition, cluster, count, text, float, text_2) VALUES  (?, ?, ?, ?, ?, ?)
          """.format(DEFAULT_KEYSPACE))
        partition = uuid4()
        cluster = 1
        count = 2
        text = "text"
        float_value = 1.2  # renamed to avoid shadowing the builtin
        text_2 = "text_2"
        bound = prepared.bind((partition, cluster, count, text, float_value, text_2))
        mrk = ComplexModelRouting._routing_key_from_values([partition, cluster, text, float_value], self.session.cluster.protocol_version)
        simple = SimpleStatement("")
        simple.routing_key = mrk
        self.assertEqual(bound.routing_key, simple.routing_key)
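For composite partition keys, the routing key concatenates each serialized component framed as a 2-byte big-endian length, the component bytes, and a trailing zero byte. A minimal sketch of that framing, as a simplified illustration of what _routing_key_from_values produces for multi-column keys:

import struct

def composite_routing_key(serialized_components):
    # serialized_components: one bytes value per partition key column
    return b''.join(struct.pack('>H', len(c)) + c + b'\x00'
                    for c in serialized_components)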
Example #11
    def test_force_with_none_down(self):
        """
        if we force an incremental repair but all the involved nodes are up,
        we should run normally and promote sstables afterwards
        """
        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                     'num_tokens': 1,
                                                                                     'commitlog_sync_period_in_ms': 500})
        self.init_default_config()
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        # run with force flag
        node1.repair(options=['ks', '--force'])

        # ... and verify everything was still promoted
        self.assertAllRepairedSSTables(node1, 'ks')
        self.assertAllRepairedSSTables(node2, 'ks')
        self.assertAllRepairedSSTables(node3, 'ks')
Example #12
    def test_multiple_errbacks(self):
        session = self.make_session()
        pool = session._pools.get.return_value
        connection = Mock(spec=Connection)
        pool.borrow_connection.return_value = (connection, 1)

        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        callback = Mock()
        arg = "positional"
        kwargs = {'one': 1, 'two': 2}
        rf.add_errback(callback, arg, **kwargs)

        callback2 = Mock()
        arg2 = "another"
        kwargs2 = {'three': 3, 'four': 4}
        rf.add_errback(callback2, arg2, **kwargs2)

        expected_exception = Unavailable("message", 1, 2, 3)
        result = Mock(spec=UnavailableErrorMessage, info={'something': 'here'})
        result.to_exception.return_value = expected_exception
        rf._set_result(result)
        self.assertRaises(Exception, rf.result)

        callback.assert_called_once_with(expected_exception, arg, **kwargs)
        callback2.assert_called_once_with(expected_exception, arg2, **kwargs2)
Example #13
def validate_ssl_options(ssl_options):
        # try to connect with the given ssl_options, retrying a few times
        tries = 0
        while True:
            if tries > 5:
                raise RuntimeError("Failed to connect to SSL cluster after 5 attempts")
            try:
                cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options)
                session = cluster.connect()
                break
            except Exception:
                ex_type, ex, tb = sys.exc_info()
                log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                del tb
                tries += 1

        # attempt a few simple commands.
        insert_keyspace = """CREATE KEYSPACE ssltest
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
            """
        statement = SimpleStatement(insert_keyspace)
        statement.consistency_level = ConsistencyLevel.THREE
        session.execute(statement)

        drop_keyspace = "DROP KEYSPACE ssltest"
        statement = SimpleStatement(drop_keyspace)
        statement.consistency_level = ConsistencyLevel.ANY
        session.execute(statement)

        cluster.shutdown()
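A hypothetical call, reusing the CA bundle path and TLS version from the other SSL examples in this collection:

import os
import ssl

validate_ssl_options({'ca_certs': os.path.abspath(DEFAULT_CLIENT_CA_CERTS),
                      'ssl_version': ssl.PROTOCOL_TLSv1})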
Example #14
    def test_add_callbacks(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        # test errback
        rf = ResponseFuture(session, message, query)
        rf.send_request()

        rf.add_callbacks(
            callback=self.assertEqual, callback_args=([{'col': 'val'}],),
            errback=self.assertIsInstance, errback_args=(Exception,))

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(result)
        self.assertRaises(Exception, rf.result)

        # test callback
        rf = ResponseFuture(session, message, query)
        rf.send_request()

        rf.add_callbacks(
            callback=self.assertEqual, callback_args=([{'col': 'val'}],),
            errback=self.assertIsInstance, errback_args=(Exception,))

        response = Mock(spec=ResultMessage, kind=ResultMessage.KIND_ROWS, results=[{'col': 'val'}])
        rf._set_result(response)
        self.assertEqual(rf.result(), [{'col': 'val'}])
Example #15
def find_within_distance(session, lat, lon, distance, fetch_size=20):
    # Find all points within 'distance' km of (lat, lon)
    # http://localhost:8983/solr/geo.geo/select?wt=json&indent=true&q=*:*&fq={!geofilt%20pt=37.7752,-122.4232%20sfield=location%20d=5000}
    query = """SELECT * FROM %s.%s WHERE solr_query='{"q":"*:*", "fq":"{!geofilt pt=%s,%s sfield=location d=%s}"}';""" \
            % (KEYSPACE, COLUMN_FAMILY, lat, lon, distance)

    statement = SimpleStatement(query)
    statement.fetch_size = fetch_size
    return session.execute(statement)
Example #16
def find_within_distance_sorted(session, lat, lon, distance, fetch_size=20):
    # Find all points within 'distance' km of (lat, lon), sorted by distance
    # Not supported yet via CQL, need to use HTTP interface (DSP-5975)
    # http://localhost:8983/solr/geo.geo/select?wt=json&indent=true&fl=key,color&q=*:*&sfield=location&pt=37.7752,-122.4232&sort=geodist()%20asc&fl=_dist_:geodist(),key,color
    query = """SELECT * FROM %s.%s WHERE solr_query='{"q":"*:*", "fq":"+{!geofilt pt=%s,%s sfield=location d=%s}", "sort":"geodist(location,%s,%s) asc"}';""" \
            % (KEYSPACE, COLUMN_FAMILY, lat, lon, distance, lat, lon)
    statement = SimpleStatement(query)
    statement.fetch_size = fetch_size
    return session.execute(statement)
Example #17
def _execute_statement(model, statement, consistency_level, timeout):
    params = statement.get_context()
    s = SimpleStatement(str(statement), consistency_level=consistency_level, fetch_size=statement.fetch_size)
    if model._partition_key_index:
        key_values = statement.partition_key_values(model._partition_key_index)
        if not any(v is None for v in key_values):
            parts = model._routing_key_from_values(key_values, connection.get_cluster().protocol_version)
            s.routing_key = parts
            s.keyspace = model._get_keyspace()
    return connection.execute(s, params, timeout=timeout)
Example #18
    def test_repaired_tracking_with_mismatching_replicas(self):
        """
        verify that when replicas have different repaired sets, this can be detected via the digests
        computed at read time. All nodes start with the same data, but only 1 replica's sstables
        are marked repaired. Then a divergence is introduced by overwriting on 1 replica only, which
        is required to trigger a digest mismatch & full data read (for single partition reads).
        As the repaired sets are different between the replicas, but no other shortcutting occurs
        (no partition tombstones or sstable skipping) and no sstables are involved in a pending repair
        session, we expect confirmed inconsistencies to be reported.
        There are two variants of this, for single partition slice & names reads and range reads.
        @jira_ticket CASSANDRA-14145
        """
        session, node1, node2 = self.setup_for_repaired_data_tracking()
        stmt = SimpleStatement("INSERT INTO ks.tbl (k, c, v) VALUES (%s, %s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i, i))

        for node in self.cluster.nodelist():
            node.flush()

        for i in range(10,20):
            session.execute(stmt, (i, i, i))

        for node in self.cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')

        # stop node 2 and mark its sstables repaired
        node2.stop(wait_other_notice=True)
        node2.run_sstablerepairedset(keyspace='ks')
        # before restarting node2 overwrite some data on node1 to trigger digest mismatches
        session.execute("insert into ks.tbl (k, c, v) values (5, 5, 55)")
        node2.start(wait_for_binary_proto=True)

        out1 = node1.run_sstablemetadata(keyspace='ks').stdout
        out2 = node2.run_sstablemetadata(keyspace='ks').stdout

        # verify the repaired at times for the sstables on node1/node2
        assert all(t == 0 for t in [int(x) for x in [y.split(' ')[0] for y in findall('(?<=Repaired at: ).*', out1)]])
        assert all(t > 0 for t in [int(x) for x in [y.split(' ')[0] for y in findall('(?<=Repaired at: ).*', out2)]])

        # we expect inconsistencies due to sstables being marked repaired on one replica only
        # these are marked confirmed because no sessions are pending and the mismatching
        # sstables are all repaired (there are no partition deletes causing sstables to be skipped)
        with JolokiaAgent(node1) as jmx:
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5",
                                                     expect_confirmed_inconsistencies=True)
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5 AND c = 5",
                                                     expect_confirmed_inconsistencies=True)
            # no digest reads for range queries so read repair metric isn't incremented
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl", expect_read_repair=False)
Example #19
    def test_can_connect_with_ssl_client_auth(self):
        """
        Test to validate that we can connect to a C* cluster that has client_auth enabled.

        This test will set up and use a C* cluster that has client authentication enabled. It will then attempt
        to connect using valid client keys and certs (that are in the server's truststore), and attempt to perform some
        basic operations
        @since 2.7.0

        @expected_result The client can connect via SSL and perform some basic operations

        @test_category connection:ssl
        """

        # Need to get absolute paths for certs/key
        abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
        abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE)
        abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE)

        tries = 0
        while True:
            if tries > 5:
                raise RuntimeError("Failed to connect to SSL cluster after 5 attempts")
            try:
                cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path,
                                                                                  'ssl_version': ssl.PROTOCOL_TLSv1,
                                                                                  'keyfile': abs_driver_keyfile,
                                                                                  'certfile': abs_driver_certfile})

                session = cluster.connect()
                break
            except Exception:
                ex_type, ex, tb = sys.exc_info()
                log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                del tb
                tries += 1

        # attempt a few simple commands.

        insert_keyspace = """CREATE KEYSPACE ssltest
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
            """
        statement = SimpleStatement(insert_keyspace)
        statement.consistency_level = ConsistencyLevel.THREE
        session.execute(statement)

        drop_keyspace = "DROP KEYSPACE ssltest"
        statement = SimpleStatement(drop_keyspace)
        statement.consistency_level = ConsistencyLevel.ANY
        session.execute(statement)

        cluster.shutdown()
Example #20
    def test_write_timeout_error_message(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_write_timeout.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query)
        rf.send_request()

        result = Mock(spec=WriteTimeoutErrorMessage, info={})
        rf._set_result(result)
        self.assertRaises(Exception, rf.result)
Example #21
    def test_retry_policy_says_ignore(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.IGNORE, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query)
        rf.send_request()

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(result)
        self.assertEqual(None, rf.result())
Example #22
def execute_unlimited_query(stream_key, cols, time_bin, time_range, session=None, prepared=None,
                            query_consistency=None):

    base = ("select %s from %s where subsite=%%s and node=%%s and sensor=%%s and bin=%%s " + \
            "and method=%%s and time>=%%s and time<=%%s") % (','.join(cols), stream_key.stream.name)
    query = SimpleStatement(base)
    query.consistency_level = query_consistency
    return list(session.execute(query, (stream_key.subsite,
                                        stream_key.node,
                                        stream_key.sensor,
                                        time_bin,
                                        stream_key.method,
                                        time_range.start,
                                        time_range.stop)))
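A hypothetical call; stream_key and time_range are domain objects carrying the attributes referenced above (subsite/node/sensor/method and start/stop), and cols lists the columns to select:

rows = execute_unlimited_query(stream_key, ['time', 'deployment'], time_bin,
                               time_range, session=session,
                               query_consistency=ConsistencyLevel.QUORUM)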
Example #23
    def test_read_timeout_error_message(self):
        session = self.make_session()
        query = SimpleStatement("SELECT * FROM foo")
        query.retry_policy = Mock()
        query.retry_policy.on_read_timeout.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query, 1)
        rf.send_request()

        result = Mock(spec=ReadTimeoutErrorMessage, info={})
        rf._set_result(result)

        self.assertRaises(Exception, rf.result)
Example #24
    def test_unavailable_error_message(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query, 1)
        rf._query_retries = 1
        rf.send_request()

        result = Mock(spec=UnavailableErrorMessage, info={"required_replicas":2, "alive_replicas": 1, "consistency": 1})
        rf._set_result(None, None, None, result)
        self.assertRaises(Exception, rf.result)
Example #25
        def paged_query_generator(cql_query, django_query):
            statement = SimpleStatement(
                str(cql_query._select_query()),
                consistency_level=ConsistencyLevel.ONE
            )

            cassandra_meta = getattr(django_query, 'cassandra_meta', None)
            if cassandra_meta is not None and hasattr(cassandra_meta, 'fetch_size'):
                statement.fetch_size = cassandra_meta.fetch_size

            # bind each where-clause value by its context id; Token values
            # are unwrapped, and single-element token tuples become scalars
            parameters = {}
            for where in cql_query._where:
                if isinstance(where.value, Token):
                    value = where.value.value
                    if len(value) == 1:
                        value = value[0]
                else:
                    value = where.value

                parameters[str(where.query_value.context_id)] = value

            django_query.connection.session.row_factory = ordered_dict_factory

            results = django_query.connection.session.execute(statement, parameters)

            # merge deferred values into each returned row
            for row in results:
                for key, value in cql_query._deferred_values.iteritems():
                    row[key] = value

                yield row
Example #26
    def _perform_cql_statement(self, text, consistency_level, expected_exception):
        """
        Simple helper method to perform cql statements and check for expected exceptions
        @param text CQL statement to execute
        @param consistency_level Consistency level at which it is to be executed
        @param expected_exception Exception expected to be thrown, or None
        """
        statement = SimpleStatement(text)
        statement.consistency_level = consistency_level

        if expected_exception is None:
            self.execute_helper(self.session, statement)
        else:
            with self.assertRaises(expected_exception):
                self.execute_helper(self.session, statement)
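A hypothetical use of the helper, checking that a write at CL.ALL raises Unavailable while a replica is down but succeeds at CL.ONE (table name assumed):

        self._perform_cql_statement("INSERT INTO ks.tbl (k, v) VALUES (1, 1)",
                                    ConsistencyLevel.ALL, Unavailable)
        self._perform_cql_statement("INSERT INTO ks.tbl (k, v) VALUES (1, 1)",
                                    ConsistencyLevel.ONE, None)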
Example #27
    def test_can_connect_with_ssl_ca(self):
        """
        Test to validate that we are able to connect to a cluster using ssl.

        test_can_connect_with_ssl_ca performs a simple sanity check to ensure that we can connect to a cluster with ssl
        authentication via a simple server-side shared certificate authority. The client is able to validate the identity
        of the server; however, by using this method the server can't trust the client unless additional authentication
        has been provided.

        @since 2.6.0
        @jira_ticket PYTHON-332
        @expected_result The client can connect via SSL and perform some basic operations

        @test_category connection:ssl
        """

        # Resolve the absolute path to the client CA certs.
        abs_path_ca_cert_path = os.path.abspath(DEFAULT_CLIENT_CA_CERTS)

        tries = 0
        while True:
            if tries > 5:
                raise RuntimeError("Failed to connect to SSL cluster after 5 attempts")
            try:
                cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options={'ca_certs': abs_path_ca_cert_path,
                                                                                  'ssl_version': ssl.PROTOCOL_TLSv1})
                session = cluster.connect()
                break
            except Exception:
                ex_type, ex, tb = sys.exc_info()
                log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
                del tb
                tries += 1

        # attempt a few simple commands.
        insert_keyspace = """CREATE KEYSPACE ssltest
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
            """
        statement = SimpleStatement(insert_keyspace)
        statement.consistency_level = ConsistencyLevel.THREE
        session.execute(statement)

        drop_keyspace = "DROP KEYSPACE ssltest"
        statement = SimpleStatement(drop_keyspace)
        statement.consistency_level = ConsistencyLevel.ANY
        session.execute(statement)

        cluster.shutdown()
Example #28
    def test_errback(self):
        session = self.make_session()
        query = SimpleStatement("INSERT INFO foo (a, b) VALUES (1, 2)")
        query.retry_policy = Mock()
        query.retry_policy.on_unavailable.return_value = (RetryPolicy.RETHROW, None)
        message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)

        rf = ResponseFuture(session, message, query)
        rf.send_request()

        rf.add_errback(self.assertIsInstance, Exception)

        result = Mock(spec=UnavailableErrorMessage, info={})
        rf._set_result(result)
        self.assertRaises(Exception, rf.result)

        # this should get called immediately now that the error is set
        rf.add_errback(self.assertIsInstance, Exception)
Example #29
    def test_repaired_tracking_with_varying_sstable_sets(self):
        """
        verify that repaired data digests are computed over the merged data for each replica
        and that the particular number of sstables on each doesn't affect the comparisons.
        Both replicas start with the same repaired set, comprising 2 sstables. node1's is
        then compacted and additional unrepaired data added (which overwrites some in the
        repaired set). We expect the repaired digests to still match, as the tracking will
        force all sstables containing the partitions to be read.
        There are two variants of this, for single partition slice & names reads and range reads.
        @jira_ticket CASSANDRA-14145
        """
        session, node1, node2 = self.setup_for_repaired_data_tracking()
        stmt = SimpleStatement("INSERT INTO ks.tbl (k, c, v) VALUES (%s, %s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i, i))

        for node in self.cluster.nodelist():
            node.flush()

        for i in range(10,20):
            session.execute(stmt, (i, i, i))

        for node in self.cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')

        node1.repair(options=['ks'])
        node2.stop(wait_other_notice=True)

        session.execute("insert into ks.tbl (k, c, v) values (5, 5, 55)")
        session.execute("insert into ks.tbl (k, c, v) values (15, 15, 155)")
        node1.flush()
        node1.compact()
        node1.compact()
        node2.start(wait_other_notice=True)

        # we don't expect any inconsistencies as all repaired data is read on both replicas
        with JolokiaAgent(node1) as jmx:
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5")
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl WHERE k = 5 AND c = 5")
            # no digest reads for range queries so read repair metric isn't incremented
            self.query_and_check_repaired_mismatches(jmx, session, "SELECT * FROM ks.tbl", expect_read_repair=False)
Example #30
    def test_routing_key_generation_multi(self):
        """
        Compares the routing key generated by a composite partition key using the model with the one generated by the
        equivalent bound statement
        @since 3.2
        @jira_ticket PYTHON-535
        @expected_result they should match

        @test_category object_mapper
        """

        prepared = self.session.prepare(
            """
          INSERT INTO {0}.basic_model_routing_multi (k, v) VALUES  (?, ?)
          """.format(DEFAULT_KEYSPACE))
        bound = prepared.bind((1, 2))
        mrk = BasicModelMulti._routing_key_from_values([1, 2], self.session.cluster.protocol_version)
        simple = SimpleStatement("")
        simple.routing_key = mrk
        self.assertEqual(bound.routing_key, simple.routing_key)
Example #31
    def _empty_vs_gcable_no_repair(self, sequential):
        """
        Repairing an empty partition and a tombstoned partition older than gc grace
        should be treated as the same, and no repair is necessary.
        @jira_ticket CASSANDRA-8979
        """
        cluster = self.cluster
        cluster.populate(2)
        cluster.set_configuration_options(
            values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)
        cluster.start()
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        # create keyspace with RF=2 to be able to be repaired
        self.create_ks(session, 'ks', 2)
        # we create two tables: one has a low gc_grace_seconds so that its
        # tombstoned data becomes droppable during the test (though we never
        # actually drop it), the other has the default gc grace.
        # compaction is disabled so that no data is purged
        query = """
            CREATE TABLE cf1 (
                key text,
                c1 text,
                c2 text,
                PRIMARY KEY (key, c1)
            )
            WITH gc_grace_seconds=1
            AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
        """
        session.execute(query)
        time.sleep(.5)
        query = """
            CREATE TABLE cf2 (
                key text,
                c1 text,
                c2 text,
                PRIMARY KEY (key, c1)
            )
            WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
        """
        session.execute(query)
        time.sleep(.5)

        # take down node2, so that only node1 has gc-able data
        node2.stop(wait_other_notice=True)
        for cf in ['cf1', 'cf2']:
            # insert some data
            for i in xrange(0, 10):
                for j in xrange(0, 1000):
                    query = SimpleStatement(
                        "INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')"
                        .format(cf, i, j),
                        consistency_level=ConsistencyLevel.ONE)
                    session.execute(query)
            node1.flush()
            # delete that data, half with row tombstones and the rest with cell range tombstones
            for i in xrange(0, 5):
                query = SimpleStatement(
                    "DELETE FROM {} WHERE key='k{}'".format(cf, i),
                    consistency_level=ConsistencyLevel.ONE)
                session.execute(query)
            node1.flush()
            for i in xrange(5, 10):
                for j in xrange(0, 1000):
                    query = SimpleStatement(
                        "DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(
                            cf, i, j),
                        consistency_level=ConsistencyLevel.ONE)
                    session.execute(query)
            node1.flush()

        # sleep until gc grace seconds pass so that cf1 can be dropped
        time.sleep(2)

        # bring up node2 and repair
        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
        node2.repair(
            _repair_options(self.cluster.version(),
                            ks='ks',
                            sequential=sequential))

        # check no rows will be returned
        for cf in ['cf1', 'cf2']:
            for i in xrange(0, 10):
                query = SimpleStatement(
                    "SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i),
                    consistency_level=ConsistencyLevel.ALL)
                res = list(session.execute(query))
                self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0,
                                 res)

        # check log for no repair happened for gcable data
        out_of_sync_logs = node2.grep_log(
            "/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync for cf1"
        )
        self.assertEqual(
            len(out_of_sync_logs), 0,
            "GC-able data does not need to be repaired with empty data: " +
            str([elt[0] for elt in out_of_sync_logs]))
        # check log for actual repair for non gcable data
        out_of_sync_logs = node2.grep_log(
            "/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync for cf2"
        )
        self.assertGreater(len(out_of_sync_logs), 0,
                           "Non GC-able data should be repaired")
Example #32
    def consistent_repair_test(self):
        cluster = self.cluster
        cluster.set_configuration_options(
            values={
                'hinted_handoff_enabled': False,
                'num_tokens': 1,
                'commitlog_sync_period_in_ms': 500
            })
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        # make data inconsistent between nodes
        session = self.patient_exclusive_cql_connection(node3)
        session.execute(
            "CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}"
        )
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))
        node3.flush()
        time.sleep(1)
        node3.stop(gently=False)
        stmt.consistency_level = ConsistencyLevel.QUORUM

        session = self.exclusive_cql_connection(node1)
        for i in range(10):
            session.execute(stmt, (i + 10, i + 10))
        node1.flush()
        time.sleep(1)
        node1.stop(gently=False)
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
        session = self.exclusive_cql_connection(node2)
        for i in range(10):
            session.execute(stmt, (i + 20, i + 20))
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)

        # flush and check that no sstables are marked repaired
        for node in cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')
            session = self.patient_exclusive_cql_connection(node)
            results = list(session.execute("SELECT * FROM system.repairs"))
            self.assertEqual(len(results), 0, str(results))

        # disable compaction so we can verify sstables are marked pending repair
        for node in cluster.nodelist():
            node.nodetool('disableautocompaction ks tbl')

        node1.repair(options=['ks'])

        # check that all participating nodes have the repair recorded in their system
        # table, that all nodes are listed as participants, and that all sstables are
        # (still) marked pending repair
        expected_participants = {n.address() for n in cluster.nodelist()}
        recorded_pending_ids = set()
        for node in cluster.nodelist():
            session = self.patient_exclusive_cql_connection(node)
            results = list(session.execute("SELECT * FROM system.repairs"))
            self.assertEqual(len(results), 1)
            result = results[0]
            self.assertEqual(set(result.participants), expected_participants)
            self.assertEqual(result.state, ConsistentState.FINALIZED,
                             "4=FINALIZED")
            pending_id = result.parent_id
            self.assertAllPendingRepairSSTables(node, 'ks', pending_id)
            recorded_pending_ids.add(pending_id)

        self.assertEqual(len(recorded_pending_ids), 1)

        # sstables are compacted out of pending repair by a compaction
        # task, we disabled compaction earlier in the test, so here we
        # force the compaction and check that all sstables are promoted
        for node in cluster.nodelist():
            node.nodetool('compact ks tbl')
            self.assertAllRepairedSSTables(node, 'ks')
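The ConsistentState constants used above aren't defined in these examples; a sketch mirroring the ordinals of Cassandra's ConsistentSession.State enum (hence the "4=FINALIZED" assertion message):

class ConsistentState(object):
    PREPARING = 0
    PREPARED = 1
    REPAIRING = 2
    FINALIZE_PROMISED = 3
    FINALIZED = 4
    FAILED = 5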
Example #33
def main():
    cluster = Cluster(['127.0.0.1'], port=9042)
    session = cluster.connect()

    log.info("creating keyspace...")
    session.execute("""
        CREATE KEYSPACE IF NOT EXISTS %s
        WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
        """ % KEYSPACE)

    log.info("setting keyspace...")
    session.set_keyspace(KEYSPACE)

    session.execute("""DROP TABLE  mytable""")

    log.info("creating table...")
    session.execute("""
        CREATE TABLE IF NOT EXISTS mytable (
            event_number                                       text,
            date_time                                          text,
            address_rounded_to_block_number_or_intersection    text,
            patrol_beat                                        text,
            incident_type                                      text,
            incident_type_description                          text,
            priority                                            int,
            time                                               time,
            hour                                               text,
            priority_hour                                      text,
            PRIMARY KEY (event_number)
        )
        """)

    query = SimpleStatement("""
        INSERT INTO mytable (event_number, date_time, address_rounded_to_block_number_or_intersection, 
                            patrol_beat, incident_type, incident_type_description, 
                            priority, time, hour, priority_hour)

        VALUES (%(event_number)s, %(date_time)s, %(address_rounded_to_block_number_or_intersection)s,
                %(patrol_beat)s, %(incident_type)s, %(incident_type_description)s,
                %(priority)s, %(time)s, %(hour)s, %(priority_hour)s)
        """, consistency_level=ConsistencyLevel.ONE)

    prepared = session.prepare("""
        INSERT INTO mytable (event_number, date_time, address_rounded_to_block_number_or_intersection, 
                            patrol_beat, incident_type, incident_type_description, 
                            priority, time, hour, priority_hour)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """)

    import pandas as pd
    from tqdm import tqdm

    data = pd.read_csv('oak-crimes-for-cassandra.csv').dropna().sample(10000, random_state=42)

    for i, row in tqdm(data.iterrows()):
        # log.info("inserting row %d" % i)
        # log.info(tuple(row))
        # session.execute(query, dict(key="key%d" % i, a='a', b='b'))
        session.execute(prepared, tuple(row))

    future = session.execute_async("SELECT * FROM mytable")
    # log.info("key\tcol1\tcol2")
    # log.info("---\t----\t----")

    try:
        rows = future.result()
    except Exception:
        log.exception("Error reading rows:")
        return

    for row in rows:
        log.info(row)
Example #34
def select_all():
    statement = SimpleStatement("SELECT * FROM person")
    result = session.execute(statement)
    return result
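A hypothetical usage; select_all assumes a module-level session bound to the keyspace containing the person table:

for row in select_all():
    print(row)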
Example #35
    def test_mv_filtering(self):
        """
        Test to ensure that cql filtering where clauses are properly supported in the python driver.

        test_mv_filtering tests that various complex MV where clauses produce the correct results. It also validates
        that these results and the grammar are supported appropriately.

        @since 3.0.0
        @jira_ticket PYTHON-399
        @expected_result Materialized view where clauses should produce the appropriate results.

        @test_category materialized_view
        """
        create_table = """CREATE TABLE {0}.scores(
                        user TEXT,
                        game TEXT,
                        year INT,
                        month INT,
                        day INT,
                        score INT,
                        PRIMARY KEY (user, game, year, month, day)
                        )""".format(self.keyspace_name)

        self.session.execute(create_table)

        create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
                        SELECT * FROM {0}.scores
                        WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
                        PRIMARY KEY (game, score, user, year, month, day)
                        WITH CLUSTERING ORDER BY (score DESC)""".format(
            self.keyspace_name)

        create_mv_dailyhigh = """CREATE MATERIALIZED VIEW {0}.dailyhigh AS
                        SELECT * FROM {0}.scores
                        WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL
                        PRIMARY KEY ((game, year, month, day), score, user)
                        WITH CLUSTERING ORDER BY (score DESC)""".format(
            self.keyspace_name)

        create_mv_monthlyhigh = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
                        SELECT * FROM {0}.scores
                        WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
                        PRIMARY KEY ((game, year, month), score, user, day)
                        WITH CLUSTERING ORDER BY (score DESC)""".format(
            self.keyspace_name)

        create_mv_filtereduserhigh = """CREATE MATERIALIZED VIEW {0}.filtereduserhigh AS
                        SELECT * FROM {0}.scores
                        WHERE user in ('jbellis', 'pcmanus') AND game IS NOT NULL AND score IS NOT NULL AND year is NOT NULL AND day is not NULL and month IS NOT NULL
                        PRIMARY KEY (game, score, user, year, month, day)
                        WITH CLUSTERING ORDER BY (score DESC)""".format(
            self.keyspace_name)

        self.session.execute(create_mv_alltime)
        self.session.execute(create_mv_dailyhigh)
        self.session.execute(create_mv_monthlyhigh)
        self.session.execute(create_mv_filtereduserhigh)

        prepared_insert = self.session.prepare(
            """INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES  (?, ?, ? ,? ,?, ?)"""
            .format(self.keyspace_name))

        bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 5, 1, 4000))
        self.session.execute(bound)
        bound = prepared_insert.bind(('jbellis', 'Coup', 2015, 5, 3, 1750))
        self.session.execute(bound)
        bound = prepared_insert.bind(('yukim', 'Coup', 2015, 5, 3, 2250))
        self.session.execute(bound)
        bound = prepared_insert.bind(('tjake', 'Coup', 2015, 5, 3, 500))
        self.session.execute(bound)
        bound = prepared_insert.bind(('iamaleksey', 'Coup', 2015, 6, 1, 2500))
        self.session.execute(bound)
        bound = prepared_insert.bind(('tjake', 'Coup', 2015, 6, 2, 1000))
        self.session.execute(bound)
        bound = prepared_insert.bind(('pcmanus', 'Coup', 2015, 6, 2, 2000))
        self.session.execute(bound)
        bound = prepared_insert.bind(('jmckenzie', 'Coup', 2015, 6, 9, 2700))
        self.session.execute(bound)
        bound = prepared_insert.bind(('jbellis', 'Coup', 2015, 6, 20, 3500))
        self.session.execute(bound)
        bound = prepared_insert.bind(
            ('jbellis', 'Checkers', 2015, 6, 20, 1200))
        self.session.execute(bound)
        bound = prepared_insert.bind(('jbellis', 'Chess', 2015, 6, 21, 3500))
        self.session.execute(bound)
        bound = prepared_insert.bind(('pcmanus', 'Chess', 2015, 1, 25, 3200))
        self.session.execute(bound)

        # Test simple statement and alltime high filtering
        query_statement = SimpleStatement(
            "SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(
                self.keyspace_name),
            consistency_level=ConsistencyLevel.QUORUM)
        results = self.session.execute(query_statement)
        self.assertEqual(results[0].game, 'Coup')
        self.assertEqual(results[0].year, 2015)
        self.assertEqual(results[0].month, 5)
        self.assertEqual(results[0].day, 1)
        self.assertEqual(results[0].score, 4000)
        self.assertEqual(results[0].user, "pcmanus")

        # Test prepared statement and daily high filtering
        prepared_query = self.session.prepare(
            "SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?"
            .format(self.keyspace_name))
        bound_query = prepared_query.bind(("Coup", 2015, 6, 2))
        results = self.session.execute(bound_query)
        self.assertEqual(results[0].game, 'Coup')
        self.assertEqual(results[0].year, 2015)
        self.assertEqual(results[0].month, 6)
        self.assertEqual(results[0].day, 2)
        self.assertEqual(results[0].score, 2000)
        self.assertEqual(results[0].user, "pcmanus")

        self.assertEqual(results[1].game, 'Coup')
        self.assertEqual(results[1].year, 2015)
        self.assertEqual(results[1].month, 6)
        self.assertEqual(results[1].day, 2)
        self.assertEqual(results[1].score, 1000)
        self.assertEqual(results[1].user, "tjake")

        # Test monthly high range queries
        prepared_query = self.session.prepare(
            "SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?"
            .format(self.keyspace_name))
        bound_query = prepared_query.bind(("Coup", 2015, 6, 2500, 3500))
        results = self.session.execute(bound_query)
        self.assertEqual(results[0].game, 'Coup')
        self.assertEqual(results[0].year, 2015)
        self.assertEqual(results[0].month, 6)
        self.assertEqual(results[0].day, 20)
        self.assertEqual(results[0].score, 3500)
        self.assertEqual(results[0].user, "jbellis")

        self.assertEqual(results[1].game, 'Coup')
        self.assertEqual(results[1].year, 2015)
        self.assertEqual(results[1].month, 6)
        self.assertEqual(results[1].day, 9)
        self.assertEqual(results[1].score, 2700)
        self.assertEqual(results[1].user, "jmckenzie")

        self.assertEqual(results[2].game, 'Coup')
        self.assertEqual(results[2].year, 2015)
        self.assertEqual(results[2].month, 6)
        self.assertEqual(results[2].day, 1)
        self.assertEqual(results[2].score, 2500)
        self.assertEqual(results[2].user, "iamaleksey")

        # Test filtered user high scores
        query_statement = SimpleStatement(
            "SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(
                self.keyspace_name),
            consistency_level=ConsistencyLevel.QUORUM)
        results = self.session.execute(query_statement)
        self.assertEqual(results[0].game, 'Chess')
        self.assertEqual(results[0].year, 2015)
        self.assertEqual(results[0].month, 6)
        self.assertEqual(results[0].day, 21)
        self.assertEqual(results[0].score, 3500)
        self.assertEqual(results[0].user, "jbellis")

        self.assertEqual(results[1].game, 'Chess')
        self.assertEqual(results[1].year, 2015)
        self.assertEqual(results[1].month, 1)
        self.assertEqual(results[1].day, 25)
        self.assertEqual(results[1].score, 3200)
        self.assertEqual(results[1].user, "pcmanus")
Example #36
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(['127.0.0.1'])
connection = cluster.connect()
createFile = 'create.cql'
with open(createFile, mode='r') as f:
    lines = f.read()
    requests = lines.split(';')
    for request in requests:
        formatted_request = request.strip()
        if formatted_request:
            print(f"Executing {formatted_request}")
            query = SimpleStatement(formatted_request,
                                    consistency_level=ConsistencyLevel.ANY)
            connection.execute(query)
            print(f'{formatted_request} executed!')
print("Creating Done!")
cluster.shutdown()

cluster = Cluster()
connection = cluster.connect('labwork1')
workFile = 'work.cql'
with open(workFile, mode='r') as f:
    lines = f.read()
    requests = lines.split(';')
    for request in requests:
        formatted_request = request.strip()
        if formatted_request:
            print(f"Executing {formatted_request}")
            # assumption: the snippet is truncated here; executing each
            # statement mirrors the create.cql loop above
            connection.execute(formatted_request)
            print(f'{formatted_request} executed!')
cluster.shutdown()
Example #37
    def tombstone_failure_threshold_message_test(self):
        """
        Ensure nodes return an error message in case of TombstoneOverwhelmingExceptions rather
        than dropping the request. A drop makes the coordinator wait for the specified
        read_request_timeout_in_ms.
        @jira_ticket CASSANDRA-7886
        """

        self.allow_log_errors = True
        self.cluster.set_configuration_options(
            values={
                'tombstone_failure_threshold': 500,
                'read_request_timeout_in_ms': 30000,  # 30 seconds
                'range_request_timeout_in_ms': 40000
            })
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_cql_connection(node1)

        self.create_ks(session, 'test', 3)
        session.execute("CREATE TABLE test ( "
                        "id int, mytext text, col1 int, col2 int, col3 int, "
                        "PRIMARY KEY (id, mytext) )")

        # Add data with tombstones
        values = [str(i) for i in range(1000)]
        for value in values:
            session.execute(
                SimpleStatement(
                    "insert into test (id, mytext, col1) values (1, '{}', null) "
                    .format(value),
                    consistency_level=CL.ALL))

        failure_msg = ("Scanned over.* tombstones.* query aborted")

        @timed(25)
        def read_failure_query():
            assert_invalid(
                session,
                SimpleStatement("select * from test where id in (1,2,3,4,5)",
                                consistency_level=CL.ALL),
                expected=ReadTimeout
                if self.cluster.version() < '3' else ReadFailure,
            )

        read_failure_query()

        failure = (node1.grep_log(failure_msg) or node2.grep_log(failure_msg)
                   or node3.grep_log(failure_msg))

        self.assertTrue(
            failure, ("Cannot find tombstone failure threshold error in log "
                      "after failed query"))
        mark1 = node1.mark_log()
        mark2 = node2.mark_log()
        mark3 = node3.mark_log()

        @timed(35)
        def range_request_failure_query():
            assert_invalid(
                session,
                SimpleStatement("select * from test",
                                consistency_level=CL.ALL),
                expected=ReadTimeout
                if self.cluster.version() < '3' else ReadFailure,
            )

        range_request_failure_query()

        failure = (
            node1.watch_log_for(failure_msg, from_mark=mark1, timeout=5)
            or node2.watch_log_for(failure_msg, from_mark=mark2, timeout=5)
            or node3.watch_log_for(failure_msg, from_mark=mark3, timeout=5))

        self.assertTrue(
            failure, ("Cannot find tombstone failure threshold error in log "
                      "after range_request_timeout_query"))
Example #38
File: main.py Project: aKondratyuk/DBIS_2
from cassandra.cluster import Cluster
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement

cluster = Cluster()
connection = cluster.connect('image2code')

path = '/home/andrii/.devcenter/DevCenter/CQLScripts/'
CQLscripts = ['create.cql', 'work.cql', 'drop.cql']

for script in CQLscripts:
    with open(path + script, mode='r') as f:
        txt = f.read()
        statements = txt.split(';')
        for statement in statements:
            statement = statement.strip()
            if statement != '':
                print(statement)
                # create.cql and drop.cql are schema scripts, so use QUORUM;
                # the work.cql statements only need ONE
                if script in ('create.cql', 'drop.cql'):
                    query = SimpleStatement(
                        statement, consistency_level=ConsistencyLevel.QUORUM)
                else:
                    query = SimpleStatement(
                        statement, consistency_level=ConsistencyLevel.ONE)
                connection.execute(query)

Example #39
    def range_query(self,
                    table_name,
                    column_names,
                    start_key,
                    end_key,
                    limit,
                    offset=0,
                    start_inclusive=True,
                    end_inclusive=True,
                    keys_only=False):
        """ 
    Gets a dense range ordered by keys. Returns an ordered list of 
    a dictionary of [key:{column1:value1, column2:value2},...]
    or a list of keys if keys only.
     
    Args:
      table_name: Name of table to access
      column_names: Columns which get returned within the key range
      start_key: String for which the query starts at
      end_key: String for which the query ends at
      limit: Maximum number of results to return
      offset: Cuts off these many from the results [offset:]
      start_inclusive: Boolean if results should include the start_key
      end_inclusive: Boolean if results should include the end_key
      keys_only: Boolean if to only keys and not values
    Raises:
      TypeError: If an argument passed in was not of the expected type.
      AppScaleDBConnectionError: If the range_query could not be performed due
        to an error with Cassandra.
    Returns:
      An ordered list of dictionaries of key=>columns/values
    """
        if not isinstance(table_name, str):
            raise TypeError('table_name must be a string')
        if not isinstance(column_names, list):
            raise TypeError('column_names must be a list')
        if not isinstance(start_key, str):
            raise TypeError('start_key must be a string')
        if not isinstance(end_key, str):
            raise TypeError('end_key must be a string')
        if not isinstance(limit, (int, long)) and limit is not None:
            raise TypeError('limit must be int, long, or NoneType')
        if not isinstance(offset, (int, long)):
            raise TypeError('offset must be int or long')

        if start_inclusive:
            gt_compare = '>='
        else:
            gt_compare = '>'

        if end_inclusive:
            lt_compare = '<='
        else:
            lt_compare = '<'

        query_limit = ''
        if limit is not None:
            query_limit = 'LIMIT {}'.format(len(column_names) * limit)

        statement = """
      SELECT * FROM "{table}" WHERE
      token({key}) {gt_compare} %s AND
      token({key}) {lt_compare} %s AND
      {column} IN %s
      {limit}
      ALLOW FILTERING
    """.format(table=table_name,
               key=ThriftColumn.KEY,
               gt_compare=gt_compare,
               lt_compare=lt_compare,
               column=ThriftColumn.COLUMN_NAME,
               limit=query_limit)

        query = SimpleStatement(statement, retry_policy=BASIC_RETRIES)
        parameters = (bytearray(start_key), bytearray(end_key),
                      ValueSequence(column_names))

        try:
            results = self.session.execute(query, parameters=parameters)

            results_list = []
            current_item = {}
            current_key = None
            for (key, column, value) in results:
                if keys_only:
                    results_list.append(key)
                    continue

                if key != current_key:
                    if current_item:
                        results_list.append({current_key: current_item})
                    current_item = {}
                    current_key = key

                current_item[column] = value
            if current_item:
                results_list.append({current_key: current_item})
            return results_list[offset:]
        except dbconstants.TRANSIENT_CASSANDRA_ERRORS:
            message = 'Exception during range_query'
            logging.exception(message)
            raise AppScaleDBConnectionError(message)
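
A hypothetical call against this method (the instance name, table, and key values are invented for illustration):

# db is assumed to be an instance of the class defining range_query()
results = db.range_query(table_name='entities',
                         column_names=['txnid'],
                         start_key='app1\x00',
                         end_key='app1\xff',
                         limit=100)
# results is an ordered list like [{'app1\x00key1': {'txnid': '42'}}, ...]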
Example #40
    def counter_consistency_test(self):
        """
        Do a bunch of writes with ONE, read back with ALL and check results.
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        create_ks(session, 'counter_tests', 3)

        stmt = """
              CREATE TABLE counter_table (
              id uuid PRIMARY KEY,
              counter_one COUNTER,
              counter_two COUNTER,
              )
           """
        session.execute(stmt)

        counters = []
        # establish 50 counters (2x25 rows)
        for i in xrange(25):
            _id = str(uuid.uuid4())
            counters.append({_id: {'counter_one': 1, 'counter_two': 1}})

            query = SimpleStatement("""
                UPDATE counter_table
                SET counter_one = counter_one + 1, counter_two = counter_two + 1
                where id = {uuid}""".format(uuid=_id),
                                    consistency_level=ConsistencyLevel.ONE)
            session.execute(query)

        # increment a bunch of counters with CL.ONE
        for i in xrange(10000):
            counter = counters[random.randint(0, len(counters) - 1)]
            counter_id = counter.keys()[0]

            query = SimpleStatement("""
                UPDATE counter_table
                SET counter_one = counter_one + 2
                where id = {uuid}""".format(uuid=counter_id),
                                    consistency_level=ConsistencyLevel.ONE)
            session.execute(query)

            query = SimpleStatement("""
                UPDATE counter_table
                SET counter_two = counter_two + 10
                where id = {uuid}""".format(uuid=counter_id),
                                    consistency_level=ConsistencyLevel.ONE)
            session.execute(query)

            query = SimpleStatement("""
                UPDATE counter_table
                SET counter_one = counter_one - 1
                where id = {uuid}""".format(uuid=counter_id),
                                    consistency_level=ConsistencyLevel.ONE)
            session.execute(query)

            query = SimpleStatement("""
                UPDATE counter_table
                SET counter_two = counter_two - 5
                where id = {uuid}""".format(uuid=counter_id),
                                    consistency_level=ConsistencyLevel.ONE)
            session.execute(query)

            # update expectations to match (assumed) db state
            counter[counter_id]['counter_one'] += 1
            counter[counter_id]['counter_two'] += 5

        # let's verify the counts are correct, using CL.ALL
        for counter_dict in counters:
            counter_id = counter_dict.keys()[0]

            query = SimpleStatement("""
                SELECT counter_one, counter_two
                FROM counter_table WHERE id = {uuid}
                """.format(uuid=counter_id),
                                    consistency_level=ConsistencyLevel.ALL)
            rows = list(session.execute(query))

            counter_one_actual, counter_two_actual = rows[0]

            self.assertEqual(counter_one_actual,
                             counter_dict[counter_id]['counter_one'])
            self.assertEqual(counter_two_actual,
                             counter_dict[counter_id]['counter_two'])
Example #41
    def test_type_keyspace_permission_isolation(self):
        """
        Confirm permissions are respected for types in different keyspaces
        """
        self.fixture_dtest_setup.ignore_log_patterns = [
            # I think this happens when permissions change and a node becomes temporarily unavailable
            # and it's probably ok to ignore on this test, as I can see the schema changes propagating
            # almost immediately after
            r'Can\'t send migration request: node.*is down',
        ]

        cluster = self.cluster
        config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
                  'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer',
                  'permissions_validity_in_ms': 0}
        cluster.set_configuration_options(values=config)
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        # need a bit of time for user to be created and propagate
        time.sleep(5)

        # do setup that requires a super user
        superuser_session = self.patient_cql_connection(node1, user='cassandra', password='cassandra')
        superuser_session.execute("create user ks1_user with password 'cassandra' nosuperuser;")
        superuser_session.execute("create user ks2_user with password 'cassandra' nosuperuser;")
        create_ks(superuser_session, 'ks1', 2)
        create_ks(superuser_session, 'ks2', 2)
        superuser_session.execute("grant all permissions on keyspace ks1 to ks1_user;")
        superuser_session.execute("grant all permissions on keyspace ks2 to ks2_user;")

        user1_session = self.patient_cql_connection(node1, user='ks1_user', password='cassandra')
        user2_session = self.patient_cql_connection(node1, user='ks2_user', password='cassandra')

        # first make sure the users can't create types in each other's ks
        self.assertUnauthorized(user1_session, "CREATE TYPE ks2.simple_type (user_number int, user_text text );", 'User ks1_user has no CREATE permission on <keyspace ks2> or any of its parents')

        self.assertUnauthorized(user2_session, "CREATE TYPE ks1.simple_type (user_number int, user_text text );", 'User ks2_user has no CREATE permission on <keyspace ks1> or any of its parents')

        # now, actually create the types in the correct keyspaces
        user1_session.execute("CREATE TYPE ks1.simple_type (user_number int, user_text text );")
        user2_session.execute("CREATE TYPE ks2.simple_type (user_number int, user_text text );")

        # each user now has a type belonging to their granted keyspace
        # let's make sure they can't drop each other's types (for which they have no permissions)

        self.assertUnauthorized(user1_session, "DROP TYPE ks2.simple_type;", 'User ks1_user has no DROP permission on <keyspace ks2> or any of its parents')

        self.assertUnauthorized(user2_session, "DROP TYPE ks1.simple_type;", 'User ks2_user has no DROP permission on <keyspace ks1> or any of its parents')

        # let's make sure they can't rename each other's types (for which they have no permissions)
        self.assertUnauthorized(user1_session, "ALTER TYPE ks2.simple_type RENAME user_number TO user_num;", 'User ks1_user has no ALTER permission on <keyspace ks2> or any of its parents')

        self.assertUnauthorized(user2_session, "ALTER TYPE ks1.simple_type RENAME user_number TO user_num;", 'User ks2_user has no ALTER permission on <keyspace ks1> or any of its parents')

        # rename the types using the correct user w/permissions to do so
        user1_session.execute("ALTER TYPE ks1.simple_type RENAME user_number TO user_num;")
        user2_session.execute("ALTER TYPE ks2.simple_type RENAME user_number TO user_num;")

        # finally, drop the types using the correct user w/permissions to do so, consistency all avoids using a sleep
        user1_session.execute(SimpleStatement("DROP TYPE ks1.simple_type;", consistency_level=ConsistencyLevel.ALL))
        user2_session.execute(SimpleStatement("DROP TYPE ks2.simple_type;", consistency_level=ConsistencyLevel.ALL))

        time.sleep(5)

        # verify user type metadata is gone from the system schema
        self.assertNoTypes(superuser_session)
Example #42
 def truncate_tables(self, session):
     statement = SimpleStatement("TRUNCATE users", ConsistencyLevel.ALL)
     session.execute(statement)
     statement = SimpleStatement("TRUNCATE counters", ConsistencyLevel.ALL)
     session.execute(statement)
Example #43
    def test_udt_subfield(self):
        """
        @jira_ticket CASSANDRA-7423
        @since 3.6
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.LOCAL_QUORUM)
        create_ks(session, 'user_types', 1)

        # Check we can create non-frozen table
        session.execute("CREATE TYPE udt (first ascii, second int, third int)")
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v udt)")

        # Fill in a full UDT across two statements
        # Ensure all subfields are set
        session.execute("INSERT INTO t (id, v) VALUES (0, {third: 2, second: 1})")
        session.execute("UPDATE t set v.first = 'a' WHERE id=0")
        rows = list(session.execute("SELECT * FROM t WHERE id = 0"))
        assert listify(rows) == [[0, ['a', 1, 2]]]

        # Create a full udt
        # Update a subfield on the udt
        # Read back the updated udt
        session.execute("INSERT INTO t (id, v) VALUES (0, {first: 'c', second: 3, third: 33})")
        session.execute("UPDATE t set v.second = 5 where id=0")
        rows = list(session.execute("SELECT * FROM t WHERE id=0"))
        assert listify(rows) == [[0, ['c', 5, 33]]]

        # Rewrite the entire udt
        # Read back
        session.execute("INSERT INTO t (id, v) VALUES (0, {first: 'alpha', second: 111, third: 100})")
        rows = list(session.execute("SELECT * FROM t WHERE id=0"))
        assert listify(rows) == [[0, ['alpha', 111, 100]]]

        # Send three subfield updates to udt
        # Read back
        session.execute("UPDATE t set v.first = 'beta' WHERE id=0")
        session.execute("UPDATE t set v.first = 'delta' WHERE id=0")
        session.execute("UPDATE t set v.second = -10 WHERE id=0")
        rows = list(session.execute("SELECT * FROM t WHERE id=0"))
        assert listify(rows) == [[0, ['delta', -10, 100]]]

        # Send conflicting updates serially to different nodes
        # Read back
        session1 = self.exclusive_cql_connection(node1)
        session2 = self.exclusive_cql_connection(node2)
        session3 = self.exclusive_cql_connection(node3)

        session1.execute("UPDATE user_types.t set v.third = 101 WHERE id=0")
        session2.execute("UPDATE user_types.t set v.third = 102 WHERE id=0")
        session3.execute("UPDATE user_types.t set v.third = 103 WHERE id=0")
        query = SimpleStatement("SELECT * FROM t WHERE id = 0", consistency_level=ConsistencyLevel.ALL)
        rows = list(session.execute(query))
        assert listify(rows) == [[0, ['delta', -10, 103]]]
        session1.shutdown()
        session2.shutdown()
        session3.shutdown()

        # Write full UDT, set one field to null, read back
        session.execute("INSERT INTO t (id, v) VALUES (0, {first:'cass', second:3, third:0})")
        session.execute("UPDATE t SET v.first = null WHERE id = 0")
        rows = list(session.execute("SELECT * FROM t WHERE id=0"))
        assert listify(rows) == [[0, [None, 3, 0]]]

        rows = list(session.execute("SELECT v.first FROM t WHERE id=0"))
        assert listify(rows) == [[None]]
        rows = list(session.execute("SELECT v.second FROM t WHERE id=0"))
        assert listify(rows) == [[3]]
        rows = list(session.execute("SELECT v.third FROM t WHERE id=0"))
        assert listify(rows) == [[0]]
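
A side note for application code: the driver can map UDT columns like the one above onto Python classes via Cluster.register_user_type (Example #46 below uses the same API). A minimal sketch, where client_cluster is assumed to be a driver-level cassandra.cluster.Cluster rather than the ccm cluster driving this test:

class Udt(object):
    # field names must match the CQL type: first, second, third
    def __init__(self, first, second, third):
        self.first = first
        self.second = second
        self.third = third

client_cluster.register_user_type('user_types', 'udt', Udt)
# rows from "SELECT v FROM user_types.t" now carry Udt instances in row.v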
Example #44
    def replace_with_reset_resume_state_test(self):
        """Test replace with resetting bootstrap progress"""

        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        node1.stress(['write', 'n=100000', '-schema', 'replication(factor=3)'])

        session = self.patient_cql_connection(node1)
        stress_table = 'keyspace1.standard1'
        query = SimpleStatement('select * from %s LIMIT 1' % stress_table,
                                consistency_level=ConsistencyLevel.THREE)
        initialData = list(session.execute(query))

        node3.stop(gently=False)

        # kill node1 in the middle of streaming to let it fail
        t = InterruptBootstrap(node1)
        t.start()
        # replace node 3 with node 4
        debug("Starting node 4 to replace node 3")
        node4 = Node('node4',
                     cluster,
                     True, ('127.0.0.4', 9160), ('127.0.0.4', 7000),
                     '7400',
                     '0',
                     None,
                     binary_interface=('127.0.0.4', 9042))
        cluster.add(node4, False)
        try:
            node4.start(
                jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"])
        except NodeError:
            pass  # the node is expected to fail to start here
        t.join()
        node1.start()

        # restart node4 bootstrap with resetting bootstrap state
        node4.stop()
        mark = node4.mark_log()
        node4.start(jvm_args=[
            "-Dcassandra.replace_address_first_boot=127.0.0.3",
            "-Dcassandra.reset_bootstrap_progress=true"
        ])
        # check if we reset bootstrap state
        node4.watch_log_for("Resetting bootstrap progress to start fresh",
                            from_mark=mark)
        # wait for node4 to be ready to accept queries
        node4.watch_log_for("Listening for thrift clients...", from_mark=mark)

        # check if 2nd bootstrap succeeded
        session = self.exclusive_cql_connection(node4)
        rows = list(
            session.execute(
                "SELECT bootstrapped FROM system.local WHERE key='local'"))
        assert len(rows) == 1
        assert rows[0][0] == 'COMPLETED', rows[0][0]

        # query should work again
        debug("Verifying querying works again.")
        finalData = list(session.execute(query))
        self.assertListEqual(initialData, finalData)
Example #45
 def execute_with_all(stmt):
     return session.execute(
         SimpleStatement(stmt, consistency_level=ConsistencyLevel.ALL))
Example #46
File: main.py Project: KristinaKupar/db
import datetime

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster()  # assumes a local node; the original snippet omits setup
session = cluster.connect('consultation')

class period_of_time(object):
    def __init__(self, start_of_consultation, end_of_consultation):
        self.start_of_consultation = start_of_consultation
        self.end_of_consultation = end_of_consultation

cluster.register_user_type('consultation', 'period_of_time', period_of_time)

insertStudent = [('kate99',83.6, 4), ('oros97', 65.7, 6), ('kuper99', 76.8, 4)]
insertTeacher = [('igorOT', 32, ['2017-04-01T16:10', '2017-04-02T16:10', '2017-04-03T16:10'], {'python':['dictionary','flask','data analysis'], 'db':['Oracle', 'noSQL', 'transactions'], 'data science':['big data', 'machine learning']}, 'KPI' ), ('volodymyrVM', 44, ['2017-04-01T16:10', '2017-04-02T16:10', '2017-04-03T16:10'], {'math analysis':['limits','Riemann integrals'], 'QA':['manual', 'automation'], 'differential equations':['homogeneous', 'systems of differential equations', 'phase portraits']}, 'KPI' ), ('tatyanaSL', 50, ['2017-04-01T16:10', '2017-04-02T16:10', '2017-04-03T16:10'], {'optimization methods':['Vengerian method','Simplex method', 'Travelling salesman problem'], 'Operations Research':['simlex method', 'golden ratio method']}, 'KPI' )]
insertConsultation = [('cons1', 'igorOT', period_of_time('2017-04-01T16:10','2017-04-01T17:30'), 'flask', 'python'), ('cons2', 'volodymyrVM', period_of_time('2017-04-01T16:10','2017-04-01T17:30'), 'Riemann integrals', 'math analysis'), ('cons3', 'volodymyrVM', period_of_time('2017-04-02T16:10','2017-04-02T17:30'), 'limits', 'math analysis')]
insertBooking = [('01kuper99', 'kuper99', 'cons1', datetime.datetime.now(), 3, 36), ('02kuper99', 'kuper99', 'cons2', datetime.datetime.now(), 5, 12), ('01oros97', 'oros97', 'cons3', datetime.datetime.now(), 1, 15)]

queryStudent = SimpleStatement(
    "insert into consultation.student (student_id, succes, course) VALUES (%s, %s, %s)",
        consistency_level=ConsistencyLevel.LOCAL_ONE)

for i in insertStudent:
    session.execute(queryStudent, i)

studentSelect = session.execute('SELECT * FROM consultation.student')
for i in studentSelect:
    print(i)

queryTeacher = SimpleStatement(
    "insert into consultation.teacher (teacher_id, age, start_of_consultation, disciplines, university) VALUES (%s, %s,%s,%s,%s)",
            consistency_level=ConsistencyLevel.LOCAL_ONE)

for i in insertTeacher:
    session.execute(queryTeacher, i)
Example #47
    def replace_first_boot_test(self):
        debug("Starting cluster with 3 nodes.")
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        if DISABLE_VNODES:
            numNodes = 1
        else:
            # a little hacky but grep_log returns the whole line...
            numNodes = int(node3.get_conf_option('num_tokens'))

        debug(numNodes)

        debug("Inserting Data...")
        node1.stress(['write', 'n=10000', '-schema', 'replication(factor=3)'])

        session = self.patient_cql_connection(node1)
        stress_table = 'keyspace1.standard1'
        query = SimpleStatement('select * from %s LIMIT 1' % stress_table,
                                consistency_level=ConsistencyLevel.THREE)
        initialData = list(session.execute(query))

        # stop node, query should not work with consistency 3
        debug("Stopping node 3.")
        node3.stop(gently=False)

        debug("Testing node stoppage (query should fail).")
        with self.assertRaises(NodeUnavailable):
            try:
                session.execute(query, timeout=30)
            except (Unavailable, ReadTimeout):
                raise NodeUnavailable("Node could not be queried.")

        # replace node 3 with node 4
        debug("Starting node 4 to replace node 3")
        node4 = Node('node4',
                     cluster,
                     True, ('127.0.0.4', 9160), ('127.0.0.4', 7000),
                     '7400',
                     '0',
                     None,
                     binary_interface=('127.0.0.4', 9042))
        cluster.add(node4, False)
        node4.start(
            jvm_args=["-Dcassandra.replace_address_first_boot=127.0.0.3"],
            wait_for_binary_proto=True)

        # query should work again
        debug("Verifying querying works again.")
        finalData = list(session.execute(query))
        self.assertListEqual(initialData, finalData)

        debug("Verifying tokens migrated sucessfully")
        movedTokensList = node4.grep_log(
            "Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
        debug(movedTokensList[0])
        self.assertEqual(len(movedTokensList), numNodes)

        # check that restarting node 3 doesn't work
        debug("Try to restart node 3 (should fail)")
        node3.start()
        checkCollision = node1.grep_log(
            "between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner")
        debug(checkCollision)
        self.assertEqual(len(checkCollision), 1)

        # restart node4 (if this errors, num_tokens may need to be changed)
        node4.stop(gently=False)
        node4.start(wait_for_binary_proto=True)

        debug("Verifying querying works again.")
        finalData = list(session.execute(query))
        self.assertListEqual(initialData, finalData)

        # we redo this check because restarting node should not result in tokens being moved again, ie number should be same
        debug("Verifying tokens migrated sucessfully")
        movedTokensList = node4.grep_log(
            "Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
        debug(movedTokensList[0])
        self.assertEqual(len(movedTokensList), numNodes)
Example #48
 def remove_all_data(self):
     query = SimpleStatement("""
     TRUNCATE data_index
     """)
     self.data_index.execute(query)
Example #49
 def delete(self, name):
     query = SimpleStatement("DELETE FROM %s WHERE name='%s'" %
                             (self._table, name),
                             consistency_level=self.write_consistency_level)
     self._get_session().execute(query)
     self._shutdown_session()
Example #50
    def _replace_node_test(self, gently):
        """
        Check that the replace address function correctly replaces a node that has failed in a cluster.
        Create a cluster, cause a node to fail, and bring up a new node with the replace_address parameter.
        Check that tokens are migrated and that data is replicated properly.
        """
        debug("Starting cluster with 3 nodes.")
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        if DISABLE_VNODES:
            numNodes = 1
        else:
            # a little hacky but grep_log returns the whole line...
            numNodes = int(node3.get_conf_option('num_tokens'))

        debug(numNodes)

        debug("Inserting Data...")
        node1.stress(['write', 'n=10000', '-schema', 'replication(factor=3)'])

        session = self.patient_cql_connection(node1)
        session.default_timeout = 45
        stress_table = 'keyspace1.standard1'
        query = SimpleStatement('select * from %s LIMIT 1' % stress_table,
                                consistency_level=ConsistencyLevel.THREE)
        initialData = list(session.execute(query))

        # stop node, query should not work with consistency 3
        debug("Stopping node 3.")
        node3.stop(gently=gently, wait_other_notice=True)

        debug("Testing node stoppage (query should fail).")
        with self.assertRaises(NodeUnavailable):
            try:
                query = SimpleStatement(
                    'select * from %s LIMIT 1' % stress_table,
                    consistency_level=ConsistencyLevel.THREE)
                session.execute(query)
            except (Unavailable, ReadTimeout):
                raise NodeUnavailable("Node could not be queried.")

        # replace node 3 with node 4
        debug("Starting node 4 to replace node 3")

        node4 = Node('node4',
                     cluster,
                     True, ('127.0.0.4', 9160), ('127.0.0.4', 7000),
                     '7400',
                     '0',
                     None,
                     binary_interface=('127.0.0.4', 9042))
        cluster.add(node4, False)
        node4.start(replace_address='127.0.0.3', wait_for_binary_proto=True)

        # query should work again
        debug("Verifying querying works again.")
        query = SimpleStatement('select * from %s LIMIT 1' % stress_table,
                                consistency_level=ConsistencyLevel.THREE)
        finalData = list(session.execute(query))
        self.assertListEqual(initialData, finalData)

        debug("Verifying tokens migrated sucessfully")
        movedTokensList = node4.grep_log(
            "Token .* changing ownership from /127.0.0.3 to /127.0.0.4")
        debug(movedTokensList[0])
        self.assertEqual(len(movedTokensList), numNodes)

        # check that restarting node 3 doesn't work
        debug("Try to restart node 3 (should fail)")
        node3.start()
        checkCollision = node1.grep_log(
            "between /127.0.0.3 and /127.0.0.4; /127.0.0.4 is the new owner")
        debug(checkCollision)
        self.assertEqual(len(checkCollision), 1)
Example #51
    def test_fetch_size(self):
        """
        Ensure per-statement fetch_sizes override the default fetch size.
        """
        statements_and_params = zip(
            cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
            [(i, ) for i in range(100)])
        execute_concurrent(self.session, list(statements_and_params))

        prepared = self.session.prepare("SELECT * FROM test3rf.test")

        self.session.default_fetch_size = 10
        result = self.session.execute(prepared, [])
        self.assertTrue(result.has_more_pages)

        self.session.default_fetch_size = 2000
        result = self.session.execute(prepared, [])
        self.assertFalse(result.has_more_pages)

        self.session.default_fetch_size = None
        result = self.session.execute(prepared, [])
        self.assertFalse(result.has_more_pages)

        self.session.default_fetch_size = 10

        prepared.fetch_size = 2000
        result = self.session.execute(prepared, [])
        self.assertFalse(result.has_more_pages)

        prepared.fetch_size = None
        result = self.session.execute(prepared, [])
        self.assertFalse(result.has_more_pages)

        prepared.fetch_size = 10
        result = self.session.execute(prepared, [])
        self.assertTrue(result.has_more_pages)

        prepared.fetch_size = 2000
        bound = prepared.bind([])
        result = self.session.execute(bound, [])
        self.assertFalse(result.has_more_pages)

        prepared.fetch_size = None
        bound = prepared.bind([])
        result = self.session.execute(bound, [])
        self.assertFalse(result.has_more_pages)

        prepared.fetch_size = 10
        bound = prepared.bind([])
        result = self.session.execute(bound, [])
        self.assertTrue(result.has_more_pages)

        bound.fetch_size = 2000
        result = self.session.execute(bound, [])
        self.assertFalse(result.has_more_pages)

        bound.fetch_size = None
        result = self.session.execute(bound, [])
        self.assertFalse(result.has_more_pages)

        bound.fetch_size = 10
        result = self.session.execute(bound, [])
        self.assertTrue(result.has_more_pages)

        s = SimpleStatement("SELECT * FROM test3rf.test", fetch_size=None)
        result = self.session.execute(s, [])
        self.assertFalse(result.has_more_pages)

        s = SimpleStatement("SELECT * FROM test3rf.test")
        result = self.session.execute(s, [])
        self.assertTrue(result.has_more_pages)

        s = SimpleStatement("SELECT * FROM test3rf.test")
        s.fetch_size = None
        result = self.session.execute(s, [])
        self.assertFalse(result.has_more_pages)
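
Condensing the precedence this test exercises: a fetch_size set on a bound statement beats the prepared statement's value, which beats the session default (names as in the test above):

session.default_fetch_size = 10   # session-wide default
prepared.fetch_size = 2000        # per-statement override
bound = prepared.bind([])
bound.fetch_size = 10             # per-execution override wins
result = session.execute(bound)
assert result.has_more_pages      # paged at 10 rows per page, not 2000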
Example #52
 def make_response_future(self, session):
     query = SimpleStatement("SELECT * FROM foo")
     message = QueryMessage(query=query,
                            consistency_level=ConsistencyLevel.ONE)
     return ResponseFuture(session, message, query)
Example #53
    def test_paging_callbacks(self):
        """
        Test to validate callback api
        @since 3.9.0
        @jira_ticket PYTHON-733
        @expected_result callbacks shouldn't be called twice per message
        and the fetch_size should be handled in a transparent way to the user

        @test_category queries
        """
        statements_and_params = zip(
            cycle(["INSERT INTO test3rf.test (k, v) VALUES (%s, 0)"]),
            [(i, ) for i in range(100)])
        execute_concurrent(self.session, list(statements_and_params))

        prepared = self.session.prepare("SELECT * FROM test3rf.test")

        for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000):
            self.session.default_fetch_size = fetch_size
            future = self.session.execute_async("SELECT * FROM test3rf.test",
                                                timeout=20)

            event = Event()
            counter = count()
            number_of_calls = count()

            def handle_page(rows, future, counter, number_of_calls):
                next(number_of_calls)
                for row in rows:
                    next(counter)

                if future.has_more_pages:
                    future.start_fetching_next_page()
                else:
                    event.set()

            def handle_error(err):
                event.set()
                self.fail(err)

            future.add_callbacks(callback=handle_page,
                                 callback_args=(future, counter,
                                                number_of_calls),
                                 errback=handle_error)
            event.wait()
            self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
            self.assertEqual(next(counter), 100)

            # simple statement
            future = self.session.execute_async(
                SimpleStatement("SELECT * FROM test3rf.test"), timeout=20)
            event.clear()
            counter = count()
            number_of_calls = count()

            future.add_callbacks(callback=handle_page,
                                 callback_args=(future, counter,
                                                number_of_calls),
                                 errback=handle_error)
            event.wait()
            self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
            self.assertEqual(next(counter), 100)

            # prepared statement
            future = self.session.execute_async(prepared, timeout=20)
            event.clear()
            counter = count()
            number_of_calls = count()

            future.add_callbacks(callback=handle_page,
                                 callback_args=(future, counter,
                                                number_of_calls),
                                 errback=handle_error)
            event.wait()
            self.assertEqual(next(number_of_calls), 100 // fetch_size + 1)
            self.assertEqual(next(counter), 100)
Example #54
 def delete_user(self, session, userid, consistency):
     statement = SimpleStatement(
         "DELETE FROM users where userid = {}".format(userid),
         consistency_level=consistency)
     session.execute(statement)
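
Interpolating userid with str.format works for the integer ids this test uses; a sketch of the same delete with driver-side parameter binding (same names as above) would be:

statement = SimpleStatement("DELETE FROM users WHERE userid = %s",
                            consistency_level=consistency)
# the driver encodes userid as a CQL literal in place of %s
session.execute(statement, (userid,))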
Example #55
 def list(self):
     query = SimpleStatement("SELECT name FROM %s",
                             consistency_level=self.read_consistency_level)
     res = self._get_session().execute(query, [self._table])
     self._shutdown_session()
     return [row.name for row in res]
Example #56
session.execute(
    'insert into debemp3 (empname , empsal , empid ) VALUES(%s,%s,%s)',
    ('Gelu', 20000, 1))
session.execute(
    'insert into debemp3 (empname , empsal , empid ) VALUES(%s,%s,%s)',
    ('Kuny Doggy', 25000, 2))
session.execute(
    'insert into debemp3 (empname , empsal , empid ) VALUES(%s,%s,%s)',
    ('Dhanamali', 30000, 3))
session.execute(
    'insert into debemp3 (empname , empsal , empid ) VALUES(%s,%s,%s)',
    ('Drishti', 35000, 4))
session.execute(
    'insert into debemp3 (empname , empsal , empid ) VALUES(%s,%s,%s)',
    ('Chhotu', 40000, 5))

print "Insert Done"

rows = SimpleStatement('SELECT * FROM debemp3',
                       consistency_level=ConsistencyLevel.QUORUM)
#rows = SimpleStatement('SELECT * FROM debemp3', consistency_level=ConsistencyLevel.LOCAL_ONE)
#rows = SimpleStatement('SELECT * FROM debemp3', consistency_level=ConsistencyLevel.ONE)
myrows = session.execute(rows)
print "myrows: ", myrows
print "Fetching of rows from table using 'Consistency-Level=Quorum'..."

for everyrow in session.execute(rows):
    print "{} - {} - {} ".format(everyrow.empid, everyrow.empname,
                                 everyrow.empsal)

# A SimpleStatement must be passed to session.execute() to get rows back; iterating the statement object itself doesn't work.
print "Done"
Example #57
 def _insert_data(self, name, value, timestamp, interval, config):
     stmt = self._insert_stmt(name, value, timestamp, interval, config)
     if stmt:
         stmt = SimpleStatement(
             stmt, consistency_level=self.write_consistency_level)
         self._get_session().execute(stmt)
Example #58
import time

from cassandra.query import BatchStatement, SimpleStatement

# assumption: a 'session' already connected to the keyspace holding the
# 'records' table exists; the original snippet omits that setup
data = open("testdata.nt")

i = 0  # running count of inserted lines
GlobalStart = time.time()
batch = BatchStatement()
for line in data:
    i = i+1
    triple = line.split(' ')
    triple[2] = triple[2].rstrip()
    # test = "INSERT INTO records (pk, sujet, predicat, objet) VALUES (" + str(uuid_from_time(datetime.datetime.now())) +" , $$" + triple[0].replace("$", "\$") + "$$, $$" + triple[1].replace("$", "\$") + \
    # "$$, $$" + triple[2].replace("$", "\$") + "$$ );"
    test = "INSERT INTO records (sujet, predicat, objet) VALUES ($$" + triple[0].replace("$", "\$") + "$$, $$" + triple[1].replace("$", "\$") + \
    "$$, $$" + triple[2].replace("$", "\$") + "$$ );"
    batch.add(SimpleStatement(test))
    if(i%10000==0):
        session.execute(batch)
        batch = BatchStatement()
        print("row inserted : " + str(i))

# flush the statements left over after the last full batch of 10000
if i % 10000 != 0:
    session.execute(batch)
    print("row inserted : " + str(i))


GlobalEnd = time.time()
print("temps total " + str(GlobalEnd-GlobalStart) + " " + str(i) +" lignes inserees")
Example #59
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import config
""" Find the number of speeds > 100 in the data set. """

ap = PlainTextAuthProvider(username=config.username, password=config.password)
node_ips = config.hosts
cluster = Cluster(node_ips,
                  protocol_version=4,
                  auth_provider=ap,
                  port=config.port)
session = cluster.connect('part_3_version_0')

query = 'SELECT speed FROM loopdata_by_detector'
statement = SimpleStatement(query, fetch_size=5000)

count = 0
for row in session.execute(statement):
    if isinstance(row.speed, int) and row.speed > 100:
        count += 1

print("\nNumber of speeds > 100: " + str(count) + "\n")
cluster.shutdown()
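
For reference, the same count could be pushed to the server, assuming speed is an integer column and the cluster tolerates a full scan (ALLOW FILTERING reads every row and may time out on large tables):

row = session.execute(
    "SELECT COUNT(*) FROM loopdata_by_detector "
    "WHERE speed > 100 ALLOW FILTERING").one()
print("Number of speeds > 100: " + str(row.count))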
Example #60
    def test_tombstone_failure_threshold_message(self):
        """
        Ensure nodes return an error message in case of TombstoneOverwhelmingExceptions rather
        than dropping the request. A drop makes the coordinator wait for the specified
        read_request_timeout_in_ms.
        @jira_ticket CASSANDRA-7886
        """
        have_v5_protocol = self.supports_v5_protocol(self.cluster.version())

        self.fixture_dtest_setup.allow_log_errors = True
        self.cluster.set_configuration_options(
            values={
                'tombstone_failure_threshold': 500,
                'read_request_timeout_in_ms': 30000,  # 30 seconds
                'range_request_timeout_in_ms': 40000
            })
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()
        proto_version = 5 if have_v5_protocol else None
        session = self.patient_cql_connection(node1,
                                              protocol_version=proto_version)

        create_ks(session, 'test', 3)
        session.execute("CREATE TABLE test ( "
                        "id int, mytext text, col1 int, col2 int, col3 int, "
                        "PRIMARY KEY (id, mytext) )")

        # Add data with tombstones
        values = [str(i) for i in range(1000)]
        for value in values:
            session.execute(
                SimpleStatement(
                    "insert into test (id, mytext, col1) values (1, '{}', null) "
                    .format(value),
                    consistency_level=CL.ALL))

        failure_msg = ("Scanned over.* tombstones.* query aborted")

        @pytest.mark.timeout(25)
        def read_failure_query():
            try:
                session.execute(
                    SimpleStatement(
                        "select * from test where id in (1,2,3,4,5)",
                        consistency_level=CL.ALL))
            except ReadFailure as exc:
                if have_v5_protocol:
                    # at least one replica should have responded with a tombstone error
                    assert exc.error_code_map is not None
                    assert 0x0001 == list(exc.error_code_map.values())[0]
            except Exception:
                raise
            else:
                self.fail('Expected ReadFailure')

        read_failure_query()

        # In almost all cases, we should find the failure message on node1 within a few seconds.
        # If it is not on node1, we grep all logs, as it *absolutely* should be somewhere.
        # If we still cannot find it then, we fail the test, as this is a problem.
        try:
            node1.watch_log_for(failure_msg, timeout=5)
        except TimeoutError:
            failure = (node1.grep_log(failure_msg)
                       or node2.grep_log(failure_msg)
                       or node3.grep_log(failure_msg))

            assert failure, "Cannot find tombstone failure threshold error in log after failed query"

        mark1 = node1.mark_log()
        mark2 = node2.mark_log()
        mark3 = node3.mark_log()

        @pytest.mark.timeout(35)
        def range_request_failure_query():
            try:
                session.execute(
                    SimpleStatement("select * from test",
                                    consistency_level=CL.ALL))
            except ReadFailure as exc:
                if have_v5_protocol:
                    # at least one replica should have responded with a tombstone error
                    assert exc.error_code_map is not None
                    assert 0x0001 == list(exc.error_code_map.values())[0]
            except Exception:
                raise
            else:
                self.fail('Expected ReadFailure')

        range_request_failure_query()

        # In almost all cases, we should find the failure message on node1 within a few seconds.
        # If it is not on node1, we grep all logs, as it *absolutely* should be somewhere.
        # If we still cannot find it then, we fail the test, as this is a problem.
        try:
            node1.watch_log_for(failure_msg, from_mark=mark1, timeout=5)
        except TimeoutError:
            failure = (node1.grep_log(failure_msg, from_mark=mark1)
                       or node2.grep_log(failure_msg, from_mark=mark2)
                       or node3.grep_log(failure_msg, from_mark=mark3))

            assert failure, "Cannot find tombstone failure threshold error in log after range_request_timeout_query"