Example #1
    def _test_downgrading_cl(self, keyspace, rf, accepted):
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()),
                                  DowngradingConsistencyRetryPolicy())
                          })
        session = cluster.connect(wait_for_all_pools=True)

        create_schema(cluster, session, keyspace, replication_factor=rf)
        self._insert(session, keyspace, 1)
        self._query(session, keyspace, 1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        try:
            force_stop(2)
            wait_for_down(cluster, 2)

            self._assert_writes_succeed(session, keyspace, accepted)
            self._assert_reads_succeed(session, keyspace,
                                       accepted - set([ConsistencyLevel.ANY]))
            self._assert_writes_fail(session, keyspace,
                                     SINGLE_DC_CONSISTENCY_LEVELS - accepted)
            self._assert_reads_fail(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        finally:
            start(2)
            wait_for_up(cluster, 2)

        cluster.shutdown()
Example #2
    def test_rfthree_tokenaware_none_down(self):
        keyspace = 'test_rfthree_tokenaware_none_down'
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          execution_profiles={
                              EXEC_PROFILE_DEFAULT:
                              ExecutionProfile(
                                  TokenAwarePolicy(RoundRobinPolicy()))
                          })
        session = cluster.connect(wait_for_all_pools=True)
        wait_for_up(cluster, 1)
        wait_for_up(cluster, 2)

        create_schema(cluster, session, keyspace, replication_factor=3)
        self._insert(session, keyspace, count=1)
        self._query(session, keyspace, count=1)
        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 1)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()

        self._assert_writes_succeed(session, keyspace,
                                    SINGLE_DC_CONSISTENCY_LEVELS)
        self._assert_reads_succeed(session,
                                   keyspace,
                                   SINGLE_DC_CONSISTENCY_LEVELS -
                                   set([ConsistencyLevel.ANY]),
                                   expected_reader=2)

        cluster.shutdown()
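For reference, the execution-profile wiring these integration tests exercise can be set up outside the test harness. A minimal sketch, assuming cassandra-driver 3.x and a reachable node at 127.0.0.1 (the contact point is a placeholder, not part of the tests):

from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy

# Token-aware routing wrapped around round-robin, registered as the default profile.
profile = ExecutionProfile(load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
cluster = Cluster(['127.0.0.1'],
                  execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()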
Example #3
    def _set_up_shuffle_test(self, keyspace, replication_factor):
        use_singledc()
        cluster, session = self._cluster_session_with_lbp(
            TokenAwarePolicy(RoundRobinPolicy(), shuffle_replicas=True)
        )
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=replication_factor)
        return cluster, session
Example #4
    def test_rfthree_tokenaware_downgradingcl(self):
        keyspace = 'test_rfthree_tokenaware_downgradingcl'
        with Cluster(protocol_version=PROTOCOL_VERSION,
                     execution_profiles={
                         EXEC_PROFILE_DEFAULT:
                         ExecutionProfile(TokenAwarePolicy(RoundRobinPolicy()),
                                          DowngradingConsistencyRetryPolicy())
                     }) as cluster:
            self.rfthree_downgradingcl(cluster, keyspace, False)
Example #5
    def test_statement_keyspace(self):
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()

        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        replicas = hosts[2:]
        cluster.metadata.get_replicas.return_value = replicas

        child_policy = Mock()
        child_policy.make_query_plan.return_value = hosts
        child_policy.distance.return_value = HostDistance.LOCAL

        policy = TokenAwarePolicy(child_policy)
        policy.populate(cluster, hosts)

        # no keyspace, child policy is called
        keyspace = None
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(hosts, qplan)
        self.assertEqual(cluster.metadata.get_replicas.call_count, 0)
        child_policy.make_query_plan.assert_called_once_with(keyspace, query)

        # working keyspace, no statement
        cluster.metadata.get_replicas.reset_mock()
        keyspace = 'working_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key)
        qplan = list(policy.make_query_plan(keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(keyspace, routing_key)

        # statement keyspace, no working
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = None
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)

        # both keyspaces set, statement keyspace used for routing
        cluster.metadata.get_replicas.reset_mock()
        working_keyspace = 'working_keyspace'
        statement_keyspace = 'statement_keyspace'
        routing_key = 'routing_key'
        query = Statement(routing_key=routing_key, keyspace=statement_keyspace)
        qplan = list(policy.make_query_plan(working_keyspace, query))
        self.assertEqual(replicas + hosts[:2], qplan)
        cluster.metadata.get_replicas.assert_called_with(statement_keyspace, routing_key)
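In application code, the statement-level keyspace the test above exercises can be attached directly to a statement. A minimal sketch, assuming cassandra-driver 3.x (the query text and keyspace name are placeholders):

from cassandra.query import SimpleStatement

# Hypothetical statement carrying its own keyspace; when both a routing key and a
# statement keyspace are present, TokenAwarePolicy routes on the statement keyspace
# (prepared statements fill in the routing key automatically).
stmt = SimpleStatement("SELECT * FROM users WHERE id = %s",
                       keyspace='statement_keyspace')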
Example #6
    def test_token_aware_with_local_table(self):
        use_singledc()
        cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        p = session.prepare("SELECT * FROM system.local WHERE key=?")
        # this would blow up prior to 61b4fad
        r = session.execute(p, ('local',))
        self.assertEqual(r[0].key, 'local')

        cluster.shutdown()
Example #7
    def test_status_updates(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
        """

        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(self.FakeCluster(), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])

        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)

        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)

        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)

        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))

        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
Example #8
    def test_wrap_dc_aware(self):
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")

        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]

        cluster.metadata.get_replicas.side_effect = get_replicas

        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)

        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))

            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEqual(qplan[0].datacenter, "dc1")

            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEqual(qplan[1].datacenter, "dc1")

            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEqual(qplan[2].datacenter, "dc2")
            self.assertEqual(3, len(qplan))
Example #9
    def test_token_aware_composite_key(self):
        use_singledc()
        keyspace = 'test_token_aware_composite_key'
        table = 'composite'
        cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=2)
        session.execute('CREATE TABLE %s ('
                        'k1 int, '
                        'k2 int, '
                        'i int, '
                        'PRIMARY KEY ((k1, k2)))' % table)

        prepared = session.prepare('INSERT INTO %s '
                                   '(k1, k2, i) '
                                   'VALUES '
                                   '(?, ?, ?)' % table)
        session.execute(prepared.bind((1, 2, 3)))

        results = session.execute('SELECT * FROM %s WHERE k1 = 1 AND k2 = 2' % table)
        self.assertTrue(results[0].i)

        cluster.shutdown()
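The composite-key example works because the driver packs the bound partition-key columns (k1, k2) into a routing key, which TokenAwarePolicy hands to the cluster metadata to find replicas. A minimal sketch for inspecting that key, assuming the prepared INSERT from the example above is in scope:

# Hypothetical inspection of the routing key for the (k1, k2) composite partition key.
bound = prepared.bind((1, 2, 3))
print(bound.routing_key)  # packed bytes passed to metadata.get_replicas()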
Example #10
def serve():

    dse_username = os.getenv('KILLRVIDEO_DSE_USERNAME')
    dse_password = os.getenv('KILLRVIDEO_DSE_PASSWORD')
    dse_contact_points = os.getenv('KILLRVIDEO_DSE_CONTACT_POINTS', 'dse').split(',')
    service_port = os.getenv('KILLRVIDEO_SERVICE_PORT', '50101')

    with open('config.json', 'r') as config_file:
        config = json.load(config_file)

    default_consistency_level = config['DEFAULT_CONSISTENCY_LEVEL']

    # Initialize Cassandra Driver and Mapper
    load_balancing_policy = TokenAwarePolicy(DCAwareRoundRobinPolicy())
    profile = ExecutionProfile(consistency_level=ConsistencyLevel.name_to_value[default_consistency_level],
                               load_balancing_policy=load_balancing_policy)
    graph_profile = DseGraph.create_execution_profile('killrvideo_video_recommendations')

    auth_provider = None
    if dse_username:
        auth_provider = PlainTextAuthProvider(username=dse_username, password=dse_password)

    # Wait for Cassandra (DSE) to be up
    session = None
    while not session:
        try:
            session = Cluster(contact_points=dse_contact_points,
                              execution_profiles={EXEC_PROFILE_DEFAULT: profile, EXEC_PROFILE_GRAPH_DEFAULT: graph_profile},
                              auth_provider=auth_provider).connect("killrvideo")
        except NoHostAvailable:
            logging.info('Waiting for Cassandra (DSE) to be available')
            time.sleep(10)

    # Additional retry loop to check if dummy keyspace exists
    while True:
        logging.info('Checking for schema to be created...')
        result = session.execute('SELECT keyspace_name FROM system_schema.keyspaces WHERE keyspace_name=\'kv_init_done\'')
        if result.one(): # any result indicates keyspace has been created
            break
        time.sleep(10)

    dse.cqlengine.connection.set_session(session)

    # Initialize GRPC Server
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # Initialize services (gRPC servicers, each wired to the gRPC server and its service implementation)
    CommentsServiceServicer(grpc_server, CommentsService(session=session))
    RatingsServiceServicer(grpc_server, RatingsService())
    SearchServiceServicer(grpc_server, SearchService(session=session))
    StatisticsServiceServicer(grpc_server, StatisticsService())
    SuggestedVideosServiceServicer(grpc_server, SuggestedVideosService(session=session))
    #UploadsServiceServicer(grpc_server, UploadsService())
    UserManagementServiceServicer(grpc_server, UserManagementService())
    VideoCatalogServiceServicer(grpc_server, VideoCatalogService(session=session))

    # Start GRPC Server
    grpc_server.add_insecure_port('[::]:' + service_port)
    grpc_server.start()

    # Keep application alive
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
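serve() blocks until interrupted, so a module-level entry point is all that is needed to run it. A minimal sketch, assuming this script is launched directly (the logging setup is an assumption, not part of the original):

if __name__ == '__main__':
    # Assumed entry point: basic logging plus the blocking gRPC service loop above.
    logging.basicConfig(level=logging.INFO)
    serve()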
Example #11
    def token_aware(self, keyspace, use_prepared=False):
        use_singledc()
        cluster, session = self._cluster_session_with_lbp(TokenAwarePolicy(RoundRobinPolicy()))
        self._wait_for_nodes_up(range(1, 4), cluster)

        create_schema(cluster, session, keyspace, replication_factor=1)
        self._insert(session, keyspace)
        self._query(session, keyspace, use_prepared=use_prepared)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()
        self._query(session, keyspace, use_prepared=use_prepared)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()
        force_stop(2)
        self._wait_for_nodes_down([2], cluster)

        try:
            self._query(session, keyspace, use_prepared=use_prepared)
            self.fail()
        except Unavailable as e:
            self.assertEqual(e.consistency, 1)
            self.assertEqual(e.required_replicas, 1)
            self.assertEqual(e.alive_replicas, 0)

        self.coordinator_stats.reset_counts()
        start(2)
        self._wait_for_nodes_up([2], cluster)

        self._query(session, keyspace, use_prepared=use_prepared)

        self.coordinator_stats.assert_query_count_equals(self, 1, 0)
        self.coordinator_stats.assert_query_count_equals(self, 2, 12)
        self.coordinator_stats.assert_query_count_equals(self, 3, 0)

        self.coordinator_stats.reset_counts()
        stop(2)
        self._wait_for_nodes_down([2], cluster)

        try:
            self._query(session, keyspace, use_prepared=use_prepared)
            self.fail()
        except Unavailable:
            pass

        self.coordinator_stats.reset_counts()
        start(2)
        self._wait_for_nodes_up([2], cluster)
        decommission(2)
        self._wait_for_nodes_down([2], cluster)

        self._query(session, keyspace, use_prepared=use_prepared)

        results = set([
            self.coordinator_stats.get_query_count(1),
            self.coordinator_stats.get_query_count(3)
        ])
        self.assertEqual(results, set([0, 12]))
        self.coordinator_stats.assert_query_count_equals(self, 2, 0)

        cluster.shutdown()