def test_no_nodes(self):
    """ Ensure query plan for an empty cluster will execute without errors """
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(None, [])

    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
def test_no_remote(self):
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1")
    policy.populate(None, hosts)

    qplan = list(policy.make_query_plan())
    self.assertEqual(sorted(qplan), sorted(hosts))
def test_get_distance(self):
    """
    Same test as DCAwareRoundRobinPolicyTest.test_get_distance(),
    except a FakeCluster is needed for the metadata variable, and
    policy._child_policy is needed to change child policy settings
    """
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(self.FakeCluster(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy._child_policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(self.FakeCluster(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
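The docstring above refers to a FakeCluster helper whose definition is not shown. A minimal sketch of such a stub (an assumption about the test fixture, not code from this excerpt) only needs a metadata attribute, since that is all TokenAwarePolicy reads during populate():

class FakeCluster:
    # Minimal stand-in (assumption): TokenAwarePolicy.populate() only reads
    # cluster.metadata, so a mocked Metadata object is enough here.
    def __init__(self):
        self.metadata = Mock(spec=Metadata)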
def test_dc_aware_roundrobin_two_dcs_2(self):
    use_multidc([3, 2])
    keyspace = 'test_dc_aware_roundrobin_two_dcs_2'
    cluster, session = self._cluster_session_with_lbp(DCAwareRoundRobinPolicy('dc2'))
    self._wait_for_nodes_up(range(1, 6))
    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 6)

    cluster.shutdown()
def test_status_updates(self):
    """ Same test as DCAwareRoundRobinPolicyTest.test_status_updates() """
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
    policy.populate(self.FakeCluster(), hosts)
    policy.on_down(hosts[0])
    policy.on_remove(hosts[2])

    new_local_host = Host(4, SimpleConvictionPolicy)
    new_local_host.set_location_info("dc1", "rack1")
    policy.on_up(new_local_host)

    new_remote_host = Host(5, SimpleConvictionPolicy)
    new_remote_host.set_location_info("dc9000", "rack1")
    policy.on_add(new_remote_host)

    # we now have two local hosts and two remote hosts in separate dcs
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
    self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))

    # since we have hosts in dc9000, the distance shouldn't be IGNORED
    self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)

    policy.on_down(new_local_host)
    policy.on_down(hosts[1])
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))

    policy.on_down(new_remote_host)
    policy.on_down(hosts[3])
    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
def test_wrap_dc_aware(self):
    cluster = Mock(spec=Cluster)
    cluster.metadata = Mock(spec=Metadata)
    hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
    for host in hosts:
        host.set_up()
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    def get_replicas(keyspace, packed_key):
        index = struct.unpack('>i', packed_key)[0]
        # return one node from each DC
        if index % 2 == 0:
            return [hosts[0], hosts[2]]
        else:
            return [hosts[1], hosts[3]]

    cluster.metadata.get_replicas.side_effect = get_replicas

    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
    policy.populate(cluster, hosts)

    for i in range(4):
        query = Statement(routing_key=struct.pack('>i', i), keyspace='keyspace_name')
        qplan = list(policy.make_query_plan(None, query))
        replicas = get_replicas(None, struct.pack('>i', i))

        # first should be the only local replica
        self.assertIn(qplan[0], replicas)
        self.assertEqual(qplan[0].datacenter, "dc1")

        # then the local non-replica
        self.assertNotIn(qplan[1], replicas)
        self.assertEqual(qplan[1].datacenter, "dc1")

        # then one of the remotes (used_hosts_per_remote_dc is 1, so we
        # shouldn't see two remotes)
        self.assertEqual(qplan[2].datacenter, "dc2")
        self.assertEqual(3, len(qplan))
def test_no_live_nodes(self):
    """ Ensure query plan for a downed cluster will execute without errors """
    hosts = []
    for i in range(4):
        h = Host(i, SimpleConvictionPolicy)
        h.set_location_info("dc1", "rack1")
        hosts.append(h)

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)

    for host in hosts:
        policy.on_down(host)

    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
cross_dc_latency_ms = 30
rowcount = 10
CL = ConsistencyLevel.ONE
#CL = ConsistencyLevel.ALL
ks_query = """
CREATE KEYSPACE IF NOT EXISTS demo
WITH replication = {'class': 'NetworkTopologyStrategy', 'AWS': 3}
"""
auth_provider = PlainTextAuthProvider(username='******', password='******')
ssl_opts = None
#ssl_opts = {
#    'ca_certs': '/path/to/ca.crt',
#    'ssl_version': PROTOCOL_TLSv1,
#    'cert_reqs': CERT_OPTIONAL
#}
#End configuration section

profile1 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc=localDC, used_hosts_per_remote_dc=3),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.05, 20),
    consistency_level=CL
)

print("Connecting to cluster")
cluster = Cluster(
    contact_points=contactpoints,
    auth_provider=auth_provider,
    ssl_options=ssl_opts,
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
)
session = cluster.connect()
session.execute(ks_query)
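Once the session is connected, any statement issued on it routes through the DC-aware default profile registered above. A minimal hedged follow-up against the demo keyspace just created (the kv table and its columns are hypothetical illustrations, not part of the original script):

# Hedged follow-up (assumption): a throwaway table in the `demo` keyspace;
# the table name and columns are illustrative only.
session.execute("CREATE TABLE IF NOT EXISTS demo.kv (k int PRIMARY KEY, v text)")
session.execute("INSERT INTO demo.kv (k, v) VALUES (%s, %s)", (1, 'hello'))
print("Wrote one row through the DC-aware default profile")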
def readStream():
    coordinator = dc
    last_c = coordinator
    used_dc = dc
    current = time.localtime()
    bucket = str(current.tm_year) + str(current.tm_mon) + str(
        current.tm_mday) + str(current.tm_hour) + str(current.tm_min)

    profile1 = ExecutionProfile(
        load_balancing_policy=DCAwareRoundRobinPolicy(
            local_dc=dc, used_hosts_per_remote_dc=3),
        speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
            .05, 20),
        consistency_level=CL)

    print("Connecting to cluster")
    cluster = Cluster(
        contact_points=contactpoints,
        auth_provider=auth_provider,
        ssl_options=ssl_opts,
        execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
    )
    session = cluster.connect()

    x = 0
    y = 0
    while x <= count:
        r = {}  # results dictionary
        current = time.localtime()
        bucket = str(current.tm_year) + str(current.tm_mon) + str(
            current.tm_mday) + str(current.tm_hour) + str(current.tm_min)
        #r["d"] = time.strftime('%Y-%m-%dT%H:%M:%S', current)
        query = """ select * from demo.table2 where bucket = '%s' limit 1 """ % (
            bucket)
        readfail = 0
        r["result"] = "Successful"
        try:
            results = session.execute(query)
        except Exception as e:
            print("Read failed.")
            readfail = 1
            for i in e:
                errormsg = i
            errormsg = str(errormsg).replace('"', '')
            r["count"] = x
            r["dc"] = used_dc
            r["result"] = errormsg
            r["d"] = "00:00:00"
            yield json.dumps(r) + "\r\n"
        if readfail == 1:
            cluster.shutdown()
            return

        for row in results:
            r["d"] = row.d

        if (y == rowcount):
            y = 0
            try:
                future = session.execute_async(query, trace=True)
                result = future.result()
                try:
                    trace = future.get_query_trace(1)
                    coordinator = trace.coordinator
                except Exception:
                    coordinator = last_c
                for h in session.hosts:
                    if h.address == coordinator:
                        used_dc = h.datacenter
                r["count"] = x
                r["dc"] = used_dc
                yield json.dumps(r) + "\r\n"
            except Exception as e:
                for i in e:
                    errormsg = i
                errormsg = str(errormsg).replace('"', '')
                print("Read trace failed.")
                r["count"] = x
                r["dc"] = used_dc
                r["result"] = errormsg
                yield json.dumps(r) + "\r\n"
                cluster.shutdown()

        time.sleep(.03)  # an artificial delay
        x = x + 1
        y = y + 1

    cluster.shutdown()
def test_default_dc(self):
    host_local = Host(1, SimpleConvictionPolicy, 'local')
    host_remote = Host(2, SimpleConvictionPolicy, 'remote')
    host_none = Host(1, SimpleConvictionPolicy)

    # contact point is '1'
    cluster = Mock(contact_points_resolved=[1])

    # contact DC first
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_local)
    policy.on_add(host_remote)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # contact DC second
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_remote)
    policy.on_add(host_local)
    self.assertNotEqual(policy.local_dc, host_remote.datacenter)
    self.assertEqual(policy.local_dc, host_local.datacenter)

    # no DC
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_none)
    self.assertFalse(policy.local_dc)

    # only other DC
    policy = DCAwareRoundRobinPolicy()
    policy.populate(cluster, [host_none])
    self.assertFalse(policy.local_dc)
    policy.on_add(host_remote)
    self.assertFalse(policy.local_dc)
def writeStream(targetCluster):
    coordinator = dc
    last_c = coordinator
    used_dc = dc
    #current = time.localtime()

    profile1 = ExecutionProfile(
        load_balancing_policy=DCAwareRoundRobinPolicy(
            local_dc=dc, used_hosts_per_remote_dc=3),
        speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
            .05, 20),
        consistency_level=CL)

    print("Connecting to cluster")
    if (targetCluster == "DDAC"):
        contactpoints = ddaccontactpoints
    else:
        contactpoints = osscontactpoints
    cluster = Cluster(
        contact_points=contactpoints,
        auth_provider=auth_provider,
        ssl_options=ssl_opts,
        execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
    )
    session = cluster.connect()

    x = 0
    y = 0
    while x <= count:
        r = {}  # results dictionary
        current = time.localtime()
        bucket = str(current.tm_year) + str(current.tm_mon) + str(
            current.tm_mday) + str(current.tm_hour) + str(current.tm_min)
        r["d"] = time.strftime('%Y-%m-%dT%H:%M:%S', current)
        data1 = randint(1, 100)
        data2 = randint(1, 100)
        data3 = randint(1, 100)
        query = """ INSERT INTO demo.table2 (bucket, ts, d, data1, data2, data3) VALUES ('%s', now(), '%s', '%s', '%s', '%s') """ % (
            str(bucket), str(r["d"]), str(data1), str(data2), str(data3))
        writefail = 0
        r["result"] = "Successful"
        try:
            session.execute(query)
        except Exception as e:
            print("Write failed.")
            writefail = 1
            for i in e:
                errormsg = i
            errormsg = str(errormsg).replace('"', '')
            r["count"] = x
            r["dc"] = used_dc
            r["result"] = errormsg
            yield json.dumps(r) + "\r\n"
        if writefail == 1:
            cluster.shutdown()
            return

        if (y == rowcount):
            y = 0
            try:
                future = session.execute_async(query, trace=True)
                result = future.result()
                try:
                    trace = future.get_query_trace(1)
                    coordinator = trace.coordinator
                except Exception:
                    coordinator = last_c
                for h in session.hosts:
                    if h.address == coordinator:
                        used_dc = h.datacenter
                r["count"] = x
                r["dc"] = used_dc
                yield json.dumps(r) + "\r\n"
            except Exception as e:
                for i in e:
                    errormsg = i
                errormsg = str(errormsg).replace('"', '')
                print("Trace failed.")
                r["count"] = x
                r["dc"] = used_dc
                r["result"] = errormsg
                yield json.dumps(r) + "\r\n"
                cluster.shutdown()

        time.sleep(.03)  # an artificial delay
        x = x + 1
        y = y + 1

    cluster.shutdown()
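Both readStream and writeStream reuse the same trace-based coordinator discovery every rowcount-th request. Distilled, the pattern looks like this (a sketch assuming `session` is an already-connected Session and `query` is any CQL string):

# Sketch of the coordinator-discovery pattern used by both streams above.
future = session.execute_async(query, trace=True)
future.result()                    # wait for the query itself to complete
trace = future.get_query_trace(1)  # wait up to 1s for trace data
coordinator = trace.coordinator    # address of the coordinating node
for h in session.hosts:
    if h.address == coordinator:
        print("coordinated by %s in %s" % (coordinator, h.datacenter))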
def test_modification_during_generation(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
    policy.populate(Mock(), hosts)

    # The general concept here is to change the internal state of the
    # policy during plan generation. In this case we use a grey-box
    # approach that changes specific things during known phases of the
    # generator.

    new_host = Host(4, SimpleConvictionPolicy)
    new_host.set_location_info("dc1", "rack1")

    # new local before iteration
    plan = policy.make_query_plan()
    policy.on_up(new_host)
    # local list is not bound yet, so we get to see that one
    self.assertEqual(len(list(plan)), 3 + 2)

    # remove local before iteration
    plan = policy.make_query_plan()
    policy.on_down(new_host)
    # local list is not bound yet, so we don't see it
    self.assertEqual(len(list(plan)), 2 + 2)

    # new local after starting iteration
    plan = policy.make_query_plan()
    next(plan)
    policy.on_up(new_host)
    # local list is bound, and one consumed, so we only see the other original
    self.assertEqual(len(list(plan)), 1 + 2)

    # remove local after traversing available
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # we should be past the local list
    self.assertEqual(len(list(plan)), 0 + 2)

    # REMOTES CHANGE
    new_host.set_location_info("dc2", "rack1")

    # new remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_up(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 3)

    # remove remote after traversing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(new_host)
    # list is updated before we get to it
    self.assertEqual(len(list(plan)), 0 + 2)

    # new remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    # slice is already made, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 1)

    # remove remote after traversing local, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(new_host)
    # slice is created with all present, and we've consumed one
    self.assertEqual(len(list(plan)), 0 + 2)

    # local DC disappears after finishing it, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal starts as normal
    self.assertEqual(len(list(plan)), 0 + 2)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # PYTHON-297 addresses the following cases, where DCs come and go
    # during generation

    # local DC disappears after finishing it, and starting remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[0])
    policy.on_down(hosts[1])
    # dict traversal has begun and consumed one
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[0])
    policy.on_up(hosts[1])

    # remote DC disappears after finishing local, but not starting remote
    plan = policy.make_query_plan()
    for _ in range(2):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # nothing left
    self.assertEqual(len(list(plan)), 0 + 0)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    # remote DC disappears while traversing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_down(hosts[2])
    policy.on_down(hosts[3])
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)
    policy.on_up(hosts[2])
    policy.on_up(hosts[3])

    another_host = Host(5, SimpleConvictionPolicy)
    another_host.set_location_info("dc3", "rack1")
    new_host.set_location_info("dc3", "rack1")

    # new DC while traversing remote
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    policy.on_up(new_host)
    policy.on_up(another_host)
    # we continue with remainder of original list
    self.assertEqual(len(list(plan)), 0 + 1)

    # remote DC disappears after finishing it
    plan = policy.make_query_plan()
    for _ in range(3):
        next(plan)
    last_host_in_this_dc = next(plan)
    if last_host_in_this_dc in (new_host, another_host):
        down_hosts = [new_host, another_host]
    else:
        down_hosts = hosts[2:]
    for h in down_hosts:
        policy.on_down(h)
    # the last DC has two
    self.assertEqual(len(list(plan)), 0 + 2)
def test_get_distance(self):
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
    host = Host("ip1", SimpleConvictionPolicy)
    host.set_location_info("dc1", "rack1")
    policy.populate(Mock(), [host])

    self.assertEqual(policy.distance(host), HostDistance.LOCAL)

    # used_hosts_per_remote_dc is set to 0, so ignore it
    remote_host = Host("ip2", SimpleConvictionPolicy)
    remote_host.set_location_info("dc2", "rack1")
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # dc2 isn't registered in the policy's live_hosts dict
    policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)

    # make sure the policy has both dcs registered
    policy.populate(Mock(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)

    # since used_hosts_per_remote_dc is set to 1, only the first
    # remote host in dc2 will be REMOTE, the rest are IGNORED
    second_remote_host = Host("ip3", SimpleConvictionPolicy)
    second_remote_host.set_location_info("dc2", "rack1")
    policy.populate(Mock(), [host, remote_host, second_remote_host])
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
def test_with_remotes(self):
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info("dc1", "rack1")
    for h in hosts[2:]:
        h.set_location_info("dc2", "rack1")

    local_hosts = set(h for h in hosts if h.datacenter == "dc1")
    remote_hosts = set(h for h in hosts if h.datacenter != "dc1")

    # allow all of the remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), local_hosts)
    self.assertEqual(set(qplan[2:]), remote_hosts)

    # allow only one of the remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), local_hosts)

    used_remotes = set(qplan[2:])
    self.assertEqual(1, len(used_remotes))
    self.assertIn(qplan[2], remote_hosts)

    # allow no remote hosts to be used
    policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
    policy.populate(Mock(), hosts)
    qplan = list(policy.make_query_plan())
    self.assertEqual(2, len(qplan))
    self.assertEqual(local_hosts, set(qplan))
def test_dc_aware_roundrobin_one_remote_host(self):
    use_multidc([2, 2])
    keyspace = 'test_dc_aware_roundrobin_one_remote_host'
    cluster, session = self._cluster_session_with_lbp(
        DCAwareRoundRobinPolicy('dc2', used_hosts_per_remote_dc=1))
    self._wait_for_nodes_up(range(1, 5))
    create_schema(cluster, session, keyspace, replication_strategy=[2, 2])
    self._insert(session, keyspace)
    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)

    self.coordinator_stats.reset_counts()
    bootstrap(5, 'dc1')
    self._wait_for_nodes_up([5])

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 6)
    self.coordinator_stats.assert_query_count_equals(self, 4, 6)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    decommission(3)
    decommission(4)
    self._wait_for_nodes_down([3, 4])

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    responses = set()
    for node in [1, 2, 5]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(5)
    self._wait_for_nodes_down([5])

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)
    responses = set()
    for node in [1, 2]:
        responses.add(self.coordinator_stats.get_query_count(node))
    self.assertEqual(set([0, 12]), responses)

    self.coordinator_stats.reset_counts()
    decommission(1)
    self._wait_for_nodes_down([1])

    self._query(session, keyspace)

    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 12)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    self.coordinator_stats.assert_query_count_equals(self, 4, 0)
    self.coordinator_stats.assert_query_count_equals(self, 5, 0)

    self.coordinator_stats.reset_counts()
    force_stop(2)

    try:
        self._query(session, keyspace)
        self.fail()
    except NoHostAvailable:
        pass

    cluster.shutdown()
import time
import string
import random
import decimal
from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from dse.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy, ConstantSpeculativeExecutionPolicy
from dse import ConsistencyLevel

#Configuration
contactpoints = ['172.31.13.134', '172.31.4.17']
localDC = "dc1"
keyspace = "cme"
CL = ConsistencyLevel.ONE

profile1 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc=localDC, used_hosts_per_remote_dc=3),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.1, 20),
    consistency_level=CL)

print("Connecting to cluster")
cluster = Cluster(
    contact_points=contactpoints,
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
)
session = cluster.connect(keyspace)

c = 0
x = 0
while True:
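    # The excerpt ends at the top of this loop. What follows is a hedged,
    # hypothetical body (assumption, not the original script's workload):
    # a simple repeated write; the table `demo_counts` and its columns are
    # illustrative only.
    session.execute(
        "INSERT INTO demo_counts (id, val) VALUES (%s, %s)",
        (x, random.random()))
    x = x + 1
    c = c + 1
    if c % 1000 == 0:
        print("%d rows written" % c)
    time.sleep(.01)  # small artificial delay, mirroring the other demos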
def serve():
    dse_username = os.getenv('KILLRVIDEO_DSE_USERNAME')
    dse_password = os.getenv('KILLRVIDEO_DSE_PASSWORD')
    dse_contact_points = os.getenv('KILLRVIDEO_DSE_CONTACT_POINTS', 'dse').split(',')
    service_port = os.getenv('KILLRVIDEO_SERVICE_PORT', '50101')

    file = open('config.json', 'r')
    config = json.load(file)
    default_consistency_level = config['DEFAULT_CONSISTENCY_LEVEL']

    # Initialize Cassandra driver and mapper
    load_balancing_policy = TokenAwarePolicy(DCAwareRoundRobinPolicy())
    profile = ExecutionProfile(consistency_level=ConsistencyLevel.name_to_value[default_consistency_level],
                               load_balancing_policy=load_balancing_policy)
    graph_profile = DseGraph.create_execution_profile('killrvideo_video_recommendations')

    auth_provider = None
    if dse_username:
        auth_provider = PlainTextAuthProvider(username=dse_username, password=dse_password)

    # Wait for Cassandra (DSE) to be up
    session = None
    while not session:
        try:
            session = Cluster(contact_points=dse_contact_points,
                              execution_profiles={EXEC_PROFILE_DEFAULT: profile,
                                                  EXEC_PROFILE_GRAPH_DEFAULT: graph_profile},
                              auth_provider=auth_provider).connect("killrvideo")
        except NoHostAvailable:
            logging.info('Waiting for Cassandra (DSE) to be available')
            time.sleep(10)

    # Additional retry loop to check if dummy keyspace exists
    while True:
        logging.info('Checking for schema to be created...')
        result = session.execute('SELECT keyspace_name FROM system_schema.keyspaces WHERE keyspace_name=\'kv_init_done\'')
        if result.one():  # any result indicates the keyspace has been created
            break
        time.sleep(10)

    dse.cqlengine.connection.set_session(session)

    # Initialize gRPC server
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # Initialize services (gRPC servicers with references to the gRPC server
    # and the appropriate service implementation)
    CommentsServiceServicer(grpc_server, CommentsService(session=session))
    RatingsServiceServicer(grpc_server, RatingsService())
    SearchServiceServicer(grpc_server, SearchService(session=session))
    StatisticsServiceServicer(grpc_server, StatisticsService())
    SuggestedVideosServiceServicer(grpc_server, SuggestedVideosService(session=session))
    #UploadsServiceServicer(grpc_server, UploadsService())
    UserManagementServiceServicer(grpc_server, UserManagementService())
    VideoCatalogServiceServicer(grpc_server, VideoCatalogService(session=session))

    # Start gRPC server
    grpc_server.add_insecure_port('[::]:' + service_port)
    grpc_server.start()

    # Keep application alive
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
auth_provider = PlainTextAuthProvider(
    username=config.get('CONFIG', 'clusteruser'),
    password=config.get('CONFIG', 'clusterpass'))

if config.getint('CONFIG', 'sslenabled') == 0:
    ssl_opts = None
else:
    ssl_opts = {
        'ca_certs': config.get('CONFIG', 'sslca'),
        'ssl_version': PROTOCOL_TLSv1,
        'cert_reqs': CERT_OPTIONAL
    }
#End Configuration

profile1 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='dc0', used_hosts_per_remote_dc=0),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.05, 20),
    consistency_level=ConsistencyLevel.ONE)

profile2 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='dc1', used_hosts_per_remote_dc=0),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.05, 20),
    consistency_level=ConsistencyLevel.ONE)

print("Connecting to cluster")
ddacCluster = Cluster(
    contact_points=ddaccontactpoints,
    auth_provider=auth_provider,
    ssl_options=ssl_opts,
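    # Hedged completion (assumption): the excerpt truncates inside this call.
    # Registering profile1 as the default and profile2 under a named key is
    # one plausible continuation; the name 'dc1_profile' is hypothetical.
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1,
                        'dc1_profile': profile2},
)
ddacSession = ddacCluster.connect()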