Example #1
 def _connect_probe_cluster(self):
     if not self.probe_cluster:
         # distinct cluster so we can see the status of nodes ignored by the LBP being tested
         self.probe_cluster = Cluster(schema_metadata_enabled=False, token_metadata_enabled=False,
                                      execution_profiles={EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy())})
         self.probe_session = self.probe_cluster.connect()
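A hedged teardown counterpart to the snippet above; the method name and the attribute resets are assumptions, not part of the original test:

 def _close_probe_cluster(self):
     # Assumed teardown: shut down the probe cluster so its control
     # connection and pools do not leak between tests.
     if self.probe_cluster:
         self.probe_cluster.shutdown()
         self.probe_cluster = None
         self.probe_session = None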
Example #2
 def session_setup(self):
     self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
     self.session = self.cluster.connect()
     self.ks_name = self._testMethodName.lower()
     self.cass_version, self.cql_version = get_server_versions()
Example #3
 def setUp(self):
     self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
     self.session = self.cluster.connect()
Example #4
def serve():

    dse_username = os.getenv('KILLRVIDEO_DSE_USERNAME')
    dse_password = os.getenv('KILLRVIDEO_DSE_PASSWORD')
    dse_contact_points = os.getenv('KILLRVIDEO_DSE_CONTACT_POINTS', 'dse').split(',')
    service_port = os.getenv('KILLRVIDEO_SERVICE_PORT', '50101')

    with open('config.json', 'r') as config_file:
        config = json.load(config_file)

    default_consistency_level = config['DEFAULT_CONSISTENCY_LEVEL']

    # Initialize Cassandra Driver and Mapper
    load_balancing_policy = TokenAwarePolicy(DCAwareRoundRobinPolicy())
    profile = ExecutionProfile(consistency_level=ConsistencyLevel.name_to_value[default_consistency_level],
                               load_balancing_policy=load_balancing_policy)
    graph_profile = DseGraph.create_execution_profile('killrvideo_video_recommendations')

    auth_provider = None
    if dse_username:
        auth_provider = PlainTextAuthProvider(username=dse_username, password=dse_password)

    # Wait for Cassandra (DSE) to be up
    session = None
    while not session:
        try:
            session = Cluster(contact_points=dse_contact_points,
                              execution_profiles={EXEC_PROFILE_DEFAULT: profile, EXEC_PROFILE_GRAPH_DEFAULT: graph_profile},
                              auth_provider=auth_provider).connect("killrvideo")
        except NoHostAvailable:
            logging.info('Waiting for Cassandra (DSE) to be available')
            time.sleep(10)

    # Additional retry loop to check if dummy keyspace exists
    while True:
        logging.info('Checking for schema to be created...')
        result = session.execute("SELECT keyspace_name FROM system_schema.keyspaces WHERE keyspace_name='kv_init_done'")
        if result.one(): # any result indicates keyspace has been created
            break
        time.sleep(10)

    dse.cqlengine.connection.set_session(session)

    # Initialize GRPC Server
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # Initialize Services (gRPC servicers with references to the gRPC server and the appropriate service implementation)
    CommentsServiceServicer(grpc_server, CommentsService(session=session))
    RatingsServiceServicer(grpc_server, RatingsService())
    SearchServiceServicer(grpc_server, SearchService(session=session))
    StatisticsServiceServicer(grpc_server, StatisticsService())
    SuggestedVideosServiceServicer(grpc_server, SuggestedVideosService(session=session))
    #UploadsServiceServicer(grpc_server, UploadsService())
    UserManagementServiceServicer(grpc_server, UserManagementService())
    VideoCatalogServiceServicer(grpc_server, VideoCatalogService(session=session))

    # Start GRPC Server
    grpc_server.add_insecure_port('[::]:' + service_port)
    grpc_server.start()

    # Keep application alive
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
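The schema-check loop in serve() spins forever if the kv_init_done marker keyspace never appears. A minimal sketch of a bounded variant, assuming the same session object; the retry cap and the RuntimeError are illustrative, not part of the original service:

import logging
import time

def wait_for_schema(session, attempts=30, delay=10):
    # Poll for the marker keyspace, but give up after `attempts` tries.
    for _ in range(attempts):
        logging.info('Checking for schema to be created...')
        result = session.execute(
            "SELECT keyspace_name FROM system_schema.keyspaces WHERE keyspace_name='kv_init_done'")
        if result.one():
            return
        time.sleep(delay)
    raise RuntimeError('kv_init_done keyspace never appeared; giving up')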
Example #5
def main():
    module = AnsibleModule(argument_spec={
        'login_user': {
            'required': True,
            'type': 'str'
        },
        'login_password': {
            'required': True,
            'no_log': True,
            'type': 'str'
        },
        'login_hosts': {
            'required': True,
            'type': 'list'
        },
        'login_port': {
            'default': 9042,
            'type': 'int'
        },
        'is_ssl': {
            'required': True,
            'type': 'bool'
        },
        'cert_path': {
            'required': False,
            'type': 'str'
        },
        'name': {
            'required': True,
            'aliases': ['role']
        },
        'password': {
            'default': None,
            'no_log': True
        },
        'enable_login': {
            'default': False,
            'type': 'bool'
        },
        'superuser': {
            'default': False,
            'type': 'bool'
        },
        'state': {
            'default': "present",
            'choices': ["absent", "present"]
        }
    },
                           supports_check_mode=True)

    is_ssl = module.params["is_ssl"]
    cert_path = module.params["cert_path"]
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    login_hosts = module.params["login_hosts"]
    login_port = module.params["login_port"]
    enable_login = module.params["enable_login"]
    name = module.params["name"]
    password = module.params["password"]
    superuser = module.params["superuser"]
    state = module.params["state"]

    if not cassandra_dep_found:
        module.fail_json(msg="the python cassandra-driver module is required")

    session = None
    changed = False
    ssl_options = dict(certfile=cert_path, ssl_version=ssl.PROTOCOL_TLSv1)

    try:
        if not login_user:
            cluster = Cluster(login_hosts, port=login_port)

        else:
            auth_provider = PlainTextAuthProvider(username=login_user,
                                                  password=login_password)

            if is_ssl:
                cluster = Cluster(login_hosts,
                                  auth_provider=auth_provider,
                                  protocol_version=3,
                                  port=login_port,
                                  ssl_options=ssl_options)
            else:
                cluster = Cluster(login_hosts,
                                  auth_provider=auth_provider,
                                  protocol_version=3,
                                  port=login_port)

        session = cluster.connect()
        session.row_factory = dict_factory
    except Exception as e:
        module.fail_json(
            msg="unable to connect to cassandra, check login_user and "
                "login_password are correct. Exception message: %s" % e)
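The excerpt stops once the connection is established. For orientation only, a role module like this would typically go on to issue role-management CQL built from the parsed parameters; the continuation below is a sketch under that assumption, not the module's actual code. Role names are identifiers, so they cannot be bound as query parameters and are interpolated directly here:

    # Hypothetical continuation: apply the requested role state.
    if state == "present":
        session.execute(
            "CREATE ROLE IF NOT EXISTS {0} WITH PASSWORD = '{1}' "
            "AND LOGIN = {2} AND SUPERUSER = {3}".format(
                name, password, str(enable_login).lower(), str(superuser).lower()))
    else:
        session.execute("DROP ROLE IF EXISTS {0}".format(name))
    module.exit_json(changed=True, name=name)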
Example #6
#!/usr/bin/python

from dse.cluster import Cluster
import sys
import json
import shutil

argv_len = len(sys.argv)
if argv_len < 3 or sys.argv[1] in ('--help', '-h'):
    print("Usage: generate.py contact_point file_name [port]")
    sys.exit(1)

port = '9103'
if argv_len == 4:
    port = sys.argv[3]

cluster = Cluster([sys.argv[1]])
session = cluster.connect()
metadata = cluster.metadata

host_list = [
    host.broadcast_address + ':' + port for host in metadata.all_hosts()
]

data = {'labels': {'cluster': metadata.cluster_name}, 'targets': host_list}
filename = sys.argv[2] + '.tmp'
with open(filename, 'w') as f:
    json.dump(data, f)

shutil.move(filename, sys.argv[2])
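A usage sketch based on the script's own usage string; the host, output path, and resulting JSON below are illustrative placeholders:

# Hypothetical invocation:
#   python generate.py 10.0.0.1 targets.json 9103
# which would write targets.json containing a single object such as:
#   {"labels": {"cluster": "Test Cluster"}, "targets": ["10.0.0.1:9103", "10.0.0.2:9103"]}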
Example #7
from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from dse.auth import PlainTextAuthProvider
from dse.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy, ConstantSpeculativeExecutionPolicy
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from dse import ConsistencyLevel
from kafka import KafkaConsumer
import json

#configs
topic = "dse-iot"
contactpoints = ['127.0.0.1']

#Connect to DSE
cluster = Cluster(contact_points=contactpoints)
session = cluster.connect()

#Setup prepared statements
iotwrite = session.prepare(
    "INSERT INTO demo.iot (id, bucket, ts, sensor, type, reading) VALUES (now(), ?, ?, ?, ?, ?)"
)

#Setup Spark Context for DSE
conf = SparkConf() \
 .setAppName("IOT Demo Consumer") \
 .set('spark.executor.cores', '3') \
 .set('spark.cores.max', '3') \
 .set('spark.driver.memory','2g')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
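A hedged usage sketch for the iotwrite prepared statement above; the bucket format and the sample sensor values are assumptions inferred from the five '?' placeholders:

import time
from datetime import datetime

# Bind the placeholders in order: bucket, ts, sensor, type, reading.
bucket = time.strftime('%Y%m%d%H%M')  # assumed minute-level bucketing
session.execute(iotwrite, (bucket, datetime.utcnow(), 'sensor-42', 'temperature', 21.5))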
Example #8
import requests
from flask import Flask
from flask_cors import CORS
from dse.cluster import Cluster
from dse.auth import DSEPlainTextAuthProvider

app = Flask(__name__)
CORS(app)

#Configuration
contactpoints = ['66.70.191.99']
auth_provider = DSEPlainTextAuthProvider(username='******',
                                         password='******')
keyspace = "whois"

print "Connecting to cluster"

cluster = Cluster(contact_points=contactpoints, auth_provider=auth_provider)

session = cluster.connect(keyspace)


@app.route('/')
def index():
    return "tacos and burritos"


@app.route('/incoming/info', methods=['POST'])
def info():
    #   if not request.json in request.json:
    #      abort(400)

    #print json.dumps(request.json)
Example #9
 def test_set_connection_class(self):
     cluster = Cluster(connection_class='test')
     self.assertEqual('test', cluster.connection_class)
Example #10
 def setUp(self):
     self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
Example #11
    def test_graph_profile(self):
        """
            Test verifying various aspects of graph config properties.

            @since 1.0.0
            @jira_ticket PYTHON-570

            @test_category dse graph
            """
        hosts = self.cluster.metadata.all_hosts()
        first_host = hosts[0].address
        second_hosts = "1.2.3.4"

        generate_classic(self.session)
        # Create various execution profiles
        exec_dif_factory = GraphExecutionProfile(
            row_factory=single_object_row_factory)
        exec_dif_factory.graph_options.graph_name = self.graph_name
        exec_dif_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
        exec_dif_lbp.graph_options.graph_name = self.graph_name
        exec_bad_lbp = GraphExecutionProfile(
            load_balancing_policy=WhiteListRoundRobinPolicy([second_hosts]))
        exec_bad_lbp.graph_options.graph_name = self.graph_name
        exec_short_timeout = GraphExecutionProfile(
            request_timeout=1,
            load_balancing_policy=WhiteListRoundRobinPolicy([first_host]))
        exec_short_timeout.graph_options.graph_name = self.graph_name

        # Add a single execution profile at cluster creation
        local_cluster = Cluster(
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={"exec_dif_factory": exec_dif_factory})
        local_session = local_cluster.connect()
        rs1 = self.session.execute_graph('g.V()')
        rs2 = local_session.execute_graph('g.V()',
                                          execution_profile='exec_dif_factory')

        # Verify default and non default policy works
        self.assertFalse(isinstance(rs2[0], Vertex))
        self.assertTrue(isinstance(rs1[0], Vertex))
        # Add other policies validate that lbp are honored
        local_cluster.add_execution_profile("exec_dif_ldp", exec_dif_lbp)
        local_session.execute_graph('g.V()', execution_profile="exec_dif_ldp")
        local_cluster.add_execution_profile("exec_bad_lbp", exec_bad_lbp)
        with self.assertRaises(NoHostAvailable):
            local_session.execute_graph('g.V()',
                                        execution_profile="exec_bad_lbp")

        # Try with missing EP
        with self.assertRaises(ValueError):
            local_session.execute_graph('g.V()',
                                        execution_profile='bad_exec_profile')

        # Validate that timeout is honored
        local_cluster.add_execution_profile("exec_short_timeout",
                                            exec_short_timeout)
        with self.assertRaises(Exception) as e:
            self.assertTrue(
                isinstance(e, InvalidRequest)
                or isinstance(e, OperationTimedOut))
            local_session.execute_graph(
                'java.util.concurrent.TimeUnit.MILLISECONDS.sleep(2000L);',
                execution_profile='exec_short_timeout')
Example #12
def main():
    module = AnsibleModule(argument_spec={
        'login_user': {
            'required': True,
            'type': 'str'
        },
        'login_password': {
            'required': True,
            'no_log': True,
            'type': 'str'
        },
        'login_hosts': {
            'required': True,
            'type': 'list'
        },
        'login_port': {
            'default': 9042,
            'type': 'int'
        },
        'permission': {
            'required':
            False,
            'choices': [
                "all", "create", "alter", "drop", "select", "modify",
                "authorize"
            ]
        },
        'role': {
            'required': True,
            'aliases': ['name']
        },
        'inherit_role': {
            'required': False,
            'default': None
        },
        'keyspace': {
            'required': False,
            'default': None,
            'type': 'str'
        },
        'all_keyspaces': {
            'default': False,
            'type': 'bool'
        },
        'mode': {
            'default': "grant",
            'choices': ["grant", "revoke"]
        }
    },
                           supports_check_mode=True)
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    login_hosts = module.params["login_hosts"]
    login_port = module.params["login_port"]
    permission = module.params["permission"]
    role = module.params["role"]
    inherit_role = module.params["inherit_role"]
    keyspace = module.params["keyspace"]
    all_keyspaces = module.params["all_keyspaces"]
    mode = module.params["mode"]

    if not cassandra_dep_found:
        module.fail_json(msg="the python cassandra-driver module is required")

    session = None
    changed = False
    try:
        if not login_user:
            cluster = Cluster(login_hosts, port=login_port)
        else:
            auth_provider = PlainTextAuthProvider(username=login_user,
                                                  password=login_password)
            cluster = Cluster(login_hosts,
                              auth_provider=auth_provider,
                              protocol_version=2,
                              port=login_port)
        session = cluster.connect()
        session.row_factory = dict_factory
    except Exception as e:
        module.fail_json(
            msg="unable to connect to cassandra, check login_user and "
                "login_password are correct. Exception message: %s" % e)
Example #13
    def readStream():
        coordinator = dc
        last_c = coordinator
        used_dc = dc
        current = time.localtime()
        bucket = str(current.tm_year) + str(current.tm_mon) + str(
            current.tm_mday) + str(current.tm_hour) + str(current.tm_min)

        profile1 = ExecutionProfile(
            load_balancing_policy=DCAwareRoundRobinPolicy(
                local_dc=dc, used_hosts_per_remote_dc=3),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .05, 20),
            consistency_level=CL)

        print "Connecting to cluster"

        cluster = Cluster(
            contact_points=contactpoints,
            auth_provider=auth_provider,
            ssl_options=ssl_opts,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
        )

        session = cluster.connect()

        x = 0
        y = 0
        while x <= count:
            r = {}  #Results Dictionary
            current = time.localtime()
            bucket = str(current.tm_year) + str(current.tm_mon) + str(
                current.tm_mday) + str(current.tm_hour) + str(current.tm_min)
            #r["d"] = time.strftime('%Y-%m-%dT%H:%M:%S', current)
            query = """ select * from demo.table2 where bucket = '%s' limit 1 """ % (
                bucket)
            readfail = 0
            r["result"] = "Successful"
            try:
                results = session.execute(query)
            except Exception as e:
                print("Read failed.")
                readfail = 1
                errormsg = str(e).replace('"', '')
                r["count"] = x
                r["dc"] = used_dc
                r["result"] = errormsg
                r["d"] = "00:00:00"
                yield json.dumps(r) + "\r\n"
            if readfail == 1:
                cluster.shutdown()
                return

            for row in results:
                r["d"] = row.d

            if (y == rowcount):
                y = 0
                try:
                    future = session.execute_async(query, trace=True)
                    result = future.result()
                    try:
                        trace = future.get_query_trace(1)
                        coordinator = trace.coordinator
                    except:
                        coordinator = last_c
                    for h in session.hosts:
                        if h.address == coordinator:
                            used_dc = h.datacenter
                    r["count"] = x
                    r["dc"] = used_dc
                    yield json.dumps(r) + "\r\n"
                except Exception as e:
                    errormsg = str(e).replace('"', '')
                    print("Read trace failed.")
                    r["count"] = x
                    r["dc"] = used_dc
                    r["result"] = errormsg
                    yield json.dumps(r) + "\r\n"
                    cluster.shutdown()

            time.sleep(.03)  # an artificial delay
            x = x + 1
            y = y + 1
        cluster.shutdown()
Example #14
def main():
    arguments = docopt(__doc__)

    host = [arguments['--host']] if arguments["--host"] else ["localhost"]

    session = Cluster(host).connect()

    graph = arguments['<keyspace>']
    if graph and graph not in session.cluster.metadata.keyspaces:
        print "Graph {}{}{} not found".format(Fore.RED, graph, Style.RESET_ALL)
        sys.exit(1)

    session.default_graph_options.graph_name = graph
    print Fore.GREEN + "Connected to {}/{}".format(host[0],
                                                   graph) + Style.RESET_ALL

    accum = None
    eof = None

    print "Gremlin REPL, use heredocs for multiline ex:<<EOF, help for help\n"

    while True:
        graph = session.default_graph_options.graph_name
        prompt = "gremlin [{}/{}]> ".format(
            host[0], graph) if eof is None else "gremlin (cont)> "
        input = raw_input(prompt)
        output_time = False
        start_time = time.time()

        if input.startswith("<<"):
            # heredoc
            print "Multiline mode activated"
            eof = input[2:]
            accum = []
            continue

        if eof and input == eof:
            eof = None
            input = "\n".join(accum)
            print input

        elif eof:
            accum.append(input)
            continue

        if input == "quit" or input == "exit":
            break

        if input == "%schema":
            continue

        if input == "help":
            print_help()
            continue
        if input == "docs":
            webbrowser.open(
                "http://docs.datastax.com/en/datastax_enterprise/5.0/datastax_enterprise/graph/graphTOC.html#graphTOC__missing-elem-id--graphTOC"
            )
            continue

        total_time = None
        try:

            try:
                parsed = parse_line(input)
                result = parsed.execute(session)
                print_result_set(result)
                continue
            except ParseError as e:
                pass
            except Exception as e:
                print e
                continue

            stmt = SimpleGraphStatement(input)

            if input.startswith("a"):
                print Fore.GREEN + "Spark Graph Traversal Enabled, this may take a while..." + Style.RESET_ALL
                stmt.options.graph_source = "a"
                stmt.options.graph_alias = "a"

            start = time.time()

            result = session.execute_graph(stmt)
            total_time = time.time() - start

        except Exception as e:
            print e
            continue

        if isinstance(result, ResultSet):
            print_result_set(result)
            if total_time:
                print Fore.RED + "Query Time: {}s".format(round(
                    total_time, 3)) + Style.RESET_ALL
        else:
            try:

                print "Unknown result", type(result), result
            except Exception as e:
                print e
Example #15
    def test_refresh_schema_no_wait(self):

        contact_points = [DSE_IP]
        with Cluster(protocol_version=PROTOCOL_VERSION,
                     max_schema_agreement_wait=10,
                     contact_points=contact_points,
                     execution_profiles={
                         EXEC_PROFILE_DEFAULT:
                         ExecutionProfile(
                             load_balancing_policy=WhiteListRoundRobinPolicy(
                                 contact_points))
                     }) as cluster:
            session = cluster.connect()

            schema_ver = session.execute(
                "SELECT schema_version FROM system.local WHERE key='local'"
            )[0][0]
            new_schema_ver = uuid4()
            session.execute(
                "UPDATE system.local SET schema_version=%s WHERE key='local'",
                (new_schema_ver, ))

            try:
                agreement_timeout = 1

                # cluster agreement wait exceeded
                c = Cluster(protocol_version=PROTOCOL_VERSION,
                            max_schema_agreement_wait=agreement_timeout)
                c.connect()
                self.assertTrue(c.metadata.keyspaces)

                # cluster agreement wait used for refresh
                original_meta = c.metadata.keyspaces
                start_time = time.time()
                self.assertRaisesRegexp(
                    Exception, r"Schema metadata was not refreshed.*",
                    c.refresh_schema_metadata)
                end_time = time.time()
                self.assertGreaterEqual(end_time - start_time,
                                        agreement_timeout)
                self.assertIs(original_meta, c.metadata.keyspaces)

                # refresh wait overrides cluster value
                original_meta = c.metadata.keyspaces
                start_time = time.time()
                c.refresh_schema_metadata(max_schema_agreement_wait=0)
                end_time = time.time()
                self.assertLess(end_time - start_time, agreement_timeout)
                self.assertIsNot(original_meta, c.metadata.keyspaces)
                self.assertEqual(original_meta, c.metadata.keyspaces)

                c.shutdown()

                refresh_threshold = 0.5
                # cluster agreement bypass
                c = Cluster(protocol_version=PROTOCOL_VERSION,
                            max_schema_agreement_wait=0)
                start_time = time.time()
                s = c.connect()
                end_time = time.time()
                self.assertLess(end_time - start_time, refresh_threshold)
                self.assertTrue(c.metadata.keyspaces)

                # cluster agreement wait used for refresh
                original_meta = c.metadata.keyspaces
                start_time = time.time()
                c.refresh_schema_metadata()
                end_time = time.time()
                self.assertLess(end_time - start_time, refresh_threshold)
                self.assertIsNot(original_meta, c.metadata.keyspaces)
                self.assertEqual(original_meta, c.metadata.keyspaces)

                # refresh wait overrides cluster value
                original_meta = c.metadata.keyspaces
                start_time = time.time()
                self.assertRaisesRegexp(
                    Exception,
                    r"Schema metadata was not refreshed.*",
                    c.refresh_schema_metadata,
                    max_schema_agreement_wait=agreement_timeout)
                end_time = time.time()
                self.assertGreaterEqual(end_time - start_time,
                                        agreement_timeout)
                self.assertIs(original_meta, c.metadata.keyspaces)
                c.shutdown()
            finally:
                # TODO once fixed this connect call
                session = cluster.connect()
                session.execute(
                    "UPDATE system.local SET schema_version=%s WHERE key='local'",
                    (schema_ver, ))
Example #16
import random, datetime

from dse.cluster import Cluster

cluster = Cluster()
session = cluster.connect()

start_time = datetime.datetime.fromtimestamp(1564524000)
end_time = datetime.datetime.fromtimestamp(1564610400)  # one day after start_time

bucket_size = 60

product_ids = ["PARENT-ROLL", "TOIL-1", "TOIL-2", "TOIL-3"]

current_time = start_time
while current_time <= end_time:
    interaction_count = 1
    product_count = 0
    for i in range(0, 60):
        time = current_time + datetime.timedelta(minutes=i)
        session.execute(
            """
            INSERT INTO prodcat.user_interaction_product_history (user_id, product_id, year, month, day, hour, interaction_time, type)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
            """, (0, product_ids[product_count], time.year, time.month,
                  time.day, time.hour, time, str(interaction_count % 3 == 1)))

        session.execute(
            """
            INSERT INTO prodcat.user_interaction_product (user_id, product_id, interaction_time, type)
            VALUES (%s, %s, %s, %s)
Example #17
    def test_idle_heartbeat(self):
        interval = 2
        cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                          idle_heartbeat_interval=interval)
        session = cluster.connect(wait_for_all_pools=True)

        # This test relies on impl details of connection req id management to see if heartbeats
        # are being sent. May need update if impl is changed
        connection_request_ids = {}
        for h in cluster.get_connection_holders():
            for c in h.get_connections():
                # make sure none are idle (they should have startup messages)
                self.assertFalse(c.is_idle)
                with c.lock:
                    connection_request_ids[id(c)] = deque(
                        c.request_ids)  # copy of request ids

        # let two heartbeat intervals pass (the first one had startup messages in it)
        time.sleep(2 * interval + interval / 2)

        connections = [
            c for holders in cluster.get_connection_holders()
            for c in holders.get_connections()
        ]

        # make sure requests were sent on all connections
        for c in connections:
            expected_ids = connection_request_ids[id(c)]
            expected_ids.rotate(-1)
            with c.lock:
                self.assertListEqual(list(c.request_ids), list(expected_ids))

        # assert idle status
        self.assertTrue(all(c.is_idle for c in connections))

        # send messages on all connections
        statements_and_params = [("SELECT release_version FROM system.local",
                                  ())] * len(cluster.metadata.all_hosts())
        results = execute_concurrent(session, statements_and_params)
        for success, result in results:
            self.assertTrue(success)

        # assert not idle status
        self.assertFalse(
            any(c.is_idle if not c.is_control_connection else False
                for c in connections))

        # holders include session pools and cc
        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders),
                         len(cluster.metadata.all_hosts()) +
                         1)  # hosts pools, 1 for cc

        # include additional sessions
        session2 = cluster.connect(wait_for_all_pools=True)

        holders = cluster.get_connection_holders()
        self.assertIn(cluster.control_connection, holders)
        self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) +
                         1)  # 2 sessions' hosts pools, 1 for cc

        cluster._idle_heartbeat.stop()
        cluster._idle_heartbeat.join()
        assert_quiescent_pool_state(self, cluster)

        cluster.shutdown()
Example #18
 def connect_and_query(self, auth_provider, execute_as=None):
     self.cluster = Cluster(auth_provider=auth_provider)
     self.session = self.cluster.connect()
     query = "SELECT * FROM testproxy.testproxy"
     rs = self.session.execute(query, execute_as=execute_as)
     return rs
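A hedged call-site sketch for the helper above; the credentials and the proxied user are placeholders and assume a DSE cluster where proxy execution has been granted:

 # Hypothetical call site: authenticate as a trusted service account,
 # then run the query as a proxied end user (DSE proxy execution).
 auth = PlainTextAuthProvider(username='server', password='server')
 rows = self.connect_and_query(auth, execute_as='proxied_user')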
Example #19
 def setup_class(cls):
     cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
     cls.session = cls.cluster.connect(wait_for_all_pools=True)
Example #20
    def setUp(self):

        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect(wait_for_all_pools=True)
        self.session.execute("TRUNCATE test3rf.test")
Example #21
actor = __.actor


def print_header(title, subtitle=""):
    print()
    t = "* " + title
    print(t)
    st = ""
    if subtitle:
        st = "[" + subtitle + "]"
        print(st)

    line = "-" * max(len(st), len(t))
    print(line)

c = Cluster()
session = c.connect()

# initialize the TraversalSource for the DSL using the DSE Python Driver
# https://github.com/datastax/python-dse-driver
killr = Graph().traversal(KillrVideoTraversalSource).withRemote(DSESessionRemoteGraphConnection(session, "killrvideo"))

print_header("Actors for Young Guns", "killr.movies('Young Guns').actors().values('name')")
for n in killr.movies("Young Guns").actors().values("name").toList():
    print(n)

print_header("Ratings Distribution by Age for Young Guns", "killr.movies('Young Guns').ratings().distribution_for_ages(18, 40)")
ratingsByAge = killr.movies("Young Guns").ratings().distribution_for_ages(18, 40).next()
print(ratingsByAge)

print_header("Failed Validation", "killr.movies('Young Guns').ratings().distribution_for_ages(17,40)")
Example #22
                                                  used_hosts_per_remote_dc=3),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.05, 20),
    consistency_level=CL)

ssl_opts = None
#ssl_opts = {
#    'ca_certs': '/path/to/my/ca.certs',
#    'ssl_version': PROTOCOL_TLSv1,
#    'cert_reqs':  CERT_OPTIONAL
#}

print "Connecting to cluster"

cluster = Cluster(
    contact_points=contactpoints,
    auth_provider=auth_provider,
    ssl_options=ssl_opts,
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
)

session = cluster.connect()

#session.execute (""" CREATE KEYSPACE IF NOT EXISTS stats WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': '3', 'dc2': '3'}  AND durable_writes = true """)
session.execute(
    """ CREATE KEYSPACE IF NOT EXISTS stats WITH replication = {'class': 'NetworkTopologyStrategy', 'AWS': '3'}  AND durable_writes = true """
)
session.execute(
    """ CREATE TABLE IF NOT EXISTS  stats.player_stats ( name text, game text, ts timeuuid, stat1 int, stat2 int, stat3 int, PRIMARY KEY ((name, game), ts)) """
)

c = 0
x = 0
Example #23
def get_cluster(cluster_ips_file: str = 'ips.txt') -> Cluster:
    auth = PlainTextAuthProvider(username=os.environ['DSE_USER'], password=os.environ['DSE_PASS'])
    with open(cluster_ips_file) as ips_file:
        ips = list(map(lambda ele: ele.strip(), ips_file.read().split(",")))
    logger.info(f'Read cluster nodes IPs from {cluster_ips_file}: {ips}')
    return Cluster(contact_points=ips, auth_provider=auth)
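A short usage sketch for get_cluster; the query and log line are illustrative, and DSE_USER/DSE_PASS must be set in the environment as above:

# Hypothetical usage: ips.txt holds a comma-separated list of contact points.
cluster = get_cluster('ips.txt')
session = cluster.connect()
try:
    row = session.execute('SELECT release_version FROM system.local').one()
    logger.info(f'Connected; server release {row.release_version}')
finally:
    cluster.shutdown()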
Example #24
#Configuration
contactpoints = ['172.31.13.134', '172.31.4.17']
localDC = "dc1"
keyspace = "cme"
CL = ConsistencyLevel.ONE
profile1 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc=localDC,
                                                  used_hosts_per_remote_dc=3),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.1, 20),
    consistency_level=CL)

print "Connecting to cluster"

cluster = Cluster(
    contact_points=contactpoints,
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
)

session = cluster.connect(keyspace)

c = 0
x = 0
while 1:
    quotetime = calendar.timegm(time.gmtime())
    code = random.choice(string.letters).lower() + random.choice(
        string.letters).lower() + random.choice(string.letters).lower()
    quoteprice = decimal.Decimal(random.randrange(10000)) / 100
    print quotetime, code, quoteprice

    session.execute(
        """ INSERT INTO cme.quotes (code, quotetime, quoteprice) VALUES (%s, %s, %s) """,
Example #25
# At the time of this blog post, dse_graph only supports gremlinpython version 3.2.x
# This script was tested using gremlinpython version 3.2.6

from dse.cluster import Cluster, GraphExecutionProfile, EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT
from dse.graph import GraphOptions
from dse_graph import DseGraph
from gremlin_python.process.graph_traversal import __
from gremlin_python.structure.graph import Vertex

graph_name = 'modern'
ep_schema = GraphExecutionProfile(graph_options=GraphOptions(graph_name=graph_name))
ep = DseGraph.create_execution_profile(graph_name)

cluster = Cluster(execution_profiles={'schema': ep_schema, EXEC_PROFILE_GRAPH_DEFAULT: ep})
session = cluster.connect()

# Define schema
session.execute_graph("system.graph(name).create()", { 'name': graph_name }, execution_profile = EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT)
session.execute_graph("schema.propertyKey('neighborhood').Bigint().create()", execution_profile = 'schema')
session.execute_graph("schema.propertyKey('name').Text().create()", execution_profile = 'schema')
session.execute_graph("schema.propertyKey('age').Bigint().create()", execution_profile = 'schema')
session.execute_graph("schema.propertyKey('weight').Float().create()", execution_profile = 'schema')
session.execute_graph("schema.vertexLabel('person').partitionKey('neighborhood').clusteringKey('name').properties('age').create()", execution_profile = 'schema')
session.execute_graph("schema.edgeLabel('knows').properties('weight').connection('person', 'person').create()", execution_profile = 'schema')

# Execute batch
batch = DseGraph.batch()
batch.add(__.addV('person').property('neighborhood', 0).property('name', 'bob').property('age', 23))
batch.add(__.addV('person').property('neighborhood', 0).property('name', 'alice').property('age', 21))
batch.add(__.addE('knows')
        .from_(Vertex({ 'neighborhood': 0, 'name': 'bob', '~label' : 'person' }))
Example #26
    def test_metrics_per_cluster(self):
        """
        Test to validate that metrics can be scoped to individual clusters
        @since 3.6.0
        @jira_ticket PYTHON-561
        @expected_result metrics should be scoped to a cluster level

        @test_category metrics
        """

        cluster2 = Cluster(
            metrics_enabled=True,
            protocol_version=PROTOCOL_VERSION,
            execution_profiles={
                EXEC_PROFILE_DEFAULT:
                ExecutionProfile(retry_policy=FallthroughRetryPolicy())
            })
        cluster2.connect(self.ks_name, wait_for_all_pools=True)

        self.assertEqual(len(cluster2.metadata.all_hosts()), 3)

        query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name),
                                consistency_level=ConsistencyLevel.ALL)
        self.session.execute(query)

        # Pause node so it shows as unreachable to coordinator
        get_node(1).pause()

        try:
            # Test write
            query = SimpleStatement(
                "INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(
                    self.ks_name),
                consistency_level=ConsistencyLevel.ALL)
            with self.assertRaises(WriteTimeout):
                self.session.execute(query, timeout=None)
        finally:
            get_node(1).resume()

        # Change the scales stats_name of the cluster2
        cluster2.metrics.set_stats_name('cluster2-metrics')

        stats_cluster1 = self.cluster.metrics.get_stats()
        stats_cluster2 = cluster2.metrics.get_stats()

        # Test direct access to stats
        self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
        self.assertEqual(0, cluster2.metrics.stats.write_timeouts)

        # Test direct access to a child stats
        self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean'])
        self.assertEqual(0.0, cluster2.metrics.request_timer['mean'])

        # Test access via metrics.get_stats()
        self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean'])
        self.assertEqual(0.0, stats_cluster2['request_timer']['mean'])

        # Test access by stats_name
        self.assertEqual(
            0.0,
            scales.getStats()['cluster2-metrics']['request_timer']['mean'])

        cluster2.shutdown()
Example #27
 def session_setup(cls):
     cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
     cls.session = cls.cluster.connect()
     cls.ks_name = cls.__name__.lower()
     cls.cass_version, cls.cql_version = get_server_versions()
     cls.graph_name = cls.__name__.lower()
Example #28
 def test_set_keyspace_twice(self):
     cluster = Cluster(protocol_version=PROTOCOL_VERSION)
     session = cluster.connect()
     session.execute("USE system")
     session.execute("USE system")
     cluster.shutdown()
Example #29
    def setUp(self):

        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect(wait_for_all_pools=True)
Example #30
 def cluster_as(self, usr, pwd):
     return Cluster(protocol_version=PROTOCOL_VERSION,
                    idle_heartbeat_interval=0,
                    auth_provider=self.get_authentication_provider(username=usr, password=pwd))
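A hedged usage sketch for cluster_as; the credentials shown are placeholders:

 # Hypothetical usage of the helper above.
 cluster = self.cluster_as('cassandra', 'cassandra')
 try:
     session = cluster.connect()
     session.execute("SELECT release_version FROM system.local")
 finally:
     cluster.shutdown()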