Example #1
def check_cql(ip, ssl_context=None):
    try:
        cluster = get_cql_cluster(ip, ssl_context)
        cluster.connect()
        cluster.shutdown()
    except cassandra.cluster.NoHostAvailable:
        raise NotYetUp
Example #2
File: test_ssl.py Project: xemul/scylla
def try_connect(orig_cluster, ssl_version):
    cluster = cassandra.cluster.Cluster(
        contact_points=orig_cluster.contact_points,
        port=orig_cluster.port,
        protocol_version=orig_cluster.protocol_version,
        auth_provider=orig_cluster.auth_provider,
        ssl_context=ssl.SSLContext(ssl_version))
    cluster.connect()
    cluster.shutdown()
Example #3
def try_connect(orig_cluster, ssl_version):
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    ssl_context.minimum_version = ssl_version
    ssl_context.maximum_version = ssl_version
    cluster = cassandra.cluster.Cluster(
        contact_points=orig_cluster.contact_points,
        port=orig_cluster.port,
        protocol_version=orig_cluster.protocol_version,
        auth_provider=orig_cluster.auth_provider,
        ssl_context=ssl_context)
    cluster.connect()
    cluster.shutdown()
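Example #2 pins the TLS version by passing a per-version constant (e.g. ssl.PROTOCOL_TLSv1_2) straight to SSLContext, a pattern deprecated since Python 3.6; Example #3 builds a single PROTOCOL_TLS context and clamps minimum_version/maximum_version instead, so its ssl_version argument is an ssl.TLSVersion member. A minimal sketch of how the second form might be exercised; the fixture and the expected outcomes are assumptions, not taken from the original test file:

import ssl
import pytest
import cassandra.cluster

def test_tls_versions(orig_cluster):
    # Assumed scenario: the server accepts TLS 1.2 but refuses TLS 1.0.
    try_connect(orig_cluster, ssl.TLSVersion.TLSv1_2)
    with pytest.raises(cassandra.cluster.NoHostAvailable):
        try_connect(orig_cluster, ssl.TLSVersion.TLSv1)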
Example #4
def test_non_tls_on_tls(cql):
    if not cql.cluster.ssl_context:
        pytest.skip("SSL-specific tests are skipped without the '--ssl' option")
    # Copy the configuration of the existing "cql", just not the ssl_context
    cluster = cassandra.cluster.Cluster(
        contact_points=cql.cluster.contact_points,
        port=cql.cluster.port,
        protocol_version=cql.cluster.protocol_version,
        auth_provider=cql.cluster.auth_provider)
    with pytest.raises(cassandra.cluster.NoHostAvailable, match="ProtocolError"):
        cluster.connect()
    cluster.shutdown()  # reached only if connect() failed as expected
Example #5
def get_cluster(host_name='localhost', wait_timeout=300, login=None, password=None, quiet=False):
    start_at = time.time()
    wait_println_counter = 0

    auth_provider = None
    if login is not None and password is not None:
        auth_provider = PlainTextAuthProvider(username=login, password=password)

    while time.time() - start_at < wait_timeout:
        try:
            cluster = cassandra.cluster.Cluster([host_name],
                                                load_balancing_policy=
                                                cassandra.cluster.TokenAwarePolicy(
                                                    cassandra.cluster.DCAwareRoundRobinPolicy(
                                                        local_dc='datacenter1'
                                                    )
                                                ),
                                                auth_provider=auth_provider
                                                )
            return cluster.connect()
        except (cassandra.cluster.NoHostAvailable, cassandra.UnresolvableContactPoints) as e:
            if not quiet:
                print(repr(e))
            # Print a progress message every third failed attempt
            # (three 10-second sleeps = roughly 30 seconds of waiting).
            wait_println_counter += 1
            if wait_println_counter == 3:
                if not quiet:
                    print("Waiting 30 more seconds...")
                wait_println_counter = 0
            time.sleep(10)
    else:
        if not quiet:
            print("Waiting time exceeded, aborting...")
        sys.exit(1)
Example #6
def setup_session(csv_hosts, port):
    """Connect to a Cassandra cluster"""
    hosts = [host.strip() for host in csv_hosts.split(',')]
    cluster = cassandra.cluster.Cluster(hosts, port=port)
    session = cluster.connect()

    return session
Example #7
def connect_cassandra(is_client_encryption_enable, tls_version=PROTOCOL_TLS):
    connected = False
    attempt = 0
    session = None
    _ssl_context = None

    if is_client_encryption_enable:

        ssl_context = SSLContext(tls_version)
        ssl_context.load_verify_locations(certfile)
        ssl_context.verify_mode = CERT_REQUIRED
        ssl_context.load_cert_chain(certfile=usercert, keyfile=userkey)
        _ssl_context = ssl_context

    while not connected and attempt < 10:
        try:
            cluster = Cluster(contact_points=["127.0.0.1"],
                              ssl_context=_ssl_context,
                              protocol_version=ProtocolVersion.V4)
            session = cluster.connect()
            connected = True
        except cassandra.cluster.NoHostAvailable:
            attempt += 1
            time.sleep(10)

    if tls_version != PROTOCOL_TLS:  # other TLS versions are only used for testing, so close the session
        session.shutdown()

    return session
Example #8
def create_database():
    '''
    Connects to the local Cassandra cluster.

    Creates the sparkify keyspace (if it doesn't exist).

    Sets the keyspace of the session to sparkify.

    Parameters:
        None

    Returns:
        cluster: Local Cassandra cluster
        session: Current Cassandra session
    '''
    
    # connect to cluster
    cluster = cassandra.cluster.Cluster(['127.0.0.1'])
    session = cluster.connect()

    # create keyspace
    session.execute(
        '''
        CREATE KEYSPACE IF NOT EXISTS sparkify
        WITH REPLICATION = 
        { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
        '''
    )

    # set current keyspace for session
    session.set_keyspace('sparkify')

    return cluster, session
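create_database returns both handles so the caller can release them when done; a short usage sketch (the follow-up query is illustrative):

cluster, session = create_database()
try:
    session.execute("SELECT * FROM system.local")  # hypothetical follow-up work
finally:
    session.shutdown()
    cluster.shutdown()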
Example #9
def main():
    # Process command line arguments
    options = docopt.docopt(__doc__)
    cassandra_ip_list = options['<cassandra_ip_list>']

    # Open connection to Cassandra
    cluster = cassandra.cluster.Cluster(cassandra_ip_list,
                                        control_connection_timeout=60,
                                        protocol_version=3)
    session = cluster.connect('ooi')

    # Extract Stream Metadata from Cassandra
    stream_metadata = execute_query(session, ALL_STREAM_METADATA,
                                    STREAM_METADATA_COLUMNS)

    # Extract Partition Metadata from Cassandra
    partition_metadata = execute_query(session, ALL_PARTITION_METADATA,
                                       PARTITION_METADATA_COLUMNS)

    # Close connection to Cassandra
    cluster.shutdown()

    # Write Stream Metadata to CSV file
    write_csv_file(STREAM_METADATA_FILEPATH, stream_metadata,
                   STREAM_METADATA_COLUMNS)

    # Write Partition Metadata to CSV file
    write_csv_file(PARTITION_METADATA_FILEPATH, partition_metadata,
                   PARTITION_METADATA_COLUMNS)
Example #10
    def __init__(self, cluster):
        self._session = cluster.connect('bombard')
        self._thread = threading.Thread(target=self._go)
        self._thread.daemon = True
        self._thread.start()
        self._symbol = Bomber.count
        Bomber.count += 1
Example #11
    def run():
        query = textwrap.dedent(f"""
            SELECT
            starttime, stoptime, start_lon, start_lat, stop_lon, stop_lat, bikeid
            FROM
            trip_by_startdayhour
            WHERE
                    starttime_year={dt.year}
                AND
                    starttime_month={dt.month}
                AND
                    starttime_day={dt.day}
                AND
                    starttime_hour={dt.hour}
            ;
            """)
        cluster = cassandra.cluster.Cluster()
        session = cluster.connect("paroisem")
        for r in session.execute(query):
            yield Trip(
                r.starttime,
                r.stoptime,
                r.start_lon,
                r.start_lat,
                r.stop_lon,
                r.stop_lat,
                r.bikeid,
            )
        # Or connect only once: make a class with the connection as an attribute
        cluster.shutdown()
Example #12
def connect(username=None, password=None, timeout=CONNECT_TIMEOUT):
    # We pull the currently configured listen address and port from the
    # yaml, rather than the service configuration, as it may have been
    # overridden.
    cassandra_yaml = read_cassandra_yaml()
    address = cassandra_yaml['rpc_address']
    if address == '0.0.0.0':
        address = 'localhost'
    port = cassandra_yaml['native_transport_port']

    auth_provider = get_auth_provider(username, password)

    addresses = set([address])
    cluster_ep = reactive.endpoint_from_name('cluster')
    addresses.update(cluster_ep.get_bootstrapped_ips())

    # Although we specify a reconnection_policy, it does not apply to
    # the initial connection so we retry in a loop.
    start = time.time()
    until = start + timeout
    while True:
        cluster = cassandra.cluster.Cluster(list(addresses), port=port, auth_provider=auth_provider)
        try:
            session = cluster.connect()
            session.default_timeout = timeout
            break
        except cassandra.cluster.NoHostAvailable as x:
            cluster.shutdown()
            if time.time() > until:
                raise
        time.sleep(1)
    try:
        yield session
    finally:
        cluster.shutdown()
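Because connect() yields the session inside a try/finally, it is written to be used as a context manager; called directly it only returns a generator object. A minimal wiring sketch, assuming the function is not already decorated upstream:

import contextlib

connect_cm = contextlib.contextmanager(connect)

with connect_cm() as session:
    session.execute("SELECT release_version FROM system.local")
# cluster.shutdown() runs in the generator's finally block on exit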
Example #13
def main():
    '''
    Script entry point:
    - Queries the data in the sparkify Cassandra keyspace, and prints the results

    Parameters:
        None

    Returns:
        None
    '''
    # connect to cluster
    cluster = cassandra.cluster.Cluster(['127.0.0.1'])
    session = cluster.connect()

    # set current keyspace for session
    session.set_keyspace('sparkify')

    # query data
    ## sessions for query 1
    print_query(session, 'Query 1')

    ## user_sessions for query 2
    print_query(session, 'Query 2')

    ## song_listeners for query 3
    print_query(session, 'Query 3')

    # shut down session and cluster
    session.shutdown()
    cluster.shutdown()
Example #14
    def test_prepared_metadata_generation(self):
        """
        Test to validate that result metadata is appropriately populated across protocol versions

        In protocol version 1, result metadata is retrieved every time the statement is issued. In all
        other protocol versions it is set once upon prepare, then re-used. This test ensures that it
        manifests the same across multiple protocol versions.

        @since 3.6.0
        @jira_ticket PYTHON-71
        @expected_result result metadata is consistent.
        """

        base_line = None
        for proto_version in get_supported_protocol_versions():
            cluster = Cluster(protocol_version=proto_version)
            session = cluster.connect()
            select_statement = session.prepare("SELECT * FROM system.local")
            if proto_version == 1:
                self.assertEqual(select_statement.result_metadata, None)
            else:
                self.assertNotEqual(select_statement.result_metadata, None)
            future = session.execute_async(select_statement)
            results = future.result()
            if base_line is None:
                base_line = results[0].__dict__.keys()
            else:
                self.assertEqual(base_line, results[0].__dict__.keys())
            cluster.shutdown()
Example #16
    def get_data(
        self,
        table,
        partition_keys,
        keys,
        row_limit=None,
    ):

        if row_limit is None:
            row_limit = self.config.cassandra.row_limit

        auth_provider = cassandra.auth.PlainTextAuthProvider(
            username=self.config.cassandra.username,
            password=self.config.cassandra.password)
        cluster = cassandra.cluster.Cluster([self.config.cassandra.host],
                                            auth_provider=auth_provider)
        session = cluster.connect()
        session.set_keyspace(self.config.cassandra.keyspace)
        session.row_factory = cassandra.query.ordered_dict_factory

        data = []
        offset = datetime.timedelta()
        partition_key_names, partition_key_value_lists = zip(
            *partition_keys.items())
        try:
            for vs in itertools.product(*partition_key_value_lists):
                if len(data) >= row_limit:
                    print('CassandraClient: row limit hit')
                    break
                statement_string = 'SELECT * FROM {}{}{}'.format(
                    table,
                    ' WHERE ' if partition_keys else '',
                    ' and '.join("{}='{}'".format(k, v)
                                 for k, v in zip(partition_key_names, vs)),
                )
                statement = cassandra.query.SimpleStatement(
                    statement_string,
                    fetch_size=self.config.cassandra.fetch_size,
                )
                print('CassandraClient: executing statement {!r}...'.format(
                    statement_string),
                      end='',
                      flush=True)
                dt_begin = datetime.datetime.now(tz=datetime.timezone.utc)
                for row in session.execute(statement):
                    #print('{}: {}'.format(row.get('countertype', None), ', '.join(row.get('datadefinition', {}).keys())))
                    data.append([
                        diagnoser.tools.getitemitem(row, key, None)
                        for key in keys
                    ])
                dt_end = datetime.datetime.now(tz=datetime.timezone.utc)
                print(' time for last request: {:.4f} seconds, '
                      'retrieved {} records so far'.format(
                          (dt_end - dt_begin).total_seconds(), len(data)))
        except KeyboardInterrupt:
            print('cassandra_data_manager: request aborted, returning '
                  'partial data')

        return pd.DataFrame(data, columns=['.'.join(k) for k in keys])
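get_data expands partition_keys with itertools.product and issues one SELECT per combination of partition-key values, stopping once row_limit rows have been collected. A hedged call sketch; the table, key names, and the client object are illustrative, not from the original:

df = client.get_data(
    table='metrics',                                    # hypothetical table
    partition_keys={'day': ['2021-01-01', '2021-01-02'],
                    'host': ['a', 'b']},                # 2 x 2 = 4 SELECTs
    keys=[('day',), ('host',), ('datadefinition', 'value')],
)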
Example #17
def writeStation(dataToInsert):
    cluster = cassandra.cluster.Cluster(['localhost'])
    session = cluster.connect('adamloic_lixiang_projet')

    # Build the statement once, binding values as driver parameters so that
    # text and timestamp values are quoted and escaped correctly.
    insertionQuery = """
        INSERT INTO AsosItalyStation (station, year, month, day, hour,
                                      minute, longitude, latitude, temperatureF,
                                      dewPoIntF, relativeHumidity, windDirection,
                                      windSpeed, precipitation, pressure, seaLevelPressure,
                                      visibility, windGust, skyLevel1coverage, skyLevel2coverage,
                                      skyLevel3coverage, skyLevel4coverage, skyLevel1altitude,
                                      skyLevel2altitude, skyLevel3altitude, skyLevel4altitude,
                                      weatherCode, iceAccretion1, iceAccretion3, iceAccretion6,
                                      peakWindGust, peakWindDirection, peakWindTime, temperature,
                                      observation)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    """
    i = 0
    for data in dataToInsert:
        session.execute(insertionQuery, (
            data["station"],
            data["valid"][0], data["valid"][1], data["valid"][2],
            data["valid"][3], data["valid"][4],
            data["lon"], data["lat"], data["tmpf"], data["dwpf"],
            data["relh"], data["drct"], data["sknt"], data["p01i"],
            data["alti"], data["mslp"], data["vsby"], data["gust"],
            data["skyc1"], data["skyc2"], data["skyc3"], data["skyc4"],
            data["skyl1"], data["skyl2"], data["skyl3"], data["skyl4"],
            data["wxcodes"],
            data["ice_accretion_1hr"], data["ice_accretion_3hr"], data["ice_accretion_6hr"],
            data["peak_wind_gust"], data["peak_wind_drct"], data["peak_wind_time"],
            data["feel"], data["observation"],
        ))
        i = i + 1
        print(i)
Example #18
def connect(username=None,
            password=None,
            timeout=CONNECT_TIMEOUT,
            auth_timeout=CONNECT_TIMEOUT):
    # We pull the currently configured listen address and port from the
    # yaml, rather than the service configuration, as it may have been
    # overridden.
    cassandra_yaml = read_cassandra_yaml()
    address = cassandra_yaml['rpc_address']
    if address == '0.0.0.0':
        address = 'localhost'
    port = cassandra_yaml['native_transport_port']

    if username is None or password is None:
        username, password = superuser_credentials()

    auth = hookenv.config()['authenticator']
    if auth == 'AllowAllAuthenticator':
        auth_provider = None
    else:
        auth_provider = cassandra.auth.PlainTextAuthProvider(username=username,
                                                             password=password)

    # Although we specify a reconnection_policy, it does not apply to
    # the initial connection so we retry in a loop.
    start = time.time()
    until = start + timeout
    auth_until = start + auth_timeout
    while True:
        cluster = cassandra.cluster.Cluster([address],
                                            port=port,
                                            auth_provider=auth_provider)
        try:
            session = cluster.connect()
            session.default_timeout = timeout
            break
        except cassandra.cluster.NoHostAvailable as x:
            cluster.shutdown()
            now = time.time()
            # If every node failed auth, reraise one of the
            # AuthenticationFailed exceptions. Unwrapping the exception
            # means call sites don't have to sniff the exception bundle.
            # We don't retry on auth fails; this method should not be
            # called if the system_auth data is inconsistent.
            auth_fails = [
                af for af in x.errors.values()
                if isinstance(af, cassandra.AuthenticationFailed)
            ]
            if auth_fails:
                if now > auth_until:
                    raise auth_fails[0]
            if now > until:
                raise
        time.sleep(1)
    try:
        yield session
    finally:
        cluster.shutdown()
Example #19
    def __init__(self, KEYSPACE):
        cluster = cassandra.cluster.Cluster()
        session = cluster.connect()
        session.execute(f"""
                CREATE KEYSPACE IF NOT EXISTS {KEYSPACE}
                WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': '2' }}
                """)

        session.set_keyspace(KEYSPACE)
Example #20
    def __init__(self, refdes, uframe_ip, cassandra_ip_list):
        self.refdes = refdes
        self.subsite, self.node, self.sensor = self.parse_refdes(refdes)
        # For now use version=3 against the current cassandra.
        cluster = cassandra.cluster.Cluster(cassandra_ip_list, control_connection_timeout=60, protocol_version=3)
        self.session = cluster.connect('ooi')
        stream_url = STREAM_METADATA_SERVICE_URL_TEMPLATE.format(uframe_ip)
        partition_url = PARTITION_METADATA_SERVICE_URL_TEMPLATE.format(uframe_ip)
        self.metadata_service_api = MetadataServiceAPI(stream_url, partition_url)
Example #21
def connect(username=None, password=None, timeout=CONNECT_TIMEOUT,
            auth_timeout=CONNECT_TIMEOUT):
    # We pull the currently configured listen address and port from the
    # yaml, rather than the service configuration, as it may have been
    # overridden.
    cassandra_yaml = read_cassandra_yaml()
    address = cassandra_yaml['rpc_address']
    if address == '0.0.0.0':
        address = 'localhost'
    port = cassandra_yaml['native_transport_port']

    if username is None or password is None:
        username, password = superuser_credentials()

    auth = hookenv.config()['authenticator']
    if auth == 'AllowAllAuthenticator':
        auth_provider = None
    else:
        auth_provider = cassandra.auth.PlainTextAuthProvider(username=username,
                                                             password=password)

    # Although we specify a reconnection_policy, it does not apply to
    # the initial connection so we retry in a loop.
    start = time.time()
    until = start + timeout
    auth_until = start + auth_timeout
    while True:
        cluster = cassandra.cluster.Cluster([address], port=port,
                                            auth_provider=auth_provider)
        try:
            session = cluster.connect()
            session.default_timeout = timeout
            break
        except cassandra.cluster.NoHostAvailable as x:
            cluster.shutdown()
            now = time.time()
            # If every node failed auth, reraise one of the
            # AuthenticationFailed exceptions. Unwrapping the exception
            # means call sites don't have to sniff the exception bundle.
            # We don't retry on auth fails; this method should not be
            # called if the system_auth data is inconsistent.
            auth_fails = [af for af in x.errors.values()
                          if isinstance(af, cassandra.AuthenticationFailed)]
            if auth_fails:
                if now > auth_until:
                    raise auth_fails[0]
            if now > until:
                raise
        time.sleep(1)
    try:
        yield session
    finally:
        cluster.shutdown()
Example #22
	def connect(self):
		while True:
			try:
				cluster = Cluster(['131.204.27.98'])
				self.session = cluster.connect()
				break
			except cassandra.cluster.NoHostAvailable:
				print("Connection failed, retrying..")
				time.sleep(1)
			except Exception as e:
				print(str(e))
				time.sleep(1)
		print("Connected to cassandra database.")
Example #23
def _try_connect_to_cluster(hosts, policy=None):
    if hosts[0] == 'localhost':
        p = None
    else:
        p = os.environ.get('KEPLER_PASSWORD') or getpass.getpass("Kepler password on host %s: " % hosts[0])
    auth_provider = PlainTextAuthProvider(username='******', password=p)
    if policy is not None:
        cluster = cassandra.cluster.Cluster(hosts, auth_provider=auth_provider, load_balancing_policy=policy)
    else:
        cluster = cassandra.cluster.Cluster(hosts, auth_provider=auth_provider)
    session = cluster.connect('kepler')
    cluster.register_user_type('kepler', 'parameter', Parameter)
    return session
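register_user_type maps the CQL UDT kepler.parameter onto the Python class Parameter, so rows containing that type come back as Parameter instances instead of namedtuples. The driver instantiates the class with the UDT's fields as keyword arguments, so a minimal compatible class (hypothetical; the real definition is not shown here) could be:

class Parameter(object):
    # The driver calls Parameter(**fields) when deserializing the UDT.
    def __init__(self, **fields):
        for name, value in fields.items():
            setattr(self, name, value)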
Example #24
def getDataFromDatabase(yearValue, monthValue, dateValue, varToPick):

    cluster = cassandra.cluster.Cluster(['localhost'])
    session = cluster.connect('adamloic_lixiang_projet')

    selectQuery = f"""
        SELECT longitude, latitude, {varToPick}
        FROM asosItalyTime
        WHERE year = {yearValue}
        AND month = {monthValue}
        AND validDate = '{dateValue}'
        """
    return session.execute(selectQuery)
Example #25
def _insert_datastream(fname1, fname2, limit=None):
    '''from @jbl: insert into the database'''
    cluster = cassandra.cluster.Cluster()
    session = cluster.connect("ladeveya_projet")

    if limit is not None:
        stream = fd.limiteur(fd.read_csv_flight_data(fname1, fname2), limit)
    else:
        stream = fd.read_csv_flight_data(fname1, fname2)

    for r in stream:
        session.execute(_insert_flight(r))
Example #26
def insert_datastream(stream):
    """Insert datas into the DB.

    Parameters
    ----------
    stream : iterable
             Iterable where values to insert are taken.
    """
    cluster = cassandra.cluster.Cluster()
    session = cluster.connect("paroisem")
    for trip in stream:
        for q in INSERTS_Q:
            query = q(trip)
            session.execute(query)
Example #27
def get_cassandra_session():
    global _cassandra_enabled

    config = get_config()

    hostname = config.get('cassandra', 'hostname')
    username = config.get('cassandra', 'username')
    password = config.get('cassandra', 'password')
    keyspace = config.get('cassandra', 'keyspace')

    try:
        auth_provider = cassandra.auth.PlainTextAuthProvider(username, password)
        cluster = cassandra.cluster.Cluster([hostname], auth_provider=auth_provider)

        return cluster.connect(keyspace)
    except Exception as ex:
        get_logger().warn('could not connect to Cassandra; saving is DISABLED', ex=ex)
        _cassandra_enabled = False
Example #28
def get_cassandra_session(stat=None):
    """creates cluster and gets the session base on key space"""
    # be aware that session cannot be shared between threads/processes
    # or it will raise OperationTimedOut Exception
    if db_config.CLUSTER_HOST2:
        cluster = cassandra.cluster.Cluster([db_config.CLUSTER_HOST1, db_config.CLUSTER_HOST2])
    else:
        # if only one address is available, we have to use older protocol version
        cluster = cassandra.cluster.Cluster([db_config.CLUSTER_HOST1], protocol_version=2)

    if stat and isinstance(stat, BatchStatement):
        # The driver's default RetryPolicy already retries a write that timed
        # out while writing the batch log (WriteType.BATCH_LOG, retry_num == 0),
        # so installing a plain instance is enough for batch statements.
        cluster.default_retry_policy = cassandra.cluster.RetryPolicy()
    session = cluster.connect(db_config.KEY_SPACE)
    session.default_timeout = 30.0
    return session
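To actually change retry behaviour, the driver expects a RetryPolicy subclass with on_write_timeout overridden rather than a call on a plain instance. A minimal sketch; the choice to never retry batch-log writes is illustrative only:

class NoBatchLogRetry(cassandra.cluster.RetryPolicy):
    # Hypothetical policy: rethrow instead of retrying batch-log write timeouts.
    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        if write_type == WriteType.BATCH_LOG:
            return self.RETHROW, None
        return super(NoBatchLogRetry, self).on_write_timeout(
            query, consistency, write_type,
            required_responses, received_responses, retry_num)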
Example #29
    def test_prepared_statement(self):
        """
        Highlight the difference between Prepared and Bound statements
        """

        cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        session = cluster.connect()

        prepared = session.prepare('INSERT INTO test3rf.test (k, v) VALUES (?, ?)')
        prepared.consistency_level = ConsistencyLevel.ONE

        self.assertEqual(str(prepared),
                         '<PreparedStatement query="INSERT INTO test3rf.test (k, v) VALUES (?, ?)", consistency=ONE>')

        bound = prepared.bind((1, 2))
        self.assertEqual(str(bound),
                         '<BoundStatement query="INSERT INTO test3rf.test (k, v) VALUES (?, ?)", values=(1, 2), consistency=ONE>')

        cluster.shutdown()
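The string forms asserted above make the split visible: the PreparedStatement owns the query text and its default consistency level, while bind() yields a BoundStatement carrying concrete values. Either form could be executed before the shutdown; both calls below are standard driver usage against the test's test3rf.test table:

session.execute(prepared.bind((1, 2)))   # explicit BoundStatement
session.execute(prepared, (3, 4))        # the driver binds the values for you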
Example #31
def main():
    '''
    Script entry point:
    - Extracts, transforms and loads data from `event_data` CSV files
    into tables in the sparkify Cassandra keyspace.

    Parameters:
        None

    Returns:
        None
    '''
    # connect to cluster
    cluster = cassandra.cluster.Cluster(['127.0.0.1'])
    session = cluster.connect()

    # set current keyspace for session
    session.set_keyspace('sparkify')

    # create denormalized CSV
    denormalized_csv_filename = create_denormalized_csv()

    # insert data
    insert_query_tuples = [
        (csql_queries.sessions_table_insert, lambda line:
         (int(line[8]), int(line[3]), line[0], line[9], float(line[5]))),
        (csql_queries.user_sessions_table_insert, lambda line:
         (int(line[10]), int(line[8]), int(line[3]), line[0], line[9], line[1],
          line[4])),
        (csql_queries.song_listeners_table_insert, lambda line:
         (line[9], int(line[10]), line[1], line[4]))
    ]
    insert_rows(session, denormalized_csv_filename, insert_query_tuples)

    # delete denormalized CSV
    delete_denormalized_csv(denormalized_csv_filename)

    # shut down session and cluster
    session.shutdown()
    cluster.shutdown()
Example #32
def main():
    # Process command line arguments
    options = docopt.docopt(__doc__)
    cassandra_ip_list = options['<cassandra_ip_list>']

    # Open connection to Cassandra
    cluster = cassandra.cluster.Cluster(cassandra_ip_list, control_connection_timeout=60, protocol_version=3)
    session = cluster.connect('ooi')

    # Extract Stream Metadata from Cassandra
    stream_metadata = execute_query(session, ALL_STREAM_METADATA, STREAM_METADATA_COLUMNS)

    # Extract Partition Metadata from Cassandra
    partition_metadata = execute_query(session, ALL_PARTITION_METADATA, PARTITION_METADATA_COLUMNS)

    # Close connection to Cassandra
    cluster.shutdown()

    # Write Stream Metadata to CSV file
    write_csv_file(STREAM_METADATA_FILEPATH, stream_metadata, STREAM_METADATA_COLUMNS)

    # Write Partition Metadata to CSV file
    write_csv_file(PARTITION_METADATA_FILEPATH, partition_metadata, PARTITION_METADATA_COLUMNS)
Example #33
def init_database():
    es = elasticsearch.Elasticsearch(setting.ES_HOSTS)
    indexes = elasticsearch.client.IndicesClient(es)
    if not indexes.exists(setting.INDEX_NAME):
        indexes.create(index=setting.INDEX_NAME, body=INDEX_TEMPLATE)

    cluster = cassandra.cluster.Cluster(setting.CASSANDRA_HOSTS)
    session = cluster.connect()
    session.execute("create keyspace if not exists chat with replication = {'class':'SimpleStrategy','replication_factor':1};")
    session.execute("""
create table if not exists chat.messages (
    id bigint, 
    content varchar,
    room_id int,
    user_name varchar,
    primary key ((room_id), id))
    with clustering order by (id desc);""")
    session.execute("""
create table if not exists chat.search_perf (
    id bigint,
    query_time int,
    room_count int,
    all_count int,
    primary key (id));""")
Example #34
args = vars(ap.parse_args())
Y = int(args["year"])
M = int(args["month"])
D = int(args["day"])
h = int(args["hour"])
m = int(args["minute"])

keyspace = 'penonque_project'

# read the indicator to plot and the output file name
indicator = args["indicator"]
outfile = args["outfilename"]

cluster = cassandra.cluster.Cluster(['localhost'])
session = cluster.connect(keyspace)
session.row_factory = named_tuple_factory
rows = session.execute(
    f"select lon,lat,{indicator} from metar_bydate where year={Y} and month={M} and day={D} and hour={h} and minute={m};"
)

# plot the station points (lat and lon) and display the indicator value next to each
map = Basemap(llcrnrlon=-18.87,
              llcrnrlat=26.75,
              urcrnrlon=5.34,
              urcrnrlat=43.8,
              resolution='i',
              projection='tmerc',
              lat_0=39.5,
              lon_0=-3.25)
Example #35
def get_session():
    cluster = cassandra.cluster.Cluster(['localhost'])
    session = cluster.connect()
    session.set_keyspace('adamloic_lixiang_projet')
    return session
Example #36
    def __init__(self, KEYSPACE):
        cluster = cassandra.cluster.Cluster()
        self.session = cluster.connect(KEYSPACE)
Example #37
            return jsonify(result={'account': rows[0]['account']})
    abort(404)


@app.route('/createSlaveDomain/<ip>/<domain>', methods=['PUT'])
def create_slave_domain(ip, domain):
    ''' create a new slave domain '''
    db_session.execute(
        """
        INSERT INTO domains (zone, kind, masters)
        VALUES (%s, 'SLAVE', %s)
        """, (domain, [ip]))
    return jsonify(result=True)


if __name__ == '__main__':
    app.config['HOST'] = os.getenv('HOST', '::1')
    app.config['PORT'] = os.getenv('PORT', 5000)
    app.config['DEBUG'] = os.getenv('DEBUG', False)
    app.config['KEYSPACE'] = os.getenv('KEYSPACE', 'powerdns')

    cassandra_nodes = os.getenv('CASSANDRA_NODES')
    if not cassandra_nodes:
        raise SystemExit("CASSANDRA_NODES is not set")
    app.config['cassandra_nodes'] = cassandra_nodes.split(',')

    cluster = cassandra.cluster.Cluster(app.config['cassandra_nodes'])
    db_session = cluster.connect(app.config['KEYSPACE'])
    db_session.row_factory = cassandra.query.dict_factory
    app.run(host=app.config['HOST'], port=app.config['PORT'])
Example #38
import cassandra.cluster
import threading
import sys
import argparse
import uuid
import time
import random

parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bootstrap', action='store_true', help="setup keyspace and table")
parser.add_argument('threads', type=int, help="number of simultaneous sessions")
arguments = parser.parse_args()

cluster = cassandra.cluster.Cluster()
if arguments.bootstrap:
    session = cluster.connect()
    session.execute("DROP KEYSPACE IF EXISTS bombard")
    session.execute("CREATE KEYSPACE bombard WITH REPLICATION = {'class':'SimpleStrategy','replication_factor':1}")
    session.execute('USE bombard')
    session.execute("CREATE TABLE tokens (id uuid PRIMARY KEY, thing uuid)")
    sys.exit(0)


class Bomber(object):
    count = 0

    def __init__(self, cluster):
        self._session = cluster.connect('bombard')
        self._thread = threading.Thread(target=self._go)
        self._thread.daemon = True
        self._thread.start()
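The _go method the worker threads run is not included in this excerpt; below is a minimal sketch consistent with the tokens table created at bootstrap, purely an assumption about what the load loop does:

    def _go(self):
        # Hypothetical write loop against the bombard.tokens table.
        insert = self._session.prepare(
            "INSERT INTO tokens (id, thing) VALUES (?, ?)")
        while True:
            self._session.execute(insert, (uuid.uuid4(), uuid.uuid4()))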
Example #39
    def load_provenance(self):
        cluster = cassandra.cluster.Cluster(self.cassandra_ip_list,
                                            control_connection_timeout=60,
                                            protocol_version=3)
        session = cluster.connect('ooi')
        provenance = self.execute_query(session, ALL_DATASET_L0_PROVENANCE,
                                        DATASET_L0_PROVENANCE_COLUMNS)
        cluster.shutdown()

        with sqlite3.connect(self.database) as conn:

            def get_streamed_start(method, filename):
                if not streamed(method):
                    return None
                date = time_util.parse_basic_iso8601_date(filename)
                if not date:
                    return None
                return time_util.java_time_from_basic_iso8601_date(date)

            def get_streamed_stop(start):
                if not start:
                    return None
                return start + time_util.MILLIS_PER_DAY - 1

            conn.create_function("get_streamed_start", 2, get_streamed_start)
            conn.create_function("get_streamed_stop", 1, get_streamed_stop)

            conn.execute("DROP TABLE IF EXISTS PROVENANCE")
            conn.execute("""CREATE TABLE PROVENANCE (subsite TEXT, node TEXT,
                 sensor TEXT, method TEXT, deployment INTEGER,
                 rowid TEXT, filename TEXT, parsername TEXT,
                 parserversion TEXT)""")

            for row in provenance:
                subsite = row['subsite']
                node = row['node']
                sensor = row['sensor']
                method = row['method']
                deployment = row['deployment']
                rowid = str(row['id'])
                filename = row['fileName']
                parsername = row['parserName']
                parserversion = row['parserVersion']

                record = [
                    subsite, node, sensor, method, deployment, rowid, filename,
                    parsername, parserversion
                ]

                conn.execute(
                    """INSERT INTO PROVENANCE (subsite, node, sensor,
                    method, deployment, rowid, filename, parsername,
                    parserversion) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);""",
                    record)

            conn.execute("""ALTER TABLE PROVENANCE ADD COLUMN
                         start INTEGER""")
            conn.execute("""UPDATE PROVENANCE SET
                         start=get_streamed_start(method, filename)""")
            conn.execute("""ALTER TABLE PROVENANCE ADD COLUMN
                         stop INTEGER""")
            conn.execute("""UPDATE PROVENANCE SET
                         stop=get_streamed_stop(start)""")

            conn.commit()
Example #40
import cassandra.cluster
import sys
import uuid

build_id = sys.argv[1]
return_code = 1

# Cassandra setup
cluster = cassandra.cluster.Cluster()
session = cluster.connect('test')

select_statement = session.prepare(
		'SELECT event_type, data '
		'FROM build_events '
		'WHERE build_id=? ORDER BY clock')
for row in session.execute(select_statement, [uuid.UUID(build_id)]):
	if row.event_type == 'stdout':
		sys.stdout.write('\033[0m' + row.data + '\033[0m')
	elif row.event_type == 'stderr':
		sys.stderr.write('\033[91m' + row.data + '\033[0m')
	elif row.event_type == 'exit':
		return_code = int(row.data)
sys.exit(return_code)
Example #41
    for zone in zones:
        inter = dict(
            zone=zone['zone'],
            kind=zone['kind'],
            masters=zone['masters'],
            serial=zone['serial'],
            notified_serial=zone['notified_serial'],
            last_check=zone['last_check'],
        )
        result.append(inter)

    return jsonify(result=result)


if __name__ == '__main__':
    app.config['HOST'] = os.getenv('HOST', '192.168.123.91')
    app.config['PORT'] = os.getenv('PORT', 5000)
    app.config['DEBUG'] = os.getenv('DEBUG', False)
    # use your own keyspace in place of osnworld_pdns_backend
    app.config['KEYSPACE'] = os.getenv('KEYSPACE', 'osnworld_pdns_backend')

    cassandra_nodes = os.getenv('CASSANDRA_NODES')
    if not cassandra_nodes:
        raise SystemExit("CASSANDRA_NODES is not set")
    app.config['cassandra_nodes'] = cassandra_nodes.split(',')

    cluster = cassandra.cluster.Cluster(app.config['cassandra_nodes'])
    db_session = cluster.connect(app.config['KEYSPACE'])
    db_session.row_factory = cassandra.query.dict_factory
    app.run(host=app.config['HOST'], port=app.config['PORT'])
Example #42
import cassandra
import folium
import re
import cassandra.cluster

KEYSPACE = 'thbourge_td3'
TABLE = 'projectq2'

cluster = cassandra.cluster.Cluster()
session = cluster.connect(KEYSPACE)

def getMapStationInformationbyFullDate(date):
    dateparser = re.compile(r"(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+) (?P<hour>\d+):(?P<minute>\d+)")
    match_date = dateparser.match(date)
    if not match_date:
        print("Rentrez une date valide YYYY-MM-DD HH:MM")
        return 
    date_dict = match_date.groupdict()
    rows = session.execute(f"""SELECT * FROM {KEYSPACE}.{TABLE} where year={int(date_dict['year'])} AND month={int(date_dict['month'])} AND day={int(date_dict['day'])} AND hour={int(date_dict['hour'])} AND minute={int(date_dict['minute'])}""")
    m = folium.Map(location=[47.029895, 2.440967], zoom_start=6)
    if rows:
        for row in rows:
            html=f"""
                <b>{row.station} informations le {date}</b></br></br>
                <ul>
                    <li>Temperature réelle: {round(row.tmpf,1)} F</li>
                    <li>Temperature ressentie: {round(row.feel,1)} F</li>
                    <li>Pression atmosphérique : {round(row.alti,1)} inches</li>
                    <li>Force du vent : {round(row.sknt,1)} noeuds</li>
                    <li>Direction du vent : {round(row.drct,1)} degrée</li>
                    <li>Humidité de l'air : {round(row.relh,1)} %</li>
Example #43
    def load_provenance(self):
        cluster = cassandra.cluster.Cluster(
            self.cassandra_ip_list, control_connection_timeout=60,
            protocol_version=3)
        session = cluster.connect('ooi')
        provenance = self.execute_query(
            session, ALL_DATASET_L0_PROVENANCE,
            DATASET_L0_PROVENANCE_COLUMNS)
        cluster.shutdown()

        with sqlite3.connect(self.database) as conn:

            def get_streamed_start(method, filename):
                if not streamed(method):
                    return None
                date = time_util.parse_basic_iso8601_date(filename)
                if not date:
                    return None
                return time_util.java_time_from_basic_iso8601_date(date)

            def get_streamed_stop(start):
                if not start:
                    return None
                return start + time_util.MILLIS_PER_DAY - 1

            conn.create_function("get_streamed_start", 2, get_streamed_start)
            conn.create_function("get_streamed_stop", 1, get_streamed_stop)

            conn.execute("DROP TABLE IF EXISTS PROVENANCE")
            conn.execute(
                """CREATE TABLE PROVENANCE (subsite TEXT, node TEXT,
                 sensor TEXT, method TEXT, deployment INTEGER,
                 rowid TEXT, filename TEXT, parsername TEXT,
                 parserversion TEXT)""")

            for row in provenance:
                subsite = row['subsite']
                node = row['node']
                sensor = row['sensor']
                method = row['method']
                deployment = row['deployment']
                rowid = str(row['id'])
                filename = row['fileName']
                parsername = row['parserName']
                parserversion = row['parserVersion']

                record = [subsite, node, sensor, method, deployment, rowid,
                          filename, parsername, parserversion]

                conn.execute(
                    """INSERT INTO PROVENANCE (subsite, node, sensor,
                    method, deployment, rowid, filename, parsername,
                    parserversion) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);""",
                    record)

            conn.execute("""ALTER TABLE PROVENANCE ADD COLUMN
                         start INTEGER""")
            conn.execute("""UPDATE PROVENANCE SET
                         start=get_streamed_start(method, filename)""")
            conn.execute("""ALTER TABLE PROVENANCE ADD COLUMN
                         stop INTEGER""")
            conn.execute("""UPDATE PROVENANCE SET
                         stop=get_streamed_stop(start)""")

            conn.commit()