def cassandra_connection(env, config):
    """Build (but do not connect) a Cassandra Cluster from a config section.

    Reads SSL and authentication settings for the given ``env`` section and
    returns a driver ``Cluster`` object.  Exits the process with status 4 on
    an unrecognised SSL version string.
    """
    ssl_options = None
    if config.get(env, 'ssl_enabled'):
        # Map the config's version string onto the ssl module constant.
        tls_versions = {
            'TLSv1': ssl.PROTOCOL_TLSv1,
            'TLSv1.1': ssl.PROTOCOL_TLSv1_1,
            'TLSv1.2': ssl.PROTOCOL_TLSv1_2,
        }
        requested = config.get(env, 'ssl_version')
        if requested not in tls_versions:
            print('Unknown SSL Version')
            sys.exit(4)
        ssl_options = {
            'ca_certs': config.get(env, 'ssl_ca_certs'),
            'ssl_version': tls_versions[requested],
        }

    auth_provider = None
    if config.get(env, 'auth_enabled'):
        auth_provider = auth.PlainTextAuthProvider(
            username=config.get(env, 'username'),
            password=config.get(env, 'password'))

    return cluster.Cluster(
        config.get(env, 'cluster').split(","),
        auth_provider=auth_provider,
        port=config.get(env, 'port'),
        ssl_options=ssl_options,
    )
def _connect(self, cassandra):
    """Return a live session, lazily building the cluster and session once."""
    if not self.cluster:
        conn_params = cassandra.connection_params()
        self.cluster = cassandra_cluster.Cluster(**conn_params)
    if not self.session:
        self.session = self.cluster.connect()
    return self.session
def setUpClass(cls):
    """Create the test Cassandra Cluster as cls.cassandra."""
    super(TestCaseWithAccessor, cls).setUpClass()
    cls.cassandra = None
    if CASSANDRA_HOSTPORT:
        # Reuse an externally provided Cassandra instead of booting one.
        host, cls.port = CASSANDRA_HOSTPORT.split(':')
        cls.contact_points = [host]
    else:
        cls.setUpCassandra()
    # Make it easy to do raw queries to Cassandra.
    cls.cluster = c_cluster.Cluster(cls.contact_points, cls.port)
    cls.session = cls.cluster.connect()
    for suffix in ("", "_metadata"):
        cls._reset_keyspace(cls.session, cls.KEYSPACE + suffix)
    cls.accessor = bg_cassandra.build(
        keyspace=cls.KEYSPACE,
        contact_points=cls.contact_points,
        port=cls.port,
        timeout=60,
        **cls.ACCESSOR_SETTINGS
    )
    cls.accessor.syncdb()
    cls.accessor.connect()
def setUpClass(cls):
    """Create the test Cassandra Cluster as cls.cassandra."""
    super(TestCaseWithAccessor, cls).setUpClass()
    cls.cassandra = _SlowerTestingCassandra(
        auto_start=False,
        boot_timeout=_SlowerTestingCassandra.BOOT_TIMEOUT)
    try:
        cls.cassandra.setup()
        cls.cassandra.start()
    except Exception as exc:
        logging.exception(exc)
        print("fail to starting cassandra, logging potentially useful debug info",
              file=sys.stderr)
        debug_attrs = ("cassandra_home", "cassandra_yaml", "cassandra_bin",
                       "base_dir", "settings")
        for attr in debug_attrs:
            print(attr, ":", getattr(cls.cassandra, attr, "Unknown"),
                  file=sys.stderr)
        cls.cassandra.cleanup()
        raise
    # testing.cassandra is meant to be used with the Thrift API, so we need to
    # extract the IPs and native port for use with the native driver.
    cls.contact_points = [s.split(":")[0]
                          for s in cls.cassandra.server_list()]
    cls.port = cls.cassandra.cassandra_yaml["native_transport_port"]
    # Make it easy to do raw queries to Cassandra.
    cls.cluster = c_cluster.Cluster(cls.contact_points, cls.port)
    cls.session = cls.cluster.connect()
    for suffix in ("", "_metadata"):
        cls._reset_keyspace(cls.session, cls.KEYSPACE + suffix)
def get_connection(keyspace=None):
    """Return the module-wide Cassandra session, creating it on first use."""
    global _SYCALLDB_CON
    if not _SYCALLDB_CON:
        new_cluster = cluster.Cluster(**cluster_config)
        _SYCALLDB_CON = new_cluster.connect(keyspace)
    return _SYCALLDB_CON
def __init__(self): try: nodes = ['10.41.87.47', '10.41.87.48'] db = 'adtech_dsp' self.cluster = cluster.Cluster(nodes) self.session = self.cluster.connect(db) except: print "error on connecting to cassandra."
def connectCassandra(ip):
    """Connect to a single-node cluster; return (cluster, session) or (None, None)."""
    try:
        conn = cluster.Cluster([ip])
        return conn, conn.connect()
    except Exception:
        logging.exception(Fore.RED + "Connection failed:")
        return None, None
def connect(self, ip_list=None):
    """Connect to the cluster; return True on success, False on any failure.

    A non-None ``ip_list`` argument is appended to ``self.ip_list`` as-is
    before connecting.
    """
    if ip_list is not None:
        self.ip_list.append(ip_list)
    try:
        # Only pass the auth provider when credentials are configured.
        extra = {'auth_provider': self.cluster_auth} if self.db_user else {}
        new_cluster = CC.Cluster(self.ip_list, **extra)
        new_cluster.protocol_version = 2
        self.session = new_cluster.connect()
        self.updateIPList()
        return True
    except Exception:
        self.session = None
        return False
def _check_cassandra_status():
    """Probe Cassandra connectivity.

    Returns:
        tuple: (True, 'OK') when a session can be opened against the
        configured keyspace, otherwise (False, error_message).
    """
    cassandra = None
    try:
        cassandra = cluster.Cluster(
            CONF.cassandra.cluster_ip_addresses.split(','))
        session = cassandra.connect(CONF.cassandra.keyspace)
        session.shutdown()
    except Exception as ex:
        LOG.exception(str(ex))
        return False, str(ex)
    finally:
        # Fix: the Cluster was never shut down, leaking driver I/O threads
        # and sockets on every health check.
        if cassandra is not None:
            cassandra.shutdown()
    return True, 'OK'
def _connect(self):
    """(Re)establish the cluster connection while holding the connection lock."""
    with self.__connection_lock:
        # Tear down any existing connection before building a new one.
        if self.__cluster is not None:
            self._disconnect()
        new_cluster = cassandra_cluster.Cluster(**self.__cluster_params)
        new_session = new_cluster.connect()
        new_session.row_factory = cassandra_query.dict_factory
        new_session.default_timeout = self.__query_timeout
        self.__cluster = new_cluster
        self.__session = new_session
def _connection(keyspace=None):
    """Open a Cassandra session, creating the keyspace and running migrations.

    Uses the module-level ``conf['cassandra']`` settings for SSL, auth and
    contact points.  Falls back to the configured keyspace when ``keyspace``
    is not given, creating it on InvalidRequest.
    """
    LOG.info('Initiating connection to cassandra')
    ssl_options = None
    if conf['cassandra']['ssl_enabled']:
        LOG.info('SSL is enabled')
        tls_versions = {
            'TLSv1': ssl.PROTOCOL_TLSv1,
            'TLSv1.1': ssl.PROTOCOL_TLSv1_1,
            'TLSv1.2': ssl.PROTOCOL_TLSv1_2,
        }
        ssl_version = conf['cassandra']['ssl_version']
        ssl_options = {'ca_certs': conf['cassandra']['ssl_ca_certs']}
        if ssl_version in tls_versions:
            ssl_options['ssl_version'] = tls_versions[ssl_version]
        else:
            # Unknown version: ssl_options is left without 'ssl_version',
            # matching the original fall-through behavior.
            LOG.info('Unknown SSL Version')
    LOG.info('Finished SSL part')
    LOG.info('Starting password part')
    auth_provider = None
    if conf['cassandra']['auth_enabled']:
        LOG.info('Password authentication is enabled')
        auth_provider = auth.PlainTextAuthProvider(
            username=conf['cassandra']['username'],
            password=conf['cassandra']['password']
        )
    LOG.info('Finished password part')
    LOG.info('Trying to connect to cassandra')
    cluster_connection = cluster.Cluster(
        conf['cassandra']['cluster'],
        auth_provider=auth_provider,
        port=conf['cassandra']['port'],
        ssl_options=ssl_options,
    )
    LOG.info('Connected to cassandra')
    LOG.info('Cassandra connection is established')
    session = cluster_connection.connect()
    if not keyspace:
        keyspace = conf['cassandra']['keyspace']
    try:
        session.set_keyspace(keyspace)
    except InvalidRequest:
        # Keyspace does not exist yet; create it then continue.
        _create_keyspace(session, keyspace,
                         conf['cassandra']['replication_strategy'])
    _run_migrations(conf['cassandra']['migrations'], session)
    session.row_factory = query.dict_factory
    return session
def __init__(self, clusterAddress=os.getenv("CLUSTER_ADDRESS", "54.67.105.220")):
    """Set up Flask routes and one Cluster/session pair per keyspace.

    Args:
        clusterAddress: Cassandra contact point; defaults to the
            CLUSTER_ADDRESS environment variable.
    """
    super().__init__(__name__)
    self.assignFlaskRoutesFromMethods()
    self.eastern = pytz.timezone("US/Eastern")
    self.clusterTwitterSeries = cluster.Cluster([clusterAddress])
    self.sessionTwitterSeries = self.clusterTwitterSeries.connect(
        "twitterseries")
    self.clusterTopTrendingStreaming = cluster.Cluster([clusterAddress])
    self.sessionTopTrendingStreaming = self.clusterTopTrendingStreaming.connect(
        "twittertrendingstreaming")
    self.clusterTopTrending = cluster.Cluster([clusterAddress])
    # Fix: this session was mistakenly created from
    # clusterTopTrendingStreaming, leaving clusterTopTrending unused.
    self.sessionTopTrending = self.clusterTopTrending.connect(
        "twittertrending")
    self.clusterStockData = cluster.Cluster([clusterAddress])
    self.sessionStockData = self.clusterStockData.connect("stockdata")
    self.clusterTweets = cluster.Cluster([clusterAddress])
    self.sessionTweets = self.clusterTweets.connect("latesttweets")
def __init__(self):
    """Open a session to the configured keyspace and prepare a batch statement."""
    super(AbstractCassandraRepository, self).__init__()
    self.conf = cfg.CONF
    contact_points = self.conf.cassandra.cluster_ip_addresses.split(',')
    self._cassandra_cluster = cluster.Cluster(contact_points)
    self.cassandra_session = self._cassandra_cluster.connect(
        self.conf.cassandra.keyspace)
    self._batch_stmt = query.BatchStatement()
def initialize(self, db_ip, db_port, **args):
    """Create the driver client/session from the remote-host configuration.

    Note: hosts and port come from args['config'].remote_db_hosts;
    db_ip/db_port are part of the interface but not used here.
    """
    hosts, default_port = _parse_hosts(args['config'].remote_db_hosts)
    policy = self._get_loadbalancing_policy(self.config.load_balancing)
    level = self._get_consistency_level(self.config.consistency_level)
    self.client = cluster.Cluster(hosts,
                                  port=default_port,
                                  load_balancing_policy=policy)
    self.session = self.client.connect(ROOT_KS)
    self.session.default_consistency_level = level
    self.session.row_factory = query.dict_factory
def _connection(conf, datacenter, keyspace=None):
    """connection.

    :param datacenter
    :returns session
    """
    ssl_options = None
    if conf.ssl_enabled:
        ssl_options = {
            'ca_certs': conf.ssl_ca_certs,
            'ssl_version': ssl.PROTOCOL_TLSv1
        }
    auth_provider = None
    if conf.auth_enabled:
        auth_provider = auth.PlainTextAuthProvider(
            username=conf.username,
            password=conf.password
        )
    # DCAwareRoundRobinPolicy is the only policy that takes the datacenter.
    policy_class = getattr(policies, conf.load_balance_strategy)
    if policy_class is policies.DCAwareRoundRobinPolicy:
        balancing_policy = policy_class(datacenter)
    else:
        balancing_policy = policy_class()
    connection = cluster.Cluster(
        conf.cluster,
        auth_provider=auth_provider,
        load_balancing_policy=balancing_policy,
        port=conf.port,
        ssl_options=ssl_options,
        max_schema_agreement_wait=conf.max_schema_agreement_wait
    )
    session = connection.connect()
    keyspace = keyspace or conf.keyspace
    try:
        session.set_keyspace(keyspace)
    except cassandra.InvalidRequest:
        # Missing keyspace: create it and continue.
        _create_keyspace(session, keyspace, conf.replication_strategy)
    if conf.automatic_schema_migration:
        # Migrations run on a shallow copy so the configured consistency
        # level does not leak into the returned session.
        migration_session = copy.copy(session)
        migration_session.default_consistency_level = getattr(
            cassandra.ConsistencyLevel, conf.migrations_consistency_level)
        _run_migrations(conf.migrations_path, migration_session)
    session.row_factory = query.dict_factory
    return session
def main():
    """Spawn a handful of green threads running queries against 'test'."""
    conn = cluster.Cluster(['192.168.20.2'], port=9042)
    session = conn.connect('test')
    log.info('Execute commands')
    pool = guv.GreenPool()
    for _ in range(5):
        pool.spawn(do_query, session)
    pool.waitall()
    conn.shutdown()
def setUpClass(cls):
    """Create the test Cassandra Cluster as cls.cassandra."""
    cls.cassandra = None
    if CASSANDRA_HOSTPORT:
        # Reuse an externally provided Cassandra instead of booting one.
        host, cls.port = CASSANDRA_HOSTPORT.split(":")
        cls.contact_points = [host]
    else:
        cls.setUpCassandra()
    # Make it easy to do raw queries to Cassandra.
    cls.cluster = c_cluster.Cluster(cls.contact_points, cls.port)
    cls.session = cls.cluster.connect()
    for suffix in ("", "_metadata"):
        cls._reset_keyspace(cls.session, cls.KEYSPACE + suffix)
def _connect(self):
    # (Re)build the cluster connection while holding the connection lock.
    with self.__connection_lock:
        if self.__cluster is not None:
            self._disconnect()
        # Snapshot the atexit handler count, then pop any handlers that get
        # registered while the Cluster/session are created, so this object
        # manages shutdown itself rather than leaving hooks behind.
        # NOTE(review): atexit._exithandlers is a private, Python-2-only
        # attribute -- confirm the target interpreter before relying on it.
        count = len(atexit._exithandlers)
        try:
            cluster = cassandra_cluster.Cluster(**self.__cluster_params)
            session = cluster.connect()
        finally:
            while len(atexit._exithandlers) > count:
                atexit._exithandlers.pop()
        session.row_factory = cassandra_query.dict_factory
        session.default_timeout = self.__query_timeout
        self.__cluster = cluster
        self.__session = session
def __init__(self, connection_params: ConnectionParams = None):
    """Build a Cluster from the given (or default) connection parameters.

    A list-valued host is collapsed to its first entry before connecting.
    """
    self._params = connection_params if connection_params \
        else ConnectionParams()
    host = self._params.host
    if isinstance(host, list):
        host = host[0]
    self._params.host = host
    self.cluster = cluster.Cluster(
        [host],
        load_balancing_policy=self._params.load_balancing_policy,
        port=self._params.port,
        auth_provider=self._params.auth_provider)
    if hasattr(self.cluster, "ssl_context"):
        self.cluster.ssl_context = self._params.ssl_context
    else:
        # driver versions < 3.17.0 do not have support for ssl_context
        self.cluster.ssl_options = self._params.ssl_options
def session(hosts=None):
    """Return the shared Cassandra session, preparing statements on first call.

    Args:
        hosts: contact points; defaults to ["127.0.0.1"].  A None sentinel
            replaces the original mutable default argument.
    """
    global sess, film_by_episode_stmt, films_stmt, people_stmt, planets_stmt, \
        species_stmt, starships_stmt, vehicles_stmt
    if sess is None:
        if hosts is None:
            hosts = ["127.0.0.1"]
        # TODO (NF 2018-02-13): Use connection pool.
        lbp = policies.RoundRobinPolicy()
        sess = cluster.Cluster(hosts, load_balancing_policy=lbp).connect()
        models.init(sess)
        film_by_episode_stmt = sess.prepare(SELECT_FILM_BY_EPISODE)
        films_stmt = sess.prepare(SELECT_FILMS)
        people_stmt = sess.prepare(SELECT_PEOPLE)
        planets_stmt = sess.prepare(SELECT_PLANETS)
        species_stmt = sess.prepare(SELECT_SPECIES)
        starships_stmt = sess.prepare(SELECT_STARSHIPS)
        vehicles_stmt = sess.prepare(SELECT_VEHICLES)
    return sess
def _init_cluster(self):
    """Drop and recreate the 'cfm' keyspace, then ensure the pantry table."""
    session = cassandra_driver.Cluster(contact_points=["localhost"]).connect()
    try:
        session.execute("DROP KEYSPACE cfm;")
    except ConfigurationException:
        logging.debug("keyspace was not there")
    for ddl in (
        """
        CREATE KEYSPACE cfm
        WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': '1'}
        """,
        """
        CREATE TABLE IF NOT EXISTS cfm.pantry (
            pantry_id text primary key,
            blob text
        );
        """,
    ):
        session.execute(ddl)
def send_partition(entries, table_name, crit_size=500):
    """
    Collects rdd entries and sends as batch of CQL commands.
    Required by "save_to_database" function.
    """
    # Keyspace session and QUORUM-level batch executor.
    db_session = cassc.Cluster(p["cassandra"]).connect(p["cassandra_key"])
    quorum = cass.ConsistencyLevel.QUORUM
    cql_batch = cassq.BatchStatement(consistency_level=quorum)
    batch_size = 0
    # Prepared statement with the table name interpolated up front.
    cql_command = db_session.prepare("""
        UPDATE {} SET metric = ? + metric WHERE group = ? AND cycle = ? AND id = ?;
        """.format(table_name))
    for entry in entries:
        cql_batch.add(cql_command,
                      parameters=[cassq.ValueSequence((entry[3],)),
                                  entry[0],
                                  entry[1],
                                  entry[2]])
        batch_size += 1
        # Flush a full batch and start a fresh one.
        if batch_size == crit_size:
            db_session.execute(cql_batch)
            cql_batch = cassq.BatchStatement(consistency_level=quorum)
            batch_size = 0
    # Send whatever remains, then close the session.
    db_session.execute(cql_batch)
    db_session.shutdown()
    return None
def get_session(cls, seeds, **kwargs):
    """Return the cached session, building and selecting the keyspace once.

    kwargs: session (injected), keyspace, replication, drop_keyspace.
    """
    if cls.__session is None:
        # Allow dependency injection
        session = kwargs.get("session")
        if session is None:
            session = c_cluster.Cluster(seeds).connect()
        keyspace = kwargs["keyspace"]
        replication = kwargs["replication"]
        if kwargs.get("drop_keyspace", False):
            session.execute(cls.QUERY_DROP_KEYSPACE.format(keyspace))
        session.execute(cls.QUERY_CREATE_KEYSPACE.format(
            keyspace=keyspace,
            replication=str(replication),
        ))
        session.set_keyspace(keyspace)
        cls.__session = session
    return cls.__session
class TestMethods(unittest.TestCase):
    """Tests for the syslog-counting Spark/Cassandra application."""

    conf = SparkConf().setAppName('TestCountingSyslogsByHours')
    sc = SparkContext(conf=conf)
    cluster = cascl.Cluster()
    KeyspaceName = 'test'
    TableName = 'statistics'
    tmpFile = app.NamedTemporaryFile(delete=True)
    tmpFile.close()

    def setUp(self):
        # Fix: class attributes must be accessed via self -- the original
        # bare names (cluster, sc, tmpFile, ...) raised NameError at runtime.
        app.CreateKeySpaceAndTable(self.cluster, self.KeyspaceName,
                                   self.TableName)
        self.sc.emptyRDD().saveAsPickleFile(self.tmpFile.name)
        self.tmpFile.close()

    def test_LineMap(self):
        s = '77: Dec 23 23:16:38 divine0ff-Aspire-E1-570G anacron[1024]: Normal exit (1 job run)'
        print(sys.argv)
        self.assertEqual(app.LineMap(s), (523, 1))

    def test_spark(self):
        res = app.SparkCalculate(self.sc, 'logfile.test', self.tmpFile)
        print('test computedRdd:', res)
        self.assertCountEqual(res, [(606, 1), (511, 2), (611, 2)])

    def test_saveToDB(self):
        app.SaveToDB([(606, 1), (511, 2), (611, 2)], self.cluster,
                     KeyspaceName=self.KeyspaceName,
                     TableName=self.TableName)
        res = app.printFromDb(self.cluster, file=None,
                              KeyspaceName=self.KeyspaceName,
                              TableName=self.TableName)
        print('test saved table:', res)
        self.assertCountEqual(res, [(6, 6, 1), (11, 6, 2), (11, 5, 2)])
def connect_db():
    """Open a session against a local Cassandra node on the default port."""
    local_cluster = cass_cluster.Cluster(contact_points=['localhost'],
                                         port=9042)
    return local_cluster.connect()
def connect(cls):
    """Connect to the two-node docker cluster and select the 'resto' keyspace."""
    nodes = ['172.18.0.2', '172.18.0.3']
    cls.cluster = cluster.Cluster(nodes, port=9042)
    cls.session = cls.cluster.connect('resto', wait_for_all_pools=True)
    # Redundant with connect('resto') but kept to preserve behavior.
    cls.session.execute('USE resto')
lambda a, b: a + b) # oldrdd U rdd->sort->(key,amount) tmpFile = NamedTemporaryFile(delete=True) tmpFile.close() newrdd.saveAsPickleFile(tmpFile.name) open(filename, "w") #remove all logs from logfile result = newrdd.collect() return result if __name__ == '__main__': if len(sys.argv) != 2: print('Usage: app.py <logfile>') sys.exit(-1) KeyspaceName = 'syslog' TableName = 'statistics' cluster = cascl.Cluster() CreateKeySpaceAndTable(cluster, KeyspaceName, TableName) conf = SparkConf().setAppName('CountingSyslogsByHours') sc = SparkContext(conf=conf) tmpFile = NamedTemporaryFile(delete=True) tmpFile.close() sc.emptyRDD().saveAsPickleFile(tmpFile.name) statistics = SparkCalculate(sc, sys.argv[1], tmpFile=tmpFile) #writing to Cassandra SaveToDB(statistics, cluster) #printing from Cassandra printFromDb(cluster) cluster.shutdown() sc.stop()
def __init__(self, *args, **kwargs):
    # Wrap driver Cluster construction, forcing libev connections.
    # self.setting aliases kwargs, so it also ends up containing the
    # connection_class entry added below.
    self.setting = kwargs
    kwargs["connection_class"] = libevreactor.LibevConnection
    # NOTE(review): this mutates the driver-wide Session class default
    # (unpaged queries), affecting every Session in the process -- not
    # just this instance.
    cluster.Session.default_fetch_size = None
    self.cluster = cluster.Cluster(*args, **kwargs)
import scapy.all as scapy_all import scapy.utils as scapy_ut import dash import dash_core_components as dcc import dash_html_components as dhc import dash.dependencies as ddeps import cassandra.cluster as ccass # ----------------------------------------------------------------------------- # Main program # ----------------------------------------------------------------------------- #-- connect to Cassandra cluster = ccass.Cluster(port=9042) session = cluster.connect() #-- get initial list of hosts h = set() hosts = [] for _ in session.execute("SELECT hostid FROM test1.t7 LIMIT 10"): h.add(_.hostid) for _ in h: hosts.append({"label": str(_), "value": f"{_}"}) #-- define app and its layout css = [ "https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css", { 'href':
def _connection(conf):
    """Return a session bound to conf.keyspace on the configured cluster."""
    return cluster.Cluster(conf.cluster).connect(conf.keyspace)