def _cassandra_init(self, server_list):
    # 1. Ensure keyspace and schema/CFs exist
    # 2. Read in persisted data and publish to ifmap server
    uuid_ks_name = VncCassandraClient._UUID_KEYSPACE_NAME
    obj_uuid_cf_info = (VncCassandraClient._OBJ_UUID_CF_NAME, None)
    obj_fq_name_cf_info = (VncCassandraClient._OBJ_FQ_NAME_CF_NAME, None)
    uuid_cf_info = (VncCassandraClient._UUID_CF_NAME, None)
    fq_name_cf_info = (VncCassandraClient._FQ_NAME_CF_NAME, None)
    ifmap_id_cf_info = (VncCassandraClient._IFMAP_ID_CF_NAME, None)
    subnet_cf_info = (VncCassandraClient._SUBNET_CF_NAME, None)
    children_cf_info = (VncCassandraClient._CHILDREN_CF_NAME, TIME_UUID_TYPE)
    self._cassandra_ensure_keyspace(
        server_list, uuid_ks_name,
        [obj_uuid_cf_info, obj_fq_name_cf_info, uuid_cf_info,
         fq_name_cf_info, ifmap_id_cf_info, subnet_cf_info,
         children_cf_info])

    useragent_ks_name = VncCassandraClient._USERAGENT_KEYSPACE_NAME
    useragent_kv_cf_info = (VncCassandraClient._USERAGENT_KV_CF_NAME, None)
    self._cassandra_ensure_keyspace(server_list, useragent_ks_name,
                                    [useragent_kv_cf_info])

    uuid_pool = pycassa.ConnectionPool(
        uuid_ks_name, server_list, max_overflow=-1, pool_timeout=300,
        max_retries=100, timeout=300)
    useragent_pool = pycassa.ConnectionPool(
        useragent_ks_name, server_list, max_overflow=-1, pool_timeout=300,
        max_retries=100, timeout=300)

    rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    self._obj_uuid_cf = pycassa.ColumnFamily(
        uuid_pool, VncCassandraClient._OBJ_UUID_CF_NAME,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
    self._obj_fq_name_cf = pycassa.ColumnFamily(
        uuid_pool, VncCassandraClient._OBJ_FQ_NAME_CF_NAME,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
    self._useragent_kv_cf = pycassa.ColumnFamily(
        useragent_pool, VncCassandraClient._USERAGENT_KV_CF_NAME,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
    self._subnet_cf = pycassa.ColumnFamily(
        uuid_pool, VncCassandraClient._SUBNET_CF_NAME,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
def insertDataset(dataset):
    # connect to Cassandra
    cpool = pycassa.ConnectionPool(KEY_SPACE, [HOST])
    # finding Nemo => navigating to the column family
    col_family = pycassa.ColumnFamily(cpool, COLUMN_FAMILY)
    col_family.batch_insert(dataset)
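# A minimal usage sketch for insertDataset, assuming KEY_SPACE, HOST and
# COLUMN_FAMILY are module-level constants defined elsewhere in this module.
# batch_insert() expects a mapping of row keys to column dictionaries; the
# row keys and column names below are illustrative only.
sample_dataset = {
    'row-1': {'name': 'alice', 'score': '10'},
    'row-2': {'name': 'bob', 'score': '7'},
}
insertDataset(sample_dataset)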
def _prepare_for_test(nodes_cluster):
    log('Start actualizing scheme')
    sys = pycassa.SystemManager('127.0.0.1')
    sys.create_keyspace(Keyspace, pycassa.SIMPLE_STRATEGY,
                        {'replication_factor': '3'})
    sys.create_column_family(Keyspace, TableName)
    log('Created keyspace {} and column family {}'.format(Keyspace, TableName))

    nodes_cluster.nodelist()[2].stop()
    log('Stopped third node')

    pool = pycassa.ConnectionPool(Keyspace,
                                  server_list=['127.0.0.1', '127.0.0.2'],
                                  timeout=0.5)
    cf = pycassa.ColumnFamily(pool, TableName)
    for key, value in ThriftMultigetTestCase.Payload:
        cf.insert(key, {'value': value},
                  write_consistency_level=pycassa.ConsistencyLevel.QUORUM)
    log('Inserted {} keys in the table {}.{}'.format(
        len(ThriftMultigetTestCase.Payload), Keyspace, TableName))

    nodes_cluster.nodelist()[2].start()
    log('Started third node')
    nodes_cluster.nodelist()[2].wait_for_thrift_interface()
    log('Thrift interface is active for the third node')
def _cassandra_init_conn_pools(self):
    for ks, cf_list in self._keyspaces.items():
        pool = pycassa.ConnectionPool(
            ks, self._server_list, max_overflow=-1, use_threadlocal=True,
            prefill=True, pool_size=20, pool_timeout=120, max_retries=-1,
            timeout=5)

        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        for (cf, _) in cf_list:
            self._cf_dict[cf] = ColumnFamily(
                pool, cf, read_consistency_level=rd_consistency,
                write_consistency_level=wr_consistency)

    ConnectionState.update(conn_type=ConnectionType.DATABASE,
                           name='Cassandra', status=ConnectionStatus.UP,
                           message='', server_addrs=self._server_list)
    self._conn_state = ConnectionStatus.UP
    msg = 'Cassandra connection ESTABLISHED'
    self._logger(msg, level=SandeshLevel.SYS_NOTICE)
def _cassandra_init_conn_pools(self):
    socket_factory = self._make_socket_factory()
    for ks, cf_dict in itertools.chain(list(self._rw_keyspaces.items()),
                                       list(self._ro_keyspaces.items())):
        keyspace = '%s%s' % (self._db_prefix, ks)
        pool = pycassa.ConnectionPool(
            keyspace, self._server_list, max_overflow=5,
            use_threadlocal=True, prefill=True, pool_size=self._pool_size,
            pool_timeout=120, max_retries=15, timeout=5,
            credentials=self._credential, socket_factory=socket_factory)

        for cf_name in cf_dict:
            cf_kwargs = cf_dict[cf_name].get('cf_args', {})
            self._cf_dict[cf_name] = ColumnFamily(
                pool, cf_name,
                read_consistency_level=ConsistencyLevel.QUORUM,
                write_consistency_level=ConsistencyLevel.QUORUM,
                dict_class=dict, **cf_kwargs)

    ConnectionState.update(conn_type=ConnType.DATABASE, name='Cassandra',
                           status=ConnectionStatus.UP, message='',
                           server_addrs=self._server_list)
    self._conn_state = ConnectionStatus.UP
    msg = 'Cassandra connection ESTABLISHED'
    self._logger(msg, level=SandeshLevel.SYS_NOTICE)
def returner(ret):
    """
    Return data to a Cassandra ColumnFamily
    """
    consistency_level = getattr(
        pycassa.ConsistencyLevel, __opts__["cassandra.consistency_level"]
    )

    pool = pycassa.ConnectionPool(
        __opts__["cassandra.keyspace"], __opts__["cassandra.servers"]
    )

    ccf = pycassa.ColumnFamily(
        pool,
        __opts__["cassandra.column_family"],
        write_consistency_level=consistency_level,
    )

    columns = {"fun": ret["fun"], "id": ret["id"]}

    if isinstance(ret["return"], dict):
        for key, value in ret["return"].items():
            columns["return.{}".format(key)] = str(value)
    else:
        columns["return"] = str(ret["return"])

    log.debug(columns)
    ccf.insert(ret["jid"], columns)
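# A rough sketch of what this Salt returner writes, assuming __opts__ carries
# the cassandra.* settings referenced above and that 'log' is a module-level
# logger. The job dictionary is illustrative only; real return data is
# supplied by the Salt master.
#
#     sample_ret = {
#         "jid": "20240101120000000000",   # hypothetical job id (row key)
#         "id": "minion-1",
#         "fun": "test.ping",
#         "return": True,
#     }
#     returner(sample_ret)
#
# This inserts one row keyed by the jid with columns "fun", "id" and "return"
# (or one "return.<key>" column per key when the return value is a dict).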
def __init__(self, username=None, password=None, database='MessageStore',
             host='localhost', base_delay=None, multi_dc=False,
             create_schema=True):
    """Create a Cassandra backend for the Message Queue

    :param host: Hostname, accepts either an IP, hostname, hostname:port,
                 or a comma separated list of 'hostname:port'

    """
    hosts = parse_hosts(host)
    if create_schema:
        self._create_schema(hosts[0], database)
    credentials = None
    if username and password is not None:
        credentials = dict(username=username, password=password)
    self.pool = pool = pycassa.ConnectionPool(
        keyspace=database,
        server_list=hosts,
        credentials=credentials,
    )
    self.message_fam = pycassa.ColumnFamily(pool, 'Messages')
    self.meta_fam = pycassa.ColumnFamily(pool, 'MessageMetadata')
    self.delay = int(base_delay) if base_delay else 0
    self.cl = ONE if len(hosts) < 2 else None
    self.multi_dc = multi_dc
def get_test():
    child = False
    for i in range(10):
        pid = os.fork()
        if pid == 0:
            child = True
            print '[%d] Starting proc' % i
            cp = pycassa.ConnectionPool('test', server_list=[
                'esb-a-test.sensors.elex.be', 'esb-b-test.sensors.elex.be'])
            indexcf = pycassa.ColumnFamily(cp, 'INDEXCF')
            datacf = pycassa.ColumnFamily(cp, 'DATACF')
            before = time.time()
            l = list(slice_query(indexcf, datacf, 'a', 'z'))
            delta = time.time() - before
            print '[%d] Got results in %d' % (i, delta)
            break
    if not child:
        time.sleep(10000)
def _cassandra_init(self):
    sys_mgr = SystemManager(self._args.cassandra_server_list[0])

    if self._args.reset_config:
        try:
            sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    try:
        sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY,
                                {'replication_factor': '1'})
    except pycassa.cassandra.ttypes.InvalidRequestException as e:
        print "Warning! " + str(e)

    column_families = [self._SVC_VM_CF, self._CLEANUP_CF]
    for cf in column_families:
        try:
            sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    conn_pool = pycassa.ConnectionPool(SvcMonitor._KEYSPACE,
                                       self._args.cassandra_server_list)
    self._svc_vm_cf = pycassa.ColumnFamily(conn_pool, self._SVC_VM_CF)
    self._cleanup_cf = pycassa.ColumnFamily(conn_pool, self._CLEANUP_CF)
def setUp(self):
    super(TestSubmission, self).setUp()
    # We need to set the config before importing.
    os.environ['OOPS_HOST'] = config.cassandra_hosts[0]
    self.keyspace = self.useFixture(TemporaryOOPSDB()).keyspace
    os.environ['OOPS_KEYSPACE'] = self.keyspace
    creds = {'username': config.cassandra_username,
             'password': config.cassandra_password}
    self.pool = pycassa.ConnectionPool(self.keyspace, config.cassandra_hosts,
                                       credentials=creds)
    config.cassandra_keyspace = self.keyspace
    schema.create()
    oops_config = oopsconfig.get_config()
    oops_config['username'] = config.cassandra_username
    oops_config['password'] = config.cassandra_password
    oopsschema.create(oops_config)

    self.temp = tempfile.mkdtemp()
    config_dir = os.path.join(self.temp, 'config')
    sandbox_dir = os.path.join(self.temp, 'sandbox')
    os.makedirs(config_dir)
    os.makedirs(sandbox_dir)
    self.architecture = 'amd64'
    # Don't depend on apport-retrace being installed.
    with mock.patch('daisy.retracer.Popen') as popen:
        popen.return_value.returncode = 0
        popen.return_value.communicate.return_value = ['/bin/false']
        self.retracer = retracer.Retracer(config_dir, sandbox_dir,
                                          self.architecture, False, False)
def __init__(self, args_str):
    self._parse_args(args_str)

    self._logger = utils.ColorLog(logging.getLogger(__name__))
    log_level = 'ERROR'
    if self._args.verbose:
        log_level = 'INFO'
    if self._args.debug:
        log_level = 'DEBUG'
    self._logger.setLevel(log_level)
    logformat = logging.Formatter("%(levelname)s: %(message)s")
    stdout = logging.StreamHandler()
    stdout.setLevel(log_level)
    stdout.setFormatter(logformat)
    self._logger.addHandler(stdout)

    # cassandra connection
    self._cassandra_servers = self._api_args.cassandra_server_list
    db_info = vnc_cfg_ifmap.VncServerCassandraClient.get_db_info()
    rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    self._cf_dict = {}
    for ks_name, cf_name_list in db_info:
        pool = pycassa.ConnectionPool(keyspace=ks_name,
                                      server_list=self._cassandra_servers,
                                      prefill=False)
        for cf_name in cf_name_list:
            self._cf_dict[cf_name] = pycassa.ColumnFamily(
                pool, cf_name, read_consistency_level=rd_consistency)

    # ifmap connection
    self._connect_to_ifmap_servers()
def main(release, start, end, verbose=False):
    start = start.replace(hour=0, minute=0, second=0, microsecond=0)
    end = end.replace(hour=0, minute=0, second=0, microsecond=0)
    creds = {'username': config.cassandra_username,
             'password': config.cassandra_password}
    pool = pycassa.ConnectionPool(config.cassandra_keyspace,
                                  config.cassandra_hosts, timeout=600,
                                  credentials=creds)
    systems = pycassa.ColumnFamily(pool, 'SystemsForErrorsByRelease')
    uniquesys = pycassa.ColumnFamily(pool, 'UniqueSystemsForErrorsByRelease')
    while start <= end:
        target_date = start
        working_date = target_date - datetime.timedelta(days=RAMP_UP - 1)
        one_day = datetime.timedelta(days=1)
        unique = set()
        while working_date <= target_date:
            [unique.add(x) for x, y in systems.xget((release, working_date))]
            working_date += one_day
        if verbose:
            print start, len(unique)
        uniquesys.insert(release, {start: len(unique)})
        start += one_day
def _cassandra_init(self, server_list):
    # column name <table-name>, <id1>, <id2>
    disco_cf_info = (self._disco_cf_name,
                     CompositeType(AsciiType(), UTF8Type(), UTF8Type()),
                     AsciiType())

    # 1. Ensure keyspace and schema/CFs exist
    self._cassandra_ensure_keyspace(server_list, self._keyspace_name,
                                    [disco_cf_info])

    pool = pycassa.ConnectionPool(self._keyspace_name, server_list,
                                  max_overflow=-1, use_threadlocal=True,
                                  prefill=True, pool_size=100,
                                  pool_timeout=20, max_retries=5, timeout=0.5)

    rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
    wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
    self._disco_cf = pycassa.ColumnFamily(
        pool, self._disco_cf_name,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
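# With the CompositeType comparator declared above, pycassa represents each
# column name as a tuple of its components. A hedged sketch of how this CF
# could be written and read (the row key and component values are made up,
# not the discovery server's real naming scheme):
#
#     self._disco_cf.insert('row-key',
#                           {('table-name', 'id1', 'id2'): 'value'})
#     columns = self._disco_cf.get('row-key')
#     # -> {(u'table-name', u'id1', u'id2'): 'value'}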
def db_export(self):
    db_contents = {'cassandra': {}, 'zookeeper': {}}

    cassandra_contents = db_contents['cassandra']
    for ks_name in (set(KEYSPACES) - set(self._args.omit_keyspaces or [])):
        if self._api_args.cluster_id:
            full_ks_name = '%s_%s' % (self._api_args.cluster_id, ks_name)
        else:
            full_ks_name = ks_name
        cassandra_contents[ks_name] = {}

        socket_factory = pycassa.connection.default_socket_factory
        if self._api_args.cassandra_use_ssl:
            socket_factory = pycassa.connection.make_ssl_socket_factory(
                self._api_args.cassandra_ca_certs, validate=False)

        pool = pycassa.ConnectionPool(
            full_ks_name, self._api_args.cassandra_server_list,
            pool_timeout=120, max_retries=-1, timeout=5,
            socket_factory=socket_factory)

        creds = None
        if (self._api_args.cassandra_user and
                self._api_args.cassandra_password):
            creds = {'username': self._api_args.cassandra_user,
                     'password': self._api_args.cassandra_password}
        sys_mgr = SystemManager(self._api_args.cassandra_server_list[0],
                                credentials=creds)
        for cf_name in sys_mgr.get_keyspace_column_families(full_ks_name):
            cassandra_contents[ks_name][cf_name] = {}
            cf = pycassa.ColumnFamily(pool, cf_name)
            for r, c in cf.get_range(column_count=10000000,
                                     include_timestamp=True):
                cassandra_contents[ks_name][cf_name][r] = c

    def get_nodes(path):
        if not zk.get_children(path):
            return [(path, zk.get(path))]
        nodes = []
        for child in zk.get_children(path):
            nodes.extend(get_nodes('%s%s/' % (path, child)))
        return nodes

    zk = kazoo.client.KazooClient(self._api_args.zk_server_ip)
    zk.start()
    nodes = get_nodes(self._api_args.cluster_id + '/')
    zk.stop()
    db_contents['zookeeper'] = json.dumps(nodes)

    f = open(self._args.export_to, 'w')
    try:
        f.write(json.dumps(db_contents))
    finally:
        f.close()
def connectToKeyspace(self, keyspace="ros"):
    self.keyspace = keyspace
    try:
        self.pool = pycassa.ConnectionPool(
            self.keyspace, [self.host + ":" + str(self.port)])
    except:
        return False
    return True
def open_connection(self, host, port, **params):
    self.pool = pycassa.ConnectionPool(self.keyspace)
    try:
        self.cf = pycassa.ColumnFamily(self.pool, self.column_family)
    except pycassa.NotFoundException:
        log.info("Creating new %s ColumnFamily." % self.column_family)
        system_manager = pycassa.system_manager.SystemManager()
        system_manager.create_column_family(self.keyspace,
                                            self.column_family)
        self.cf = pycassa.ColumnFamily(self.pool, self.column_family)
def __init__(self, keyspace='data', ip='127.0.0.1'):
    """
    self.cfs: ColumnFamily object collection
              data format: {key: ColumnFamily object}
              example: {'cpu': ColumnFamily()}
    """
    self.cfs = dict()
    self.db = pycassa.ConnectionPool(keyspace, server_list=[ip])
def get_metrics_for_shards(shards, server):
    pool = pycassa.ConnectionPool('DATA', server_list=[server])
    cf = pycassa.ColumnFamily(pool, 'metrics_locator')
    metrics_for_shards = {}
    for shard in range(128):
        # retrieve metrics locators for each shard
        metrics_for_shards[shard] = get_metrics_for_shard(shard, cf)
    return metrics_for_shards
def _get_column_family(self):
    if self._column_family is None:
        conn = pycassa.ConnectionPool(self.keyspace,
                                      server_list=self.servers,
                                      **self.cassandra_options)
        self._column_family = pycassa.ColumnFamily(
            conn, self.column_family,
            read_consistency_level=self.read_consistency,
            write_consistency_level=self.write_consistency)
    return self._column_family
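# Note: the getter above memoizes the ColumnFamily, so the ConnectionPool is
# only created on first use and later calls reuse the same handle, e.g.:
#
#     cf = backend._get_column_family()    # opens the pool lazily
#     cf is backend._get_column_family()   # True; no second pool is created
#
# ('backend' is a hypothetical instance of the surrounding class.)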
def __init__(self):
    """
    Constructor.
    """
    self.host = file_io.read('/etc/appscale/my_private_ip')
    self.port = CASS_DEFAULT_PORT
    self.pool = pycassa.ConnectionPool(
        keyspace=KEYSPACE,
        server_list=[self.host + ":" + str(self.port)],
        prefill=False)
def get_metrics_state_for_shards(shards, servers):
    pool = pycassa.ConnectionPool('DATA', server_list=servers)
    cf = pycassa.ColumnFamily(pool, 'metrics_state')
    metrics_state_for_shards = {}
    for shard in shards:
        metrics_state_for_shards[shard] = get_metrics_state_for_shard(shard,
                                                                      cf)
    return metrics_state_for_shards
def connect_db(self):
    try:
        self._pool = pycassa.ConnectionPool(
            COLLECTOR_KEYSPACE,
            server_list=self._cassandra_server_list,
            timeout=None)
        self._logger.info("Connection to AnalyticsDb is Established!")
    except Exception as e:
        self._logger.error(
            "Exception: Failure in connection to AnalyticsDb %s" % e)
def _cassandra_init(self):
    server_idx = 0
    num_dbnodes = len(self._args.cassandra_server_list)
    connected = False
    while not connected:
        try:
            cass_server = self._args.cassandra_server_list[server_idx]
            sys_mgr = SystemManager(cass_server)
            connected = True
        except Exception as e:
            server_idx = (server_idx + 1) % num_dbnodes
            time.sleep(3)

    if self._args.reset_config:
        try:
            sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    try:
        sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY,
                                {'replication_factor': str(num_dbnodes)})
    except pycassa.cassandra.ttypes.InvalidRequestException as e:
        print "Warning! " + str(e)

    column_families = [self._SVC_VM_CF, self._SVC_CLEANUP_CF, self._SVC_SI_CF]
    for cf in column_families:
        try:
            sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    conn_pool = pycassa.ConnectionPool(SvcMonitor._KEYSPACE,
                                       self._args.cassandra_server_list)

    rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    self._svc_vm_cf = pycassa.ColumnFamily(
        conn_pool, self._SVC_VM_CF,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
    self._svc_si_cf = pycassa.ColumnFamily(
        conn_pool, self._SVC_SI_CF,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
    self._cleanup_cf = pycassa.ColumnFamily(
        conn_pool, self._SVC_CLEANUP_CF,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
def __init__(self, keyspace=None, host_list=None):
    try:
        self.client = pycassa.ConnectionPool(keyspace, host_list,
                                             pool_timeout=-1, max_retries=-1)
        self.is_connect = True
        logger.info('Connection to cassandra OK')
    except pycassa.pool.AllServersUnavailable as e:
        self.is_connect = False
        raise pycassa.pool.AllServersUnavailable(
            'error connecting to cassandra')
def __init__(self, name):
    self.name = name
    self.pool = pycassa.ConnectionPool("dev")
    self.cf = pycassa.ColumnFamily(
        self.pool, "cf23",
        read_consistency_level=cass_types.ConsistencyLevel.QUORUM,
        write_consistency_level=cass_types.ConsistencyLevel.QUORUM)
    self.cf23 = CF23(name, "cf23", self.pool)
    return
def __init__(self, engine, **kw):
    super(CassandraStore, self).__init__(engine, **kw)
    spliturl = urlsplit(engine)
    _, keyspace, column_family = spliturl.path.split('/')
    try:
        self._store = pycassa.ColumnFamily(
            pycassa.ConnectionPool(keyspace, [spliturl.hostname]),
            column_family,
        )
    except pycassa.InvalidRequestException:
        from pycassa.system_manager import SystemManager  # @UnresolvedImport @IgnorePep8
        system_manager = SystemManager(spliturl[1])
        system_manager.create_keyspace(
            keyspace,
            pycassa.system_manager.SIMPLE_STRATEGY,
            dict(replication_factor=native(kw.get('replication', 1))),
        )
        system_manager.create_column_family(keyspace, column_family)
        self._store = pycassa.ColumnFamily(
            pycassa.ConnectionPool(keyspace, [spliturl.netloc]),
            column_family,
        )
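# For reference, the engine string parsed above is a URL whose path carries
# the keyspace and column family (urlsplit yields ['', keyspace, cf]). A
# hedged usage sketch with made-up scheme, host, and names:
#
#     store = CassandraStore('cassandra://localhost:9160/mykeyspace/mycf')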
def process_message(self, peer, mailfrom, rcpttos, data):
    now = time.strftime('%a %b %d %T %Y', time.gmtime())
    head = 'From {0} {1}\n'.format(mailfrom, now)
    email = head + data
    user = re.search('(.*)@(.*)', rcpttos[0]).group(1)
    mbox = 'inbox'
    c = pycassa.ConnectionPool('Mail')
    cl = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
    cf = pycassa.ColumnFamily(c, 'Mailboxes', write_consistency_level=cl)
    cf.insert('{0}:{1}'.format(user, mbox), {uuid.uuid1(): email})
    cf = pycassa.ColumnFamily(c, 'Users', write_consistency_level=cl)
    cf.insert(user, {mbox: ''})
    return
def _cassandra_init(self):
    server_idx = 0
    num_dbnodes = len(self._args.cassandra_server_list)
    connected = False
    while not connected:
        try:
            cass_server = self._args.cassandra_server_list[server_idx]
            sys_mgr = SystemManager(cass_server)
            connected = True
        except Exception as e:
            server_idx = (server_idx + 1) % num_dbnodes
            time.sleep(3)

    if self._args.reset_config:
        try:
            sys_mgr.drop_keyspace(self._keyspace)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    try:
        sys_mgr.create_keyspace(self._keyspace, SIMPLE_STRATEGY,
                                {'replication_factor': str(num_dbnodes)})
    except pycassa.cassandra.ttypes.InvalidRequestException as e:
        print "Warning! " + str(e)

    # set up column families
    column_families = [self._F5_LB_CF]
    for cf in column_families:
        try:
            sys_mgr.create_column_family(self._keyspace, cf)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

    conn_pool = pycassa.ConnectionPool(self._keyspace,
                                       self._args.cassandra_server_list,
                                       max_overflow=10, use_threadlocal=True,
                                       prefill=True, pool_size=10,
                                       pool_timeout=30, max_retries=-1,
                                       timeout=0.5)

    rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
    self._f5_lb_cf = pycassa.ColumnFamily(
        conn_pool, self._F5_LB_CF,
        read_consistency_level=rd_consistency,
        write_consistency_level=wr_consistency)
def _check_cassandra():
    pool = pycassa.ConnectionPool(Keyspace, server_list=['127.0.0.3'],
                                  timeout=60 * 60 * 1000)
    cf = pycassa.ColumnFamily(pool, TableName)
    result_set = list(filter(
        lambda x: x[1] is not None,
        cf.multiget(
            [s[0].decode() for s in ThriftMultigetTestCase.Payload],
            read_consistency_level=pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        ).items()))
    log('Queried {} records, returned {} records'.format(
        len(ThriftMultigetTestCase.Payload), len(result_set)))
    return len(result_set) == len(ThriftMultigetTestCase.Payload)
def GetTest(self):
    import pycassa
    cf_str = "cpu"
    key = "*****@*****.**"
    super_column = 'total'
    column_start, column_finish = '', ''
    column_count = 5
    column_reversed = True
    pool = pycassa.ConnectionPool('data', server_list=['127.0.0.1'])
    cf = pycassa.ColumnFamily(pool, cf_str)
    rs = cf.get(key=key, super_column=super_column,
                column_start=column_start, column_finish=column_finish,
                column_reversed=True, column_count=column_count)
    print rs