def setup_module():
    global pool, cf, scf
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace', credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1')
    scf = ColumnFamily(pool, 'Super1')
def test_packing_disabled(self):
    self.cf = ColumnFamily(pool, 'Standard1', autopack_names=False, autopack_values=False)
    self.cf.insert('key', {'col': 'val'})
    assert_raises(TypeError, self.cf.insert, 'key', {123: 'val'})
    assert_raises(TypeError, self.cf.insert, 'key', {'col': 123})
    assert_raises(TypeError, self.cf.insert, 'key', {123: 123})
    self.cf.remove('key')
class TestSuperColumnFamilyMap:

    def setUp(self):
        credentials = {'username': '******', 'password': '******'}
        self.pool = ConnectionPool(keyspace='Keyspace1', credentials=credentials)
        self.cf = ColumnFamily(self.pool, 'Super2')
        self.map = ColumnFamilyMap(TestUTF8, self.cf)

    def tearDown(self):
        for key, columns in self.cf.get_range():
            self.cf.remove(key)

    def instance(self, key, super_column):
        instance = TestUTF8()
        instance.key = key
        instance.super_column = super_column
        instance.strcol = '1'
        instance.intcol = 2
        instance.floatcol = 3.5
        instance.datetimecol = datetime.now().replace(microsecond=0)
        instance.intstrcol = 8
        instance.floatstrcol = 4.6
        instance.datetimestrcol = datetime.now().replace(microsecond=0)
        return instance

    def test_super(self):
        instance = self.instance('TestSuperColumnFamilyMap.test_super', 'super1')
        assert_raises(NotFoundException, self.map.get, instance.key)
        self.map.insert(instance)
        res = self.map.get(instance.key)[instance.super_column]
        assert_equal(res, instance)
        assert_equal(self.map.multiget([instance.key])[instance.key][instance.super_column], instance)
        assert_equal(list(self.map.get_range(start=instance.key, finish=instance.key)),
                     [{instance.super_column: instance}])
def test_packing_enabled(self):
    self.cf = ColumnFamily(pool, 'Standard1')
    self.cf.insert('key', {'col': 'val'})
    assert_raises(TypeError, self.cf.insert, 'key', {123: 'val'})
    assert_raises(TypeError, self.cf.insert, 'key', {'col': 123})
    assert_raises(TypeError, self.cf.insert, 'key', {123: 123})
    self.cf.remove('key')
def test_null_pool_failover(self):
    listener = _TestListener()
    pool = NullPool(keyspace='Keyspace1', credentials=_credentials,
                    listeners=[listener], use_threadlocal=False,
                    server_list=['localhost:9160', 'localhost:9160'])
    conn = pool.get()
    cf = ColumnFamily(conn, 'Standard1')

    for i in range(1, 5):
        setattr(cf.client._connection.client, 'batch_mutate', _timeout)

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val'})
        assert_equal(listener.failure_count, i)
        cf.get('key')

    pool.dispose()
    listener.reset()

    pool = NullPool(keyspace='Keyspace1', credentials=_credentials,
                    listeners=[listener], use_threadlocal=False,
                    server_list=['localhost:9160', 'localhost:9160'])

    threads = []
    args = (pool, 'key', {'col': 'val'})
    for i in range(0, 5):
        threads.append(threading.Thread(target=_five_fails, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(listener.failure_count, 25)
    pool.dispose()
class Database:

    def __init__(self, column_family):
        """
        ColumnFamily:
            - User information
            - Search
            - Ticket information
            - System log
            - Provider information
        """
        # Connect to Cassandra servers
        client = connect(CASSANDRA_HOSTS)
        self.db = ColumnFamily(client, CASSANDRA_KEYSPACE, column_family, super=False)

    def insert(self, key, columns):
        key = md5(capwords(key).lower()).hexdigest()
        return self.db.insert(key, columns)

    def get(self, key, columns=None):
        key = md5(capwords(key).lower()).hexdigest()
        return self.db.get(key=key, columns=columns)

    def remove(self, key, column=None):
        key = md5(capwords(key).lower()).hexdigest()
        return self.db.remove(key=key, column=column)
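# A minimal usage sketch for the Database wrapper above, assuming the module-level
# CASSANDRA_HOSTS / CASSANDRA_KEYSPACE constants are configured and that a 'Users'
# column family exists ('Users' and the column values here are illustrative only).
# Keys are normalized (capwords + lower) and md5-hashed before reaching Cassandra,
# so reads must pass the same raw key as the write.
users = Database('Users')
users.insert('John Smith', {'email': 'john@example.com', 'plan': 'basic'})
row = users.get('john smith')            # maps to the same md5 key as the insert
users.remove('John Smith', column='plan')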
def __call__(self, uuids=None, cassandra_servers=None, force=False, **kwargs):
    super(CheckBadRefs, self).__call__(**kwargs)
    self.force = force
    pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers)
    uuid_cf = ColumnFamily(pool, 'obj_uuid_table')
    if uuids:
        def uuids_g():
            for uuid in uuids:
                yield uuid
    else:
        def uuids_g():
            for k, v in uuid_cf.get_range(column_count=1, filter_empty=True):
                yield k

    for uuid in uuids_g():
        values = dict(uuid_cf.xget(uuid))
        res = self._get_current_resource(uuid, values)
        bad_refs = self._check_resource_refs(uuid, values)
        if not res or bad_refs:
            printo(self._props_to_json(values))
        if not res and not self.check:
            if self.force or continue_prompt(message="Delete ?"):
                self._delete(uuid_cf, uuid)
def get_connection():
    """
    Creates a connection to Cassandra.

    Returns: pool
    """
    cassandra_host = os.environ.get('CASSANDRA_HOST', 'localhost')
    sys_mgr = SystemManager()
    try:
        sys_mgr.describe_ring(KEYSPACE)
    except:
        sys_mgr.create_keyspace(KEYSPACE, SIMPLE_STRATEGY, {'replication_factor': '1'})
    pool = ConnectionPool(KEYSPACE, server_list=[cassandra_host])
    for cf_name in [CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST, CF_LOGS_BY_SEVERITY]:
        try:
            cf = ColumnFamily(pool, cf_name)
        except:
            sys_mgr.create_column_family(KEYSPACE, cf_name, comparator_type=TimeUUIDType())
            cf = ColumnFamily(pool, cf_name)
        cf.get_count(str(uuid.uuid4()))
    sys_mgr.close()
    return pool
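# A brief usage sketch for get_connection() above. It assumes the CF_LOGS column
# family uses the TimeUUID comparator created there, so TimeUUID column names keep
# the columns within a row ordered by time; the 'app-1' row key and the log text
# are illustrative only.
pool = get_connection()
logs = ColumnFamily(pool, CF_LOGS)
logs.insert('app-1', {uuid.uuid1(): 'service started'})  # TimeUUID column name
print logs.get('app-1', column_count=10)                 # oldest 10 columns in the row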
def test_queue_failover(self):
    for prefill in (True, False):
        stats_logger = StatsLoggerWithListStorage()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[stats_logger], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for i in range(1, 5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()

            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
            assert_equal(stats_logger.stats['failed'], i)
            assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

        pool.dispose()
def remove(self, colfam, key, columns=None):
    cf = ColumnFamily(self.db, colfam)
    if columns is not None:
        return cf.remove(key, columns)
    else:
        return cf.remove(key)
def test_basic_pools(self):
    pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
    pool.dispose()
    pool = pool.recreate()
    cf = ColumnFamily(pool, 'Standard1')
    cf.insert('key1', {'col': 'val'})
    pool.status()
    pool.dispose()
def test_has_defaults(self): key = "TestColumnFamilyMap.test_has_defaults" ColumnFamily.insert(self.map, key, {"strcol": "1"}) instance = self.map.get(key) assert_equal(instance.intcol, TestUTF8.intcol.default) assert_equal(instance.floatcol, TestUTF8.floatcol.default) assert_equal(instance.datetimecol, TestUTF8.datetimecol.default)
def test_has_defaults(self):
    key = uuid.uuid4()
    ColumnFamily.insert(self.map, key, {'strcol': '1'})
    instance = self.map.get(key)
    assert_equal(instance.intcol, TestUTF8.intcol.default)
    assert_equal(instance.floatcol, TestUTF8.floatcol.default)
    assert_equal(instance.datetimecol, TestUTF8.datetimecol.default)
def setup_module():
    global pool, cf, scf, indexed_cf, sys_man
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace', credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    scf = ColumnFamily(pool, 'Super1', dict_class=dict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    sys_man = SystemManager()
def setup_module():
    global pool, cf, scf, counter_cf, super_counter_cf, sysman
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace', credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1')
    scf = ColumnFamily(pool, 'Super1')
    sysman = SystemManager()
    counter_cf = ColumnFamily(pool, 'Counter1')
    super_counter_cf = ColumnFamily(pool, 'SuperCounter1')
def test_basic_pools(self):
    for pool_cls in _pools:
        pool = pool_cls(keyspace='Keyspace1', credentials=_credentials)
        pool.dispose()
        pool = pool.recreate()
        cf = ColumnFamily(pool, 'Standard1')
        cf.insert('key1', {'col': 'val'})
        pool.status()
        pool.dispose()
def _five_fails(pool, key, column):
    conn = pool.get()
    cf = ColumnFamily(conn, 'Standard1')
    for i in range(0, 5):
        setattr(cf.client._connection.client, 'batch_mutate', _timeout)

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert(key, column)
        cf.get(key)
def test_insert_get_indexed_slices(self):
    indexed_cf = ColumnFamily(self.client, 'Indexed1')

    columns = {'birthdate': 1L}

    key = 'key1'
    indexed_cf.insert(key, columns, write_consistency_level=ConsistencyLevel.ONE)

    key = 'key2'
    indexed_cf.insert(key, columns, write_consistency_level=ConsistencyLevel.ONE)

    key = 'key3'
    indexed_cf.insert(key, columns, write_consistency_level=ConsistencyLevel.ONE)

    expr = index.create_index_expression(column_name='birthdate', value=1L)
    clause = index.create_index_clause([expr])
    result = indexed_cf.get_indexed_slices(clause)
    assert len(result) == 3
    assert result.get('key1') == columns
    assert result.get('key2') == columns
    assert result.get('key3') == columns
def __init__(self):
    parser = argparse.ArgumentParser(
        description="Sync Cassandra data from a source cluster to a destination cluster.")
    parser.add_argument(
        "-s", "--source",
        help="Generally the prod cassandra path, list of machines: "
             "localhost:9162 localhost:9163",
        nargs="*",
        required=True,
    )
    parser.add_argument(
        "-d", "--destination",
        help="Cassandra path where you need your data: "
             "localhost:9160 localhost:9161",
        nargs="*",
        required=True,
    )
    parser.add_argument("-ks", "--keyspace", help="The keyspace: myks", required=True)
    parser.add_argument("-cf", "--column_family", help="The Column family: mycf", required=True)
    parser.add_argument("-k", "--key", help="A specific key to be imported", required=False)
    parser.add_argument("-c", "--count", help="Total count of keys to be imported", required=False)
    parser.add_argument("-a", "--all", action="store_true", help="Get all. Not recommended!", required=False)
    args = vars(parser.parse_args())

    # Connection settings for Cassandra. The script is meant to sync data,
    # so the source and destination keyspace and column family should be the same.
    try:
        source_pool = ConnectionPool(args["keyspace"], args["source"])
        destination_pool = ConnectionPool(args["keyspace"], args["destination"])

        self.source_cf = ColumnFamily(source_pool, args["column_family"])
        self.source_cf.autopack_names = False
        self.source_cf.autopack_values = False
        self.source_cf.autopack_keys = False
        self.source_cf.default_validation_class = pycassa.types.UTF8Type()

        self.destination_cf = ColumnFamily(destination_pool, args["column_family"])
        self.destination_cf.autopack_names = False
        self.destination_cf.autopack_values = False
        self.destination_cf.autopack_keys = False
        self.destination_cf.default_validation_class = pycassa.types.UTF8Type()
    except Exception as e:
        print "ERROR: The keyspace or the column family does not exist or the request is timing out!"
        sys.exit()

    # Optional data
    self.count = args["count"]
    if self.count:
        self.count = int(self.count)
    self.key = args["key"]
    self.all = args["all"]
def test_basic_pools(self):
    for pool_cls in _pools:
        print "Pool class: %s" % pool_cls.__name__
        pool = pool_cls(keyspace='Keyspace1', credentials=_credentials)
        pool.dispose()
        pool = pool.recreate()
        conn = pool.get()
        cf = ColumnFamily(conn, 'Standard1')
        cf.insert('key1', {'col': 'val'})
        pool.status()
        pool.return_conn(conn)
def __init__(self, column_family, client,
             read_consistency_level=CL_ONE,
             write_consistency_level=CL_QUORUM):
    self.column_family = column_family
    self.client = client
    self.read_consistency_level = read_consistency_level
    self.write_consistency_level = write_consistency_level
    self.cf = ColumnFamily(self.client, self.column_family,
                           read_consistency_level=read_consistency_level,
                           write_consistency_level=write_consistency_level)
def initFromDB(self, mysql, sql, family, id_name, val_name):
    cursor = mysql.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute(sql)
    inserting = defaultdict(dict)
    for row in cursor.fetchall():
        val = row[val_name]
        id = pack(row[id_name])
        inserting[id][val] = val
    fam = ColumnFamily(self.connection, family)
    fam.truncate()
    logging.info('Initializing %s' % family)
    fam.batch_insert(inserting)
def setUp(self):
    self.client = connect()
    self.client.login('Keyspace1', {'username': '******', 'password': '******'})
    self.cf = ColumnFamily(self.client, 'Keyspace1', 'Standard2',
                           write_consistency_level=ConsistencyLevel.ONE,
                           buffer_size=2, timestamp=self.timestamp,
                           dict_class=TestDict)
    try:
        self.timestamp_n = int(self.cf.get('meta')['timestamp'])
    except NotFoundException:
        self.timestamp_n = 0
    self.clear()
def get(self, colfam, key, columns=None, column_start="", column_finish="",
        column_reversed=False, column_count=100, include_timestamp=False,
        super_column=None, read_consistency_level=None):
    cf = ColumnFamily(self.db, colfam)
    try:
        return cf.get(key, columns, column_start, column_finish,
                      column_reversed, column_count, include_timestamp,
                      super_column, read_consistency_level)
    except NotFoundException:
        return None
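# A hedged usage sketch for the get()/remove() wrappers above. 'store' stands for an
# instance of the (unnamed) enclosing class whose self.db is a pycassa ConnectionPool;
# 'Standard1', 'key1', and 'col' are illustrative only. get() swallows
# NotFoundException and returns None, so callers can branch on the result instead of
# catching the exception themselves.
row = store.get('Standard1', 'key1', columns=['col'])
if row is None:
    print "key1 not found"
else:
    store.remove('Standard1', 'key1', columns=['col'])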
def setUp(self):
    credentials = {'username': '******', 'password': '******'}
    self.pool = ConnectionPool(keyspace='Keyspace1', credentials=credentials)
    self.cf = ColumnFamily(self.pool, 'Standard2',
                           autopack_names=False, autopack_values=False)
    self.indexed_cf = ColumnFamily(self.pool, 'Indexed1',
                                   autopack_names=False, autopack_values=False)
    self.map = ColumnFamilyMap(TestUTF8, self.cf)
    self.indexed_map = ColumnFamilyMap(TestIndex, self.indexed_cf)
    self.empty_map = ColumnFamilyMap(TestEmpty, self.cf, raw_columns=True)
def __init__(cls, name, bases, dct):
    type.__init__(cls, name, bases, dct)

    if cls._use_db:
        if cls._type_prefix is None:
            # default to the class name
            cls._type_prefix = name

        if '_' in cls._type_prefix:
            raise TdbException("Cannot have _ in type prefix %r (for %r)"
                               % (cls._type_prefix, name))

        if cls._type_prefix in thing_types:
            raise InvariantException("Redefining type %r?" % (cls._type_prefix))

        # if we weren't given a specific _cf_name, we can use the
        # class's name
        cf_name = cls._cf_name or name

        thing_types[cls._type_prefix] = cls

        cls._read_consistency_level = read_consistency_level
        cls._write_consistency_level = write_consistency_level

        try:
            cls._cf = ColumnFamily(cassandra, cf_name,
                                   read_consistency_level=read_consistency_level,
                                   write_consistency_level=write_consistency_level)
        except NotFoundException:
            if not db_create_tables:
                raise

            manager = get_manager()

            log.warning("Creating Cassandra Column Family %s" % (cf_name,))
            with make_lock('cassandra_schema'):
                manager.create_column_family(keyspace, cf_name,
                                             comparator_type=cls._compare_with)
            log.warning("Created Cassandra Column Family %s" % (cf_name,))

            # try again to look it up
            cls._cf = ColumnFamily(cassandra, cf_name,
                                   read_consistency_level=read_consistency_level,
                                   write_consistency_level=write_consistency_level)

    cls._kind = name
def setUp(self):
    credentials = {'username': '******', 'password': '******'}
    self.client = connect_thread_local('Keyspace1', credentials=credentials)
    self.cf = ColumnFamily(self.client, 'Super2',
                           write_consistency_level=ConsistencyLevel.ONE,
                           timestamp=self.timestamp, super=True)
    self.map = ColumnFamilyMap(TestUTF8, self.cf)
    try:
        self.timestamp_n = int(self.cf.get('meta')['meta']['timestamp'])
    except NotFoundException:
        self.timestamp_n = 0
    self.clear()
def setUp(self):
    self.client = connect_thread_local()
    self.client.login('Keyspace1', {'username': '******', 'password': '******'})
    self.cf = ColumnFamily(self.client, 'Keyspace1', 'Super2',
                           write_consistency_level=ConsistencyLevel.ONE,
                           buffer_size=2, timestamp=self.timestamp,
                           super=True)
    try:
        self.timestamp_n = int(self.cf.get('meta')['meta']['timestamp'])
    except NotFoundException:
        self.timestamp_n = 0
    self.clear()
def setup_module():
    global pool, cf, scf, indexed_cf, counter_cf, counter_scf, sys_man, have_counters
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace', credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    scf = ColumnFamily(pool, 'Super1', dict_class=dict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    sys_man = SystemManager()
    have_counters = sys_man._conn.version != CASSANDRA_07
    if have_counters:
        counter_cf = ColumnFamily(pool, 'Counter1')
        counter_scf = ColumnFamily(pool, 'SuperCounter1')
def setUp(self):
    credentials = {'username': '******', 'password': '******'}
    self.client = connect_thread_local('Keyspace1', credentials=credentials)

    self.cf = ColumnFamily(self.client, 'Standard2')

    self.cf_long = ColumnFamily(self.client, 'StdLong')
    self.cf_int = ColumnFamily(self.client, 'StdInteger')
    self.cf_time = ColumnFamily(self.client, 'StdTimeUUID')
    self.cf_lex = ColumnFamily(self.client, 'StdLexicalUUID')
    self.cf_ascii = ColumnFamily(self.client, 'StdAscii')
    self.cf_utf8 = ColumnFamily(self.client, 'StdUTF8')
    self.cf_bytes = ColumnFamily(self.client, 'StdBytes')

    self.cf_suplong = ColumnFamily(self.client, 'SuperLong', super=True)
    self.cf_supint = ColumnFamily(self.client, 'SuperInt', super=True)
    self.cf_suptime = ColumnFamily(self.client, 'SuperTime', super=True)
    self.cf_suplex = ColumnFamily(self.client, 'SuperLex', super=True)
    self.cf_supascii = ColumnFamily(self.client, 'SuperAscii', super=True)
    self.cf_suputf8 = ColumnFamily(self.client, 'SuperUTF8', super=True)
    self.cf_supbytes = ColumnFamily(self.client, 'SuperBytes', super=True)

    self.cf_suplong_sublong = ColumnFamily(self.client, 'SuperLongSubLong', super=True)
    self.cf_suplong_subint = ColumnFamily(self.client, 'SuperLongSubInt', super=True)
    self.cf_suplong_subtime = ColumnFamily(self.client, 'SuperLongSubTime', super=True)
    self.cf_suplong_sublex = ColumnFamily(self.client, 'SuperLongSubLex', super=True)
    self.cf_suplong_subascii = ColumnFamily(self.client, 'SuperLongSubAscii', super=True)
    self.cf_suplong_subutf8 = ColumnFamily(self.client, 'SuperLongSubUTF8', super=True)
    self.cf_suplong_subbytes = ColumnFamily(self.client, 'SuperLongSubBytes', super=True)

    self.cf_valid_long = ColumnFamily(self.client, 'ValidatorLong')
    self.cf_valid_int = ColumnFamily(self.client, 'ValidatorInt')
    self.cf_valid_time = ColumnFamily(self.client, 'ValidatorTime')
    self.cf_valid_lex = ColumnFamily(self.client, 'ValidatorLex')
    self.cf_valid_ascii = ColumnFamily(self.client, 'ValidatorAscii')
    self.cf_valid_utf8 = ColumnFamily(self.client, 'ValidatorUTF8')
    self.cf_valid_bytes = ColumnFamily(self.client, 'ValidatorBytes')

    self.cfs = [self.cf_long, self.cf_int, self.cf_time, self.cf_lex,
                self.cf_ascii, self.cf_utf8, self.cf_bytes,
                self.cf_suplong, self.cf_supint, self.cf_suptime,
                self.cf_suplex, self.cf_supascii, self.cf_suputf8,
                self.cf_supbytes,
                self.cf_suplong_sublong, self.cf_suplong_subint,
                self.cf_suplong_subtime, self.cf_suplong_sublex,
                self.cf_suplong_subascii, self.cf_suplong_subutf8,
                self.cf_suplong_subbytes,
                self.cf_valid_long, self.cf_valid_int, self.cf_valid_time,
                self.cf_valid_lex, self.cf_valid_ascii, self.cf_valid_utf8,
                self.cf_valid_bytes]

    try:
        self.timestamp_n = int(self.cf.get('meta')['timestamp'])
    except NotFoundException:
        self.timestamp_n = 0
    self.clear()
def setup_module():
    global pool, cf, indexed_cf, pool_stub, indexed_cf_stub, cf_stub
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials, timeout=1.0)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    pool_stub = ConnectionPoolStub(keyspace='PycassaTestKeyspace',
                                   credentials=credentials, timeout=1.0)
    cf_stub = ColumnFamilyStub(pool_stub, 'Standard1', dict_class=TestDict)
    indexed_cf_stub = ColumnFamilyStub(pool_stub, 'Indexed1')
def test_queue_threadlocal_failover(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(1, 5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(listener.failure_count, i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

    pool.dispose()
    listener.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(listener.failure_count, 5)
    pool.dispose()
def test_queue_pool_recycle(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=True, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    cf = ColumnFamily(pool, 'Standard1')
    columns = {'col1': 'val', 'col2': 'val'}
    for i in range(10):
        cf.insert('key', columns)

    assert_equal(listener.recycle_count, 5)

    pool.dispose()
    listener.reset()

    # Try with threadlocal=True
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=False, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)
    cf = ColumnFamily(pool, 'Standard1')
    for i in range(10):
        cf.insert('key', columns)

    pool.dispose()
    assert_equal(listener.recycle_count, 5)
def test_queue_pool_recycle(self):
    listener = _TestListener()
    pool = QueuePool(pool_size=5, max_overflow=5, recycle=1,
                     prefill=True, pool_timeout=0.5, timeout=1,
                     keyspace='Keyspace1', credentials=_credentials,
                     listeners=[listener], use_threadlocal=False)
    conn = pool.get()
    cf = ColumnFamily(conn, 'Standard1')
    for i in range(10):
        cf.insert('key', {'col': 'val'})
    conn.return_to_pool()

    assert_equal(listener.recycle_count, 1)

    pool.dispose()
    listener.reset()

    # Try with threadlocal=True
    pool = QueuePool(pool_size=5, max_overflow=5, recycle=10,
                     prefill=True, pool_timeout=0.5, timeout=1,
                     keyspace='Keyspace1', credentials=_credentials,
                     listeners=[listener], use_threadlocal=True)
    conn = pool.get()
    cf = ColumnFamily(conn, 'Standard1')
    for i in range(10):
        cf.insert('key', {'col': 'val'})
    conn.return_to_pool()

    assert_equal(listener.recycle_count, 1)
def setUp(self):
    credentials = {'username': '******', 'password': '******'}
    self.client = connect('Keyspace1', credentials=credentials)
    self.cf = ColumnFamily(self.client, 'Standard2',
                           write_consistency_level=ConsistencyLevel.ONE,
                           timestamp=self.timestamp)
    self.scf = ColumnFamily(self.client, 'Super1',
                            write_consistency_level=ConsistencyLevel.ONE,
                            super=True, timestamp=self.timestamp)
    try:
        self.timestamp_n = int(self.cf.get('meta')['timestamp'])
    except NotFoundException:
        self.timestamp_n = 0
    self.clear()
def __call__(self, force=False, parent_type=None, cassandra_servers=None):
    valid_acl = []
    parents = Collection(parent_type, fetch=True, recursive=2)
    for parent in parents:
        if 'access_control_lists' in parent.keys():
            valid_acl += [acl['uuid'] for acl in parent['access_control_lists']]
    valid_acl = list(set(valid_acl))

    orphaned_acls = set([])
    # Due to a bug in the contrail API, we cannot list more than 10000 elements
    # of a resource and there is no way to list ACLs by tenant. So this ugly
    # hack directly fetches all ACL UUIDs from the cassandra database :(
    pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers)
    fqname_cf = ColumnFamily(pool, 'obj_fq_name_table')
    for key, value in fqname_cf.xget('access_control_list'):
        acl_uuid = decode_string(key).split(':')[-1]
        if acl_uuid in valid_acl:
            continue
        acl = Resource('access-control-list', uuid=acl_uuid, fetch=True)
        if ('parent_uuid' in acl.keys() and
                'parent_type' in acl.keys() and
                acl['parent_type'] == parent_type and
                acl.uuid not in valid_acl):
            try:
                parent_acl = acl.parent
            except ResourceNotFound:
                msg = ("The %s parent ACL %s was not found." %
                       (parent_type.replace('-', ' '), acl['parent_uuid']))
                if force:
                    msg = msg + " Deleting orphan ACL %s." % acl.uuid
                    acl.delete()
                logger.debug(msg)
                orphaned_acls.add(acl['uuid'])
            else:
                logger.debug("The ACL %(acl)s has a %(parent_type)s parent "
                             "%(parent_acl)s which exists but was not found in "
                             "the preceding %(parent_type)s list. Not deleting it." % {
                                 'acl': acl,
                                 'parent_type': parent_type.replace('-', ' '),
                                 'parent_acl': parent_acl
                             })

    if force:
        logger.debug("%d orphaned ACLs were deleted" % len(orphaned_acls))
    else:
        logger.debug("Found %d orphaned ACLs to delete" % len(orphaned_acls))
def _cassandra_init_conn_pools(self):
    for ks, cf_list in self._keyspaces.items():
        pool = pycassa.ConnectionPool(
            ks, self._server_list, max_overflow=-1, use_threadlocal=True,
            prefill=True, pool_size=20, pool_timeout=120,
            max_retries=-1, timeout=5)

        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM

        for (cf, _) in cf_list:
            self._cf_dict[cf] = ColumnFamily(
                pool, cf,
                read_consistency_level=rd_consistency,
                write_consistency_level=wr_consistency)

    ConnectionState.update(conn_type=ConnectionType.DATABASE,
                           name='Cassandra', status=ConnectionStatus.UP,
                           message='', server_addrs=self._server_list)
    self._conn_state = ConnectionStatus.UP
    msg = 'Cassandra connection ESTABLISHED'
    self._logger(msg, level=SandeshLevel.SYS_NOTICE)
def test_queue_threadlocal_retry_limit(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_failure_connection_info(self):
    stats_logger = StatsLoggerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')

    # Corrupt the connection
    conn = pool.get()
    setattr(conn, 'send_get', conn._fail_once)
    conn._should_fail = True
    conn.return_to_pool()

    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])
    assert_true('request' in stats_logger.failure_dict['connection'].info)
    request = stats_logger.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'], ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
def _cassandra_init_conn_pools(self):
    socket_factory = self._make_socket_factory()
    for ks, cf_dict in itertools.chain(list(self._rw_keyspaces.items()),
                                       list(self._ro_keyspaces.items())):
        keyspace = '%s%s' % (self._db_prefix, ks)
        pool = pycassa.ConnectionPool(
            keyspace, self._server_list, max_overflow=5, use_threadlocal=True,
            prefill=True, pool_size=self._pool_size, pool_timeout=120,
            max_retries=15, timeout=5, credentials=self._credential,
            socket_factory=socket_factory)

        for cf_name in cf_dict:
            cf_kwargs = cf_dict[cf_name].get('cf_args', {})
            self._cf_dict[cf_name] = ColumnFamily(
                pool, cf_name,
                read_consistency_level=ConsistencyLevel.QUORUM,
                write_consistency_level=ConsistencyLevel.QUORUM,
                dict_class=dict,
                **cf_kwargs)

    ConnectionState.update(conn_type=ConnType.DATABASE,
                           name='Cassandra', status=ConnectionStatus.UP,
                           message='', server_addrs=self._server_list)
    self._conn_state = ConnectionStatus.UP
    msg = 'Cassandra connection ESTABLISHED'
    self._logger(msg, level=SandeshLevel.SYS_NOTICE)
def test_queue_failure_with_no_retries(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(stats_logger.stats['failed'], 1)  # didn't retry at all

    pool.dispose()
class Dictionary:
    """
    Dictionary features:
        * English-Vietnamese lookup
        * Vietnamese-English lookup
    """

    def __init__(self):
        # Connect to Cassandra servers
        client = connect(cassandra_hosts)
        self.d = ColumnFamily(client, cassandra_keyspace, 'Dictionary', super=True)
        self.u = ColumnFamily(client, cassandra_keyspace, 'Users', super=True)
        self.e = Error()

    def _lookup(self, keyword, dict_type='en_vi'):
        try:
            return self.d.get(dict_type, super_column=str(keyword))
        except (NotFoundException, InvalidRequestException):
            return None

    def lookup(self, environ):
        try:
            session_id = environ['request']['session_id']
        except KeyError:
            return self.e.authen_error("Missing session_id")
        try:
            self.u.get('session_id', super_column=session_id)
        except (NotFoundException, InvalidRequestException):
            return self.e.authen_error()

        result = self._lookup(environ['request']['keyword'])
        result2 = self._lookup(environ['request']['keyword'], 'vi_en')
        result3 = self._lookup(environ['request']['keyword'], 'en_en')
        if (result is None) and (result2 is None) and (result3 is None):
            return self.e.not_found("The keyword was not found in the dictionary")

        xml = []
        if result is not None:
            xml.append('<result type="en_vi" keyword="%s" mean="%s" spell="%s" status_code="200"/>'
                       % (xml_format(environ['request']['keyword']),
                          xml_format(result['nghia']),
                          xml_format(result['phien_am_quoc_te'])))
        if result2 is not None:
            xml.append('<result type="vi_en" keyword="%s" mean="%s" spell="" status_code="200"/>'
                       % (xml_format(environ['request']['keyword']),
                          xml_format(result2['nghia'])))
        return '\n\n'.join(xml)

    def total_words(self, dict_type='en_vi'):
        return self.d.get_count(dict_type)
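# A hedged usage sketch for the Dictionary class above. It assumes an upstream handler
# builds the environ dict with a 'request' entry and that the 'Users' row 'session_id'
# already contains the caller's session; the session value and keyword are
# illustrative only.
dictionary = Dictionary()
environ = {'request': {'session_id': 'abc123', 'keyword': 'hello'}}
print dictionary.lookup(environ)        # XML <result .../> lines, or an error response
print dictionary.total_words('en_vi')   # number of entries in the English-Vietnamese row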
class ActiveCode:

    def __init__(self):
        # Connect to Cassandra servers
        client = connect(cassandra_hosts)
        self.u = ColumnFamily(client, cassandra_keyspace, 'Users', super=True)
        self.e = Error()

    def _gen_active_code(self, length=6, chars=(letters + digits)):
        return ''.join([choice(chars) for _ in xrange(length)])

    def _is_exist(self, code):
        try:
            self.u.get('active_codes', super_column=code)
            return True
        except NotFoundException:
            return False

    def get_new_code(self):
        new = self._gen_active_code()
        while self._is_exist(new) is True:
            new = self._gen_active_code()
        # store in database
        self.u.insert('active_codes', {new: {'create_time': str(datetime.now())}})
        return new

    def active(self, username, active_code):
        if self._is_exist(active_code) is False:
            return False
        self.u.insert('active_codes', {active_code: {'active_time': str(datetime.now()),
                                                     'owner': str(username)}})
        return True

    def stats(self):
        return self.u.get('active_codes')
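# A hedged usage sketch for the ActiveCode helper above, assuming cassandra_hosts,
# cassandra_keyspace, and the 'Users' super column family are already configured.
# Each activation code becomes a super column under the 'active_codes' row, so
# stats() returns every code along with its create/activate metadata; the username
# below is illustrative only.
codes = ActiveCode()
code = codes.get_new_code()          # generates a unique 6-character code and stores it
assert codes.active('alice', code)   # marks the code as used by 'alice'
print codes.stats()                  # all codes under the 'active_codes' row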
def setup_class(cls):
    sys = SystemManager()
    sys.create_column_family(TEST_KS, 'StdLong', comparator_type=LONG_TYPE)
    sys.create_column_family(TEST_KS, 'StdInteger', comparator_type=INT_TYPE)
    sys.create_column_family(TEST_KS, 'StdTimeUUID', comparator_type=TIME_UUID_TYPE)
    sys.create_column_family(TEST_KS, 'StdLexicalUUID', comparator_type=LEXICAL_UUID_TYPE)
    sys.create_column_family(TEST_KS, 'StdAscii', comparator_type=ASCII_TYPE)
    sys.create_column_family(TEST_KS, 'StdUTF8', comparator_type=UTF8_TYPE)
    sys.create_column_family(TEST_KS, 'StdBytes', comparator_type=BYTES_TYPE)
    sys.close()

    cls.cf_long = ColumnFamily(pool, 'StdLong')
    cls.cf_int = ColumnFamily(pool, 'StdInteger')
    cls.cf_time = ColumnFamily(pool, 'StdTimeUUID')
    cls.cf_lex = ColumnFamily(pool, 'StdLexicalUUID')
    cls.cf_ascii = ColumnFamily(pool, 'StdAscii')
    cls.cf_utf8 = ColumnFamily(pool, 'StdUTF8')
    cls.cf_bytes = ColumnFamily(pool, 'StdBytes')

    cls.cfs = [cls.cf_long, cls.cf_int, cls.cf_time, cls.cf_lex,
               cls.cf_ascii, cls.cf_utf8, cls.cf_bytes]
def fakepopulate(self, numkeyspace, numcolfam, numentries):
    '''This method populates fake data in Cassandra.'''
    countkeys = numkeyspace
    logging.info("Inside fake populate method")
    logging.debug("Method started at : %s", str(datetime.now()))

    # create random keyspaces
    while countkeys:
        name = str(uuid1()).replace("-", "")
        if name not in self.sysmanager.list_keyspaces():
            countkeys = countkeys - 1
            self.sysmanager.create_keyspace(name, strategy_options={"replication_factor": "1"})

    # create random column families in each keyspace
    for keyspace in self.sysmanager.list_keyspaces():
        if keyspace != 'system':  # skip the system keyspace
            countcolfam = numcolfam
            while countcolfam:
                name = str(uuid1()).replace("-", "")
                if name not in self.sysmanager.get_keyspace_column_families(keyspace).keys():
                    countcolfam = countcolfam - 1
                    self.sysmanager.create_column_family(keyspace, name)

    # create random rows in each column family of each keyspace
    totalcount = 0
    for keyspace in self.sysmanager.list_keyspaces():
        if keyspace != 'system':  # skip the system keyspace
            pool = ConnectionPool(keyspace, [self.address])
            columnfamilies = self.sysmanager.get_keyspace_column_families(keyspace)
            for columnfamilyname in columnfamilies.keys():
                colfamily = ColumnFamily(pool, columnfamilyname)
                countnumetries = numentries
                while countnumetries:
                    rowkeyname = str(uuid1()).replace("-", "")
                    colname = str(uuid1()).replace("-", "")
                    colval = str(uuid1()).replace("-", "")
                    # col_fam.insert('row_key', {'col_name': 'col_val'})
                    colfamily.insert(rowkeyname, {colname: colval})
                    countnumetries = countnumetries - 1
                    totalcount = totalcount + 1

    logging.debug("Method ended at : %s", str(datetime.now()))
    logging.debug("Total Entries added : %s", str(totalcount))
    logging.info("Exiting fakepopulate method")
def test_assertion_threadlocal_failover(self):
    listener = _TestListener()
    pool = AssertionPool(keyspace='Keyspace1', credentials=_credentials,
                         listeners=[listener], use_threadlocal=False,
                         server_list=['localhost:9160', 'localhost:9160'])
    conn = pool.get()
    cf = ColumnFamily(conn, 'Standard1')

    for i in range(1, 5):
        setattr(cf.client._connection.client, 'batch_mutate', _timeout)

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val'})
        assert_equal(listener.failure_count, i)
        cf.get('key')

    pool.dispose()
def insert_insert_get_indexed_slices(self):
    indexed_cf = ColumnFamily(pool, "Indexed1")

    columns = {"birthdate": 1L}

    keys = []
    for i in range(1, 4):
        indexed_cf.insert("key%d" % i, columns)
        keys.append("key%d" % i)

    expr = index.create_index_expression(column_name="birthdate", value=1L)
    clause = index.create_index_clause([expr])

    count = 0
    for key, cols in indexed_cf.get_indexed_slices(clause):
        assert_equal(cols, columns)
        assert key in keys
        count += 1
    assert_equal(count, 3)
def test_default_validated_columns(self):
    sys = SystemManager()
    sys.create_column_family(TEST_KS, 'DefaultValidator', default_validation_class=LONG_TYPE)
    sys.alter_column(TEST_KS, 'DefaultValidator', 'subcol', TIME_UUID_TYPE)
    sys.close()

    cf = ColumnFamily(pool, 'DefaultValidator')
    key = 'key1'

    col_cf = {'aaaaaa': 1L}
    col_cm = {'subcol': TIME1}
    col_ncf = {'aaaaaa': TIME1}
    col_ncm = {'subcol': 1L}

    # Both of these inserts work, as cf allows
    # longs and cm for 'subcol' allows TIMEUUIDs.
    cf.insert(key, col_cf)
    cf.insert(key, col_cm)
    assert_equal(cf.get(key), {'aaaaaa': 1L, 'subcol': TIME1})
def insert_insert_get_indexed_slices(self):
    indexed_cf = ColumnFamily(self.client, 'Indexed1')

    columns = {'birthdate': 1L}

    keys = []
    for i in range(1, 4):
        indexed_cf.insert('key%d' % i, columns)
        keys.append('key%d' % i)

    expr = index.create_index_expression(column_name='birthdate', value=1L)
    clause = index.create_index_clause([expr])

    count = 0
    for key, cols in indexed_cf.get_indexed_slices(clause):
        assert cols == columns
        assert key in keys
        count += 1
    assert_equal(count, 3)
def exportdata(self, destination=None):
    '''Method to export data to files.'''
    logging.info("inside export data method")
    count = 0
    for keyspace in self.sysmanager.list_keyspaces():
        if keyspace != 'system':  # skip the system keyspace
            pool = ConnectionPool(keyspace, [self.address])
            columnfamilies = self.sysmanager.get_keyspace_column_families(keyspace)
            result = {}
            # iterate through all the column families
            for columnfamilyname in columnfamilies.keys():
                colfamily = ColumnFamily(pool, columnfamilyname)
                cols = colfamily.get_range(column_reversed=True)
                result['keyspace'] = keyspace
                result['columnfamily'] = columnfamilyname
                result['cols'] = []
                for col in cols:
                    result['cols'].append(col)
                    count = count + 1
                    # once 100000 rows are buffered, flush the result to a file
                    # and reset the buffer
                    if count == 100000:
                        filename = "%s.out" % str(uuid1()).replace("-", "")
                        filepath = "output/%s" % filename
                        file = open(filepath, "w")
                        pickle.dump(result, file, protocol=0)
                        file.close()
                        # reset cols
                        print sys.getsizeof(result)  # buffer size, in bytes
                        result['cols'] = []
                        count = 0
            if count > 0:
                filename = "%s.out" % str(uuid1()).replace("-", "")
                filepath = "output/%s" % filename
                file = open(filepath, "w")
                pickle.dump(result, file, protocol=0)
                file.close()