def test_server_list_func(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool('PycassaTestKeyspace', server_list=_get_list,
                          listeners=[stats_logger], prefill=False)
    assert_equal(stats_logger.serv_list, ['foo:bar'])
    assert_equal(stats_logger.stats['list'], 1)
    pool.dispose()
def test_server_list_func(self):
    listener = _TestListener()
    pool = ConnectionPool('PycassaTestKeyspace', server_list=_get_list,
                          listeners=[listener], prefill=False)
    assert_equal(listener.serv_list, ['foo:bar'])
    assert_equal(listener.list_count, 1)
    pool.dispose()
def test_queue_failure_on_retry(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError
    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure
    pool._replace_wrapper = raiser

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_threadlocal_retry_limit(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_threadlocal_failover(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(1, 5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(stats_logger.stats['failed'], i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

    pool.dispose()
    stats_logger.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(stats_logger.stats['failed'], 5)

    pool.dispose()
def test_failure_connection_info(self):
    stats_logger = StatsLoggerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')

    # Corrupt the connection
    conn = pool.get()
    setattr(conn, 'send_get', conn._fail_once)
    conn._should_fail = True
    conn.return_to_pool()

    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])
    assert_true('request' in stats_logger.failure_dict['connection'].info)
    request = stats_logger.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'], ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
def test_queue_failure_with_no_retries(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(stats_logger.stats['failed'], 1)  # didn't retry at all

    pool.dispose()
def test_queue_failover(self):
    for prefill in (True, False):
        listener = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[listener], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for i in range(1, 5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()

            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
            assert_equal(listener.failure_count, i)
            assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

        pool.dispose()
def create_cfs(self): """ Creates the Cassandra Column Families (if not exist) """ sys_mgr = None pool = None try: sys_mgr = SystemManager() pool = ConnectionPool(settings.KEYSPACE, server_list=settings.CASSANDRA_HOSTS) for cf_name in [ CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST, CF_LOGS_BY_SEVERITY ]: try: cf = ColumnFamily(pool, cf_name) except: logger.info("create_cfs(): Creating column family %s", cf_name) sys_mgr.create_column_family( settings.KEYSPACE, cf_name, comparator_type=TimeUUIDType()) cf = ColumnFamily(pool, cf_name) cf.get_count(str(uuid.uuid4())) finally: if pool: pool.dispose() if sys_mgr: sys_mgr.close()
def test_queue_pool_recycle(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=True, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)

    cf = ColumnFamily(pool, 'Standard1')
    columns = {'col1': 'val', 'col2': 'val'}
    for i in range(10):
        cf.insert('key', columns)

    assert_equal(listener.recycle_count, 5)

    pool.dispose()
    listener.reset()

    # Try with threadlocal=True
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=False, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)

    cf = ColumnFamily(pool, 'Standard1')
    for i in range(10):
        cf.insert('key', columns)

    pool.dispose()
    assert_equal(listener.recycle_count, 5)
class TestDefaultValidators(unittest.TestCase):

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_def_valid = ColumnFamily(self.pool, "DefaultValidator")

    def tearDown(self):
        for key, cols in self.cf_def_valid.get_range():
            self.cf_def_valid.remove(key)
        self.pool.dispose()

    def test_default_validated_columns(self):
        key = "key1"

        col_cf = {"aaaaaa": 1L}
        col_cm = {"subcol": TIME1}
        col_ncf = {"aaaaaa": TIME1}
        col_ncm = {"subcol": 1L}

        # Both of these inserts work, as cf allows longs and
        # cm for 'subcol' allows TIMEUUIDs.
        self.cf_def_valid.insert(key, col_cf)
        self.cf_def_valid.insert(key, col_cm)
        assert self.cf_def_valid.get(key) == {"aaaaaa": 1L, "subcol": TIME1}

        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncf)
        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncm)
def test_queue_pool(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.max_count, 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 0)
    assert_equal(listener.checkin_count, 5)

    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 5)
    assert_equal(listener.checkin_count, 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 15)
    assert_equal(listener.checkout_count, 20)

    assert_equal(listener.close_count, 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    print "in test:", id(conns[-1])
    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    pool.dispose()
    assert_equal(listener.dispose_count, 1)
class TestValidators(unittest.TestCase):

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_valid_long = ColumnFamily(self.pool, "ValidatorLong")
        self.cf_valid_int = ColumnFamily(self.pool, "ValidatorInt")
        self.cf_valid_time = ColumnFamily(self.pool, "ValidatorTime")
        self.cf_valid_lex = ColumnFamily(self.pool, "ValidatorLex")
        self.cf_valid_ascii = ColumnFamily(self.pool, "ValidatorAscii")
        self.cf_valid_utf8 = ColumnFamily(self.pool, "ValidatorUTF8")
        self.cf_valid_bytes = ColumnFamily(self.pool, "ValidatorBytes")

        self.cfs = [self.cf_valid_long, self.cf_valid_int, self.cf_valid_time,
                    self.cf_valid_lex, self.cf_valid_ascii, self.cf_valid_utf8,
                    self.cf_valid_bytes]

    def tearDown(self):
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def test_validated_columns(self):
        key = "key1"

        col = {"subcol": 1L}
        self.cf_valid_long.insert(key, col)
        assert self.cf_valid_long.get(key) == col

        col = {"subcol": 1}
        self.cf_valid_int.insert(key, col)
        assert self.cf_valid_int.get(key) == col

        col = {"subcol": TIME1}
        self.cf_valid_time.insert(key, col)
        assert self.cf_valid_time.get(key) == col

        col = {"subcol": uuid.UUID(bytes="aaa aaa aaa aaaa")}
        self.cf_valid_lex.insert(key, col)
        assert self.cf_valid_lex.get(key) == col

        col = {"subcol": "aaa"}
        self.cf_valid_ascii.insert(key, col)
        assert self.cf_valid_ascii.get(key) == col

        col = {"subcol": u"a\u0020"}
        self.cf_valid_utf8.insert(key, col)
        assert self.cf_valid_utf8.get(key) == col

        col = {"subcol": "aaa"}
        self.cf_valid_bytes.insert(key, col)
        assert self.cf_valid_bytes.get(key) == col
def test_queue_pool(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    print("in test:", id(conns[-1]))
    conns[-1].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
def test_pool_connection_failure(self):
    stats_logger = StatsLoggerWithListStorage()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        if stats_logger.serv_list[0] == 'localhost:9160':
            return 0
        else:
            return 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(stats_logger.stats['failed'], 4 + get_extra())

    for i in range(0, 7):
        pool.get()
    assert_equal(stats_logger.stats['failed'], 6 + get_extra())

    pool.dispose()
    stats_logger.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(stats_logger.stats['failed'], 4 + get_extra())

    threads = []
    for i in range(0, 7):
        threads.append(threading.Thread(target=pool.get))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['failed'], 6 + get_extra())

    pool.dispose()
def test_pool_invalid_request(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=3,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Make sure the pool doesn't hide or retry invalid requests
    assert_raises(InvalidRequestException, cf.add, 'key', 'col')
    assert_raises(NotFoundException, cf.get, 'none')

    pool.dispose()
def __call__(self, uuids=None, cassandra_servers=None, force=False, **kwargs):
    super(CheckBadRefs, self).__call__(**kwargs)
    self.force = force
    pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers)
    uuid_cf = ColumnFamily(pool, 'obj_uuid_table')
    if uuids:
        def uuids_g():
            for uuid in uuids:
                yield uuid
    else:
        def uuids_g():
            for k, v in uuid_cf.get_range(column_count=1, filter_empty=True):
                yield k
    for uuid in uuids_g():
        values = dict(uuid_cf.xget(uuid))
        res = self._get_current_resource(uuid, values)
        bad_refs = self._check_resource_refs(uuid, values)
        if not res or bad_refs:
            printo(self._props_to_json(values))
            if not res and not self.check:
                if self.force or continue_prompt(message="Delete ?"):
                    self._delete(uuid_cf, uuid)
def setup_module():
    global pool, cf, scf
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1')
    scf = ColumnFamily(pool, 'Super1')
def _get_connection(retry=None, wait_between_retry=None):
    """Creates a connection to Cassandra.

    Returns: pool
    """
    num = 0
    if retry is None:
        retry = settings.CASSANDRA_CONNECT_RETRY_COUNT
    if wait_between_retry is None:
        wait_between_retry = settings.CASSANDRA_CONNECT_RETRY_WAIT
    while True:
        try:
            pool = ConnectionPool(
                settings.KEYSPACE,
                server_list=settings.CASSANDRA_HOSTS,
                timeout=settings.CASSANDRA_CONNECTION_POOL_TIMEOUT)
            return pool
        except AllServersUnavailable:
            num += 1
            if num >= retry:
                logger.exception("Giving up after many retries....")
                raise
            logger.warn("AllServersUnavailable detected. Retrying (%d of %d)...",
                        num, retry)
            time.sleep(wait_between_retry)
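# A minimal usage sketch of the _get_connection() helper above, not part of
# the original module. CF_LOGS is assumed to be a column family name defined
# elsewhere in this module (it appears in create_cfs()); the retry arguments
# are illustrative.
pool = _get_connection(retry=3, wait_between_retry=1)
try:
    logs_cf = ColumnFamily(pool, CF_LOGS)
    logs_cf.get_count('some-row-key')  # any read exercises the pooled sockets
finally:
    pool.dispose()  # always release the pool when done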
def setup_module():
    global pool, sys_man
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials, timeout=1.0)
    sys_man = SystemManager()
def test_basic_pools(self):
    pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
    pool.dispose()
    pool = pool.recreate()
    cf = ColumnFamily(pool, 'Standard1')
    cf.insert('key1', {'col': 'val'})
    pool.status()
    pool.dispose()
def setup_module():
    global pool, cf, scf, indexed_cf, sys_man
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    scf = ColumnFamily(pool, 'Super1', dict_class=dict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    sys_man = SystemManager()
def test_pool_invalid_request(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=3,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Make sure the pool doesn't hide or retry invalid requests
    assert_raises(InvalidRequestException, cf.add, 'key', 'col')
    assert_raises(NotFoundException, cf.get, 'none')

    pool.dispose()
def set_keyspace(self, keyspace):
    try:
        self.pool = ConnectionPool(keyspace,
                                   server_list=[self.manager._conn.server],
                                   credentials=self.credentials,
                                   timeout=self.timeout)
    except InvalidRequestException:
        raise CCliClientKeyspaceError("Unknown keyspace '%s'" % keyspace)
def setup_module():
    global pool, cf, scf, counter_cf, super_counter_cf, sysman
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1')
    scf = ColumnFamily(pool, 'Super1')
    sysman = SystemManager()
    counter_cf = ColumnFamily(pool, 'Counter1')
    super_counter_cf = ColumnFamily(pool, 'SuperCounter1')
def test_queue_threadlocal_retry_limit(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_failure_with_no_retries(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(listener.failure_count, 1)  # didn't retry at all

    pool.dispose()
def test_failure_connection_info(self):
    listener = _TestListenerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')

    # Corrupt the connection
    conn = pool.get()
    setattr(conn, 'send_get', conn._fail_once)
    conn._should_fail = True
    conn.return_to_pool()

    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])
    assert_true('request' in listener.failure_dict['connection'].info)
    request = listener.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'], ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
def test_queue_threadlocal_failover(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(1, 5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(listener.failure_count, i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

    pool.dispose()
    listener.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(listener.failure_count, 5)

    pool.dispose()
def create_cfs(self): """ Creates the Cassandra Column Families (if not exist) """ sys_mgr = None pool = None try: sys_mgr = SystemManager() pool = ConnectionPool(settings.KEYSPACE, server_list=settings.CASSANDRA_HOSTS) for cf_name in [CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST, CF_LOGS_BY_SEVERITY]: try: cf = ColumnFamily(pool, cf_name) except: logger.info("create_cfs(): Creating column family %s", cf_name) sys_mgr.create_column_family(settings.KEYSPACE, cf_name, comparator_type=TimeUUIDType()) cf = ColumnFamily(pool, cf_name) cf.get_count(str(uuid.uuid4())) finally: if pool: pool.dispose() if sys_mgr: sys_mgr.close()
def setup_module():
    global pool, cf, scf, indexed_cf, counter_cf, counter_scf, sys_man, have_counters
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    scf = ColumnFamily(pool, 'Super1', dict_class=dict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    sys_man = SystemManager()
    have_counters = sys_man._conn.version != CASSANDRA_07
    if have_counters:
        counter_cf = ColumnFamily(pool, 'Counter1')
        counter_scf = ColumnFamily(pool, 'SuperCounter1')
def setup_module():
    global pool, cf, indexed_cf, pool_stub, indexed_cf_stub, cf_stub
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(keyspace='PycassaTestKeyspace',
                          credentials=credentials, timeout=1.0)
    cf = ColumnFamily(pool, 'Standard1', dict_class=TestDict)
    indexed_cf = ColumnFamily(pool, 'Indexed1')
    pool_stub = ConnectionPoolStub(keyspace='PycassaTestKeyspace',
                                   credentials=credentials, timeout=1.0)
    cf_stub = ColumnFamilyStub(pool_stub, 'Standard1', dict_class=TestDict)
    indexed_cf_stub = ColumnFamilyStub(pool_stub, 'Indexed1')
def __call__(self, force=False, parent_type=None, cassandra_servers=None):
    valid_acl = []
    parents = Collection(parent_type, fetch=True, recursive=2)
    for parent in parents:
        if 'access_control_lists' in parent.keys():
            valid_acl += [acl['uuid']
                          for acl in parent['access_control_lists']]
    valid_acl = list(set(valid_acl))

    orphaned_acls = set([])
    # Due to a bug in the contrail API, we cannot list more than 10000
    # elements on a resource and there is no way to list ACLs by tenant.
    # So this ugly hack fetches all ACL UUIDs directly from the cassandra
    # database :(
    pool = ConnectionPool('config_db_uuid', server_list=cassandra_servers)
    fqname_cf = ColumnFamily(pool, 'obj_fq_name_table')
    for key, value in fqname_cf.xget('access_control_list'):
        acl_uuid = decode_string(key).split(':')[-1]
        if acl_uuid in valid_acl:
            continue
        acl = Resource('access-control-list', uuid=acl_uuid, fetch=True)
        if ('parent_uuid' in acl.keys() and
                'parent_type' in acl.keys() and
                acl['parent_type'] == parent_type and
                acl.uuid not in valid_acl):
            try:
                parent_acl = acl.parent
            except ResourceNotFound:
                msg = ("The %s parent ACL %s was not found." %
                       (parent_type.replace('-', ' '), acl['parent_uuid']))
                if force:
                    msg = msg + " Deleting orphan ACL %s." % acl.uuid
                    acl.delete()
                logger.debug(msg)
                orphaned_acls.add(acl['uuid'])
            else:
                logger.debug("The ACL %(acl)s has a %(parent_type)s "
                             "%(parent_acl)s which exists but was not found "
                             "in the preceding %(parent_type)s list. "
                             "Not deleting it." %
                             {'acl': acl,
                              'parent_type': parent_type.replace('-', ' '),
                              'parent_acl': parent_acl})

    if force:
        logger.debug("%d orphaned ACLs were deleted" % len(orphaned_acls))
    else:
        logger.debug("Found %d orphaned ACLs to delete" % len(orphaned_acls))
def test_queue_failover(self):
    for prefill in (True, False):
        listener = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[listener], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for i in range(1, 5):
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()

            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
            assert_equal(listener.failure_count, i)
            assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})

        pool.dispose()
def setUp(self):
    credentials = {"username": "******", "password": "******"}
    self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                               credentials=credentials)
    self.cf = ColumnFamily(self.pool, "Standard2")

    self.cf_long = ColumnFamily(self.pool, "StdLong")
    self.cf_int = ColumnFamily(self.pool, "StdInteger")
    self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")
    self.cf_lex = ColumnFamily(self.pool, "StdLexicalUUID")
    self.cf_ascii = ColumnFamily(self.pool, "StdAscii")
    self.cf_utf8 = ColumnFamily(self.pool, "StdUTF8")
    self.cf_bytes = ColumnFamily(self.pool, "StdBytes")

    self.cfs = [self.cf_long, self.cf_int, self.cf_time, self.cf_lex,
                self.cf_ascii, self.cf_utf8, self.cf_bytes]
def test_pool_connection_failure(self):
    listener = _TestListener()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        if listener.serv_list[0] == 'localhost:9160':
            return 0
        else:
            return 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(listener.failure_count, 4 + get_extra())

    for i in range(0, 7):
        pool.get()
    assert_equal(listener.failure_count, 6 + get_extra())

    pool.dispose()
    listener.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(listener.failure_count, 4 + get_extra())

    threads = []
    for i in range(0, 7):
        threads.append(threading.Thread(target=pool.get))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.failure_count, 6 + get_extra())

    pool.dispose()
def __init__(self, root, keyspace, server_list,
             read_consistency_level=ConsistencyLevel.ONE,
             write_consistency_level=ConsistencyLevel.ONE,
             localDCName=None, credentials=None,
             pool_size=5, max_overflow=0, timeout=0.5):
    self.cassandra_connection = ConnectionPool(keyspace, server_list,
                                               credentials=credentials,
                                               pool_size=pool_size,
                                               max_overflow=max_overflow,
                                               timeout=timeout)
    self.cfCache = ColumnFamilyCache(self.cassandra_connection,
                                     read_consistency_level,
                                     write_consistency_level,
                                     credentials)
    self.root = root
    self._nodeCache = NodeCache()
    self.localDCName = localDCName
    self.credentials = credentials
    self.read_consistency_level = read_consistency_level
def setUp(self):
    credentials = {"username": "******", "password": "******"}
    self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                               credentials=credentials)
    self.cf_suplong_sublong = ColumnFamily(self.pool, "SuperLongSubLong")
    self.cf_suplong_subint = ColumnFamily(self.pool, "SuperLongSubInt")
    self.cf_suplong_subtime = ColumnFamily(self.pool, "SuperLongSubTime")
    self.cf_suplong_sublex = ColumnFamily(self.pool, "SuperLongSubLex")
    self.cf_suplong_subascii = ColumnFamily(self.pool, "SuperLongSubAscii")
    self.cf_suplong_subutf8 = ColumnFamily(self.pool, "SuperLongSubUTF8")
    self.cf_suplong_subbytes = ColumnFamily(self.pool, "SuperLongSubBytes")

    # Include each column family exactly once so tearDown cleans all of them
    self.cfs = [self.cf_suplong_sublong, self.cf_suplong_subint,
                self.cf_suplong_subtime, self.cf_suplong_sublex,
                self.cf_suplong_subascii, self.cf_suplong_subutf8,
                self.cf_suplong_subbytes]
def setUp(self):
    credentials = {"username": "******", "password": "******"}
    self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                               credentials=credentials)
    self.cf_valid_long = ColumnFamily(self.pool, "ValidatorLong")
    self.cf_valid_int = ColumnFamily(self.pool, "ValidatorInt")
    self.cf_valid_time = ColumnFamily(self.pool, "ValidatorTime")
    self.cf_valid_lex = ColumnFamily(self.pool, "ValidatorLex")
    self.cf_valid_ascii = ColumnFamily(self.pool, "ValidatorAscii")
    self.cf_valid_utf8 = ColumnFamily(self.pool, "ValidatorUTF8")
    self.cf_valid_bytes = ColumnFamily(self.pool, "ValidatorBytes")

    self.cfs = [self.cf_valid_long, self.cf_valid_int, self.cf_valid_time,
                self.cf_valid_lex, self.cf_valid_ascii, self.cf_valid_utf8,
                self.cf_valid_bytes]
def test_queue_failure_on_retry(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError
    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure
    pool._replace_wrapper = raiser

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry

    pool.dispose()
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pycassa.system_manager import SystemManager
from pycassa.system_manager import SIMPLE_STRATEGY
from pycassa.system_manager import UTF8_TYPE
from pycassa.system_manager import ASCII_TYPE
import time

############################## Create Keyspace ################################
server_list = ['cassandra1:9160', 'cassandra2:9160', 'cassandra3:9160']

sys = SystemManager(server_list[0])
sys.list_keyspaces()
if 'entries' in sys.list_keyspaces():
    sys.drop_keyspace('entries')
sys.create_keyspace('entries', SIMPLE_STRATEGY, {'replication_factor': '1'})

############################## Connection Pooling ##############################
#pool = ConnectionPool('entries', server_list=server_list, pool_size=20)
pool = ConnectionPool('entries', server_list=server_list)

############################## Create Column Family ###########################
sys.create_column_family('entries', 'Author', comparator_type=UTF8_TYPE)
author_cf = ColumnFamily(pool, 'Author')

################################ INSERT ########################################
# Insert a row with a Column
author_cf.insert('sacharya', {'first_name': 'Sudarshan'})

# Insert a row with multiple columns
author_cf.insert('sacharya1', {'first_name': 'Sudarshan',
                               'last_name': 'Acharya'})

# Insert multiple rows
author_cf.batch_insert({'rowkey1': {'first_name': 'Sudarshan',
                                    'last_name': 'Acharya'},
                        'rowkey2': {'first_name': 'Sudarshan',
                                    'last_name': 'Acharya'}})
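################################ READ ##########################################
# A minimal sketch of reading the data back, using the pool and author_cf
# defined above; the row keys are the ones inserted in the INSERT section.

# Fetch a single row (returns an ordered mapping of column -> value)
print author_cf.get('sacharya')

# Fetch several rows at once
print author_cf.multiget(['rowkey1', 'rowkey2'])

# Iterate over all rows in the column family
for key, columns in author_cf.get_range():
    print key, columns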
def test_basic_pools(self):
    pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
    cf = ColumnFamily(pool, 'Standard1')
    cf.insert('key1', {'col': 'val'})
    pool.dispose()
class TestStandardCFs(unittest.TestCase):

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf = ColumnFamily(self.pool, "Standard2")

        self.cf_long = ColumnFamily(self.pool, "StdLong")
        self.cf_int = ColumnFamily(self.pool, "StdInteger")
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")
        self.cf_lex = ColumnFamily(self.pool, "StdLexicalUUID")
        self.cf_ascii = ColumnFamily(self.pool, "StdAscii")
        self.cf_utf8 = ColumnFamily(self.pool, "StdUTF8")
        self.cf_bytes = ColumnFamily(self.pool, "StdBytes")

        self.cfs = [self.cf_long, self.cf_int, self.cf_time, self.cf_lex,
                    self.cf_ascii, self.cf_utf8, self.cf_bytes]

    def tearDown(self):
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def make_group(self, cf, cols):
        diction = {cols[0]: VALS[0], cols[1]: VALS[1], cols[2]: VALS[2]}
        return {"cf": cf, "cols": cols, "dict": diction}

    def test_standard_column_family(self):
        # For each data type, create a group that includes its column family,
        # a set of column names, and a dictionary that maps from the column
        # names to values.
        type_groups = []

        long_cols = [1111111111111111L, 2222222222222222L, 3333333333333333L]
        type_groups.append(self.make_group(self.cf_long, long_cols))

        int_cols = [1, 2, 3]
        type_groups.append(self.make_group(self.cf_int, int_cols))

        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_group(self.cf_time, time_cols))

        lex_cols = [uuid.UUID(bytes="aaa aaa aaa aaaa"),
                    uuid.UUID(bytes="bbb bbb bbb bbbb"),
                    uuid.UUID(bytes="ccc ccc ccc cccc")]
        type_groups.append(self.make_group(self.cf_lex, lex_cols))

        ascii_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_ascii, ascii_cols))

        utf8_cols = [u"a\u0020", u"b\u0020", u"c\u0020"]
        type_groups.append(self.make_group(self.cf_utf8, utf8_cols))

        bytes_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_bytes, bytes_cols))

        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get("cf")
            gdict = group.get("dict")
            gcols = group.get("cols")

            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)

            # Check each column individually
            for i in range(3):
                assert_equal(cf.get(KEYS[0], columns=[gcols[i]]),
                             {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            assert_equal(cf.get(KEYS[0], columns=gcols[:]), gdict)
            # The same thing with a start and end instead
            assert_equal(cf.get(KEYS[0], column_start=gcols[0],
                                column_finish=gcols[2]), gdict)
            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=gcols[0],
                                column_finish=gcols[0]),
                         {gcols[0]: VALS[0]})

            assert_equal(cf.get_count(KEYS[0]), 3)

            # Test removing columns
            cf.remove(KEYS[0], columns=gcols[:1])
            assert_equal(cf.get_count(KEYS[0]), 2)
            cf.remove(KEYS[0], columns=gcols[1:])
            assert_equal(cf.get_count(KEYS[0]), 0)

            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)

            ### multiget() tests ###

            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[2:])
            assert_equal(res.get(KEYS[2]), gdict)

            # Check each column individually
            for i in range(3):
                res = cf.multiget(KEYS[:], columns=[gcols[i]])
                for j in range(3):
                    assert_equal(res.get(KEYS[j]), {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            res = cf.multiget(KEYS[:], columns=gcols[:])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # The same thing with a start and end instead
            res = cf.multiget(KEYS[:], column_start=gcols[0],
                              column_finish=gcols[2])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # A start and end that are the same
            res = cf.multiget(KEYS[:], column_start=gcols[0],
                              column_finish=gcols[0])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), {gcols[0]: VALS[0]})

            ### get_range() tests ###

            res = cf.get_range(start=KEYS[0])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], column_start=gcols[0],
                               column_finish=gcols[2])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], columns=gcols[:])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)
def tearDown(self):
    pool = ConnectionPool('PycassaTestKeyspace')
    cf = ColumnFamily(pool, 'Standard1')
    for key, cols in cf.get_range():
        cf.remove(key)
def test_queue_pool_threadlocal(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True)
    conns = []

    assert_equal(stats_logger.stats['created']['success'], 5)

    # These connections should all be the same
    for i in range(10):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    assert_equal(stats_logger.stats['checked_out'], 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['checked_in'], 1)
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['checked_in'], 1)

    conns = []

    assert_equal(stats_logger.stats['created']['success'], 5)

    # A single connection should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    assert_equal(stats_logger.stats['checked_out'], 2)

    for conn in conns:
        pool.return_conn(conn)

    conns = []
    threads = []
    stats_logger.reset()

    def checkout_return():
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(stats_logger.stats['created']['success'], 0)  # Still 5 connections in pool
    assert_equal(stats_logger.stats['checked_out'], 5)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # These should come from the pool
    threads = []
    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['created']['success'], 0)
    assert_equal(stats_logger.stats['checked_out'], 10)
    assert_equal(stats_logger.stats['checked_in'], 10)

    pool.dispose()
def setUp(self):
    credentials = {"username": "******", "password": "******"}
    self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                               credentials=credentials)
    self.cf_def_valid = ColumnFamily(self.pool, "DefaultValidator")
def create_cfs(self): """ Creates the Cassandra Column Families (if not exist) """ sys_mgr = None pool = None try: sys_mgr = SystemManager() pool = ConnectionPool(settings.KEYSPACE, server_list=settings.CASSANDRA_HOSTS) try: cf = ColumnFamily(pool, CF_LOGS) except: logger.info("create_cfs(): Creating column family %s", CF_LOGS) #======================================== # Column key -> CompositeType #======================================== # 1. UUID + Timestamp # 2. Host / Origin # 3. Application # 4. Severiry comparator = CompositeType( TimeUUIDType(), UTF8Type(), UTF8Type(), UTF8Type() ) sys_mgr.create_column_family(settings.KEYSPACE, CF_LOGS, comparator_type=comparator) cf = ColumnFamily(pool, CF_LOGS) # cf.get_count(str(uuid.uuid4())) try: cf = ColumnFamily(pool, CF_METADATA) except: logger.info("create_cfs(): Creating column family %s", CF_METADATA) sys_mgr.create_column_family(settings.KEYSPACE, CF_METADATA, comparator_type=UTF8Type()) cf = ColumnFamily(pool, CF_METADATA) cf.get_count(str(uuid.uuid4())) try: cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP) except: logger.info("create_cfs(): Creating column family %s", CF_TIMESTAMP_BITMAP) sys_mgr.create_column_family(settings.KEYSPACE, CF_TIMESTAMP_BITMAP, comparator_type=IntegerType()) cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP) try: cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS) except: logger.info("create_cfs(): Creating column family %s", CF_MULTI_MESSAGELOGS) sys_mgr.create_column_family(settings.KEYSPACE, CF_MULTI_MESSAGELOGS, comparator_type=UTF8Type()) cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS) sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS, 'meta:host', UTF8_TYPE, index_name='multimsg_host_index') sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS, 'meta:application', UTF8_TYPE, index_name='multimsg_application_index') sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS, 'meta:status', UTF8_TYPE, index_name='multimsg_finish_status_index') finally: if pool: pool.dispose() if sys_mgr: sys_mgr.close()
class TestTimeUUIDs(unittest.TestCase):

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")

    def tearDown(self):
        for key, cols in self.cf_time.get_range():
            self.cf_time.remove(key)
        self.pool.dispose()

    def test_datetime_to_uuid(self):
        key = "key1"

        timeline = []
        timeline.append(datetime.now())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(1)

        timeline.append(datetime.now())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(1)

        timeline.append(datetime.now())

        cols = {time1: "0", time2: "1"}

        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key, column_start=timeline[1],
                                      column_finish=timeline[2]), col2)

    def test_time_to_uuid(self):
        key = "key1"

        timeline = []
        timeline.append(time.time())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(0.1)

        timeline.append(time.time())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(0.1)

        timeline.append(time.time())

        cols = {time1: "0", time2: "1"}

        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0],
                                      column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key, column_start=timeline[1],
                                      column_finish=timeline[2]), col2)

    def test_auto_time_to_uuid1(self):
        key = "key"
        t = time.time()
        col = {t: "foo"}
        self.cf_time.insert(key, col)
        uuid_res = self.cf_time.get(key).keys()[0]
        timestamp = convert_uuid_to_time(uuid_res)
        assert_almost_equal(timestamp, t, places=3)
def test_queue_pool(self):
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    print "in test:", id(conns[-1])
    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
def setUp(self):
    credentials = {"username": "******", "password": "******"}
    self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                               credentials=credentials)
    self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")
class TestSuperSubCFs(unittest.TestCase):

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_suplong_sublong = ColumnFamily(self.pool, "SuperLongSubLong")
        self.cf_suplong_subint = ColumnFamily(self.pool, "SuperLongSubInt")
        self.cf_suplong_subtime = ColumnFamily(self.pool, "SuperLongSubTime")
        self.cf_suplong_sublex = ColumnFamily(self.pool, "SuperLongSubLex")
        self.cf_suplong_subascii = ColumnFamily(self.pool, "SuperLongSubAscii")
        self.cf_suplong_subutf8 = ColumnFamily(self.pool, "SuperLongSubUTF8")
        self.cf_suplong_subbytes = ColumnFamily(self.pool, "SuperLongSubBytes")

        # Include each column family exactly once so tearDown cleans all of them
        self.cfs = [self.cf_suplong_sublong, self.cf_suplong_subint,
                    self.cf_suplong_subtime, self.cf_suplong_sublex,
                    self.cf_suplong_subascii, self.cf_suplong_subutf8,
                    self.cf_suplong_subbytes]

    def tearDown(self):
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def make_sub_group(self, cf, cols):
        diction = {123L: {cols[0]: VALS[0],
                          cols[1]: VALS[1],
                          cols[2]: VALS[2]}}
        return {"cf": cf, "cols": cols, "dict": diction}

    def test_super_column_family_subs(self):
        # For each data type, create a group that includes its column family,
        # a set of column names, and a dictionary that maps from the column
        # names to values.
        type_groups = []

        long_cols = [1111111111111111L, 2222222222222222L, 3333333333333333L]
        type_groups.append(self.make_sub_group(self.cf_suplong_sublong, long_cols))

        int_cols = [1, 2, 3]
        type_groups.append(self.make_sub_group(self.cf_suplong_subint, int_cols))

        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_sub_group(self.cf_suplong_subtime, time_cols))

        lex_cols = [uuid.UUID(bytes="aaa aaa aaa aaaa"),
                    uuid.UUID(bytes="bbb bbb bbb bbbb"),
                    uuid.UUID(bytes="ccc ccc ccc cccc")]
        type_groups.append(self.make_sub_group(self.cf_suplong_sublex, lex_cols))

        ascii_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_sub_group(self.cf_suplong_subascii, ascii_cols))

        utf8_cols = [u"a\u0020", u"b\u0020", u"c\u0020"]
        type_groups.append(self.make_sub_group(self.cf_suplong_subutf8, utf8_cols))

        bytes_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_sub_group(self.cf_suplong_subbytes, bytes_cols))

        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get("cf")
            gdict = group.get("dict")

            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)
            assert_equal(cf.get(KEYS[0], columns=[123L]), gdict)

            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=123L,
                                column_finish=123L), gdict)
            assert_equal(cf.get_count(KEYS[0]), 1)

            # Test removing the super column
            cf.remove(KEYS[0], super_column=123L)
            assert_equal(cf.get_count(KEYS[0]), 0)

            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)

            ### multiget() tests ###

            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[2:])
            assert_equal(res.get(KEYS[2]), gdict)

            res = cf.multiget(KEYS[:], columns=[123L])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[:], super_column=123L)
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict.get(123L))

            res = cf.multiget(KEYS[:], column_start=123L, column_finish=123L)
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            ### get_range() tests ###

            res = cf.get_range(start=KEYS[0])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], column_start=123L,
                               column_finish=123L)
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], columns=[123L])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], super_column=123L)
            for sub_res in res:
                assert_equal(sub_res[1], gdict.get(123L))
def test_queue_pool_threadlocal(self):
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)
    conns = []

    assert_equal(listener.connect_count, 5)

    # These connections should all be the same
    for i in range(10):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.checkin_count, 1)
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.checkin_count, 1)

    conns = []

    assert_equal(listener.connect_count, 5)

    # A single connection should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 2)

    for conn in conns:
        pool.return_conn(conn)

    conns = []
    threads = []
    listener.reset()

    def checkout_return():
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()

    assert_equal(listener.connect_count, 0)  # Still 5 connections in pool
    assert_equal(listener.checkout_count, 5)
    assert_equal(listener.checkin_count, 5)

    # These should come from the pool
    threads = []
    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.connect_count, 0)
    assert_equal(listener.checkout_count, 10)
    assert_equal(listener.checkin_count, 10)

    pool.dispose()
def setup_module():
    global pool
    credentials = {'username': '******', 'password': '******'}
    pool = ConnectionPool(TEST_KS, pool_size=10, credentials=credentials)
def db_export(self):
    db_contents = {'cassandra': {}, 'zookeeper': {}}
    cassandra_contents = db_contents['cassandra']

    creds = None
    if self._api_args.cassandra_user and self._api_args.cassandra_password:
        creds = {'username': self._api_args.cassandra_user,
                 'password': self._api_args.cassandra_password}

    socket_factory = default_socket_factory
    if ('cassandra_use_ssl' in self._api_args and
            self._api_args.cassandra_use_ssl):
        socket_factory = self._make_ssl_socket_factory(
            self._api_args.cassandra_ca_certs, validate=False)

    sys_mgr = SystemManager(self._api_args.cassandra_server_list[0],
                            credentials=creds,
                            socket_factory=socket_factory)
    existing_keyspaces = sys_mgr.list_keyspaces()
    for ks_name in set(KEYSPACES) - set(self._args.omit_keyspaces or []):
        if self._api_args.cluster_id:
            full_ks_name = '%s_%s' % (self._api_args.cluster_id, ks_name)
        else:
            full_ks_name = ks_name
        if full_ks_name not in existing_keyspaces:
            continue
        cassandra_contents[ks_name] = {}

        pool = ConnectionPool(full_ks_name,
                              self._api_args.cassandra_server_list,
                              pool_timeout=120, max_retries=-1, timeout=5,
                              socket_factory=socket_factory,
                              credentials=creds)
        for cf_name in sys_mgr.get_keyspace_column_families(full_ks_name):
            cassandra_contents[ks_name][cf_name] = {}
            cf = ColumnFamily(pool, cf_name,
                              buffer_size=self._args.buffer_size)
            for r, c in cf.get_range(column_count=10000000,
                                     include_timestamp=True):
                cassandra_contents[ks_name][cf_name][r] = c
    logger.info("Cassandra DB dumped")

    def get_nodes(path):
        if path[:-1].rpartition('/')[-1] in self._zk_ignore_list:
            return []
        try:
            if not zk.get_children(path):
                return [(path, zk.get(path))]
        except kazoo.exceptions.NoNodeError:
            return []
        nodes = []
        for child in zk.get_children(path):
            nodes.extend(get_nodes('%s%s/' % (path, child)))
        return nodes

    zk = kazoo.client.KazooClient(self._api_args.zk_server_ip)
    zk.start()
    nodes = get_nodes(self._api_args.cluster_id + '/')
    zk.stop()
    db_contents['zookeeper'] = json.dumps(nodes)
    logger.info("Zookeeper DB dumped")

    f = open(self._args.export_to, 'w')
    try:
        f.write(json.dumps(db_contents))
    finally:
        f.close()
    logger.info("DB dump written to file %s" % self._args.export_to)