def test_queue_failure_with_no_retries(self):
    """Counter writes must never be retried: a single connection failure
    should surface as MaximumRetryException after exactly one recorded
    failure, even though the pool allows up to 3 retries."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Sabotage every pooled connection so its next mutate call fails once.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    counter_cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException,
                  counter_cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(stats_logger.stats['failed'], 1)  # didn't retry at all
    pool.dispose()
def test_queue_failure_on_retry(self):
    """When every server keeps failing, the insert must give up with
    MaximumRetryException once the retry budget (3 retries, so 4 total
    attempts) is exhausted."""
    recorder = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError

    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure.
    pool._replace_wrapper = raiser

    # Break every pooled connection so its first use fails.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException,
                  cf.insert, 'key', {'col': 'val', 'col2': 'val'})
    assert_equal(recorder.failure_count, 4)  # On the 4th failure, didn't retry
    pool.dispose()
def test_queue_failover(self):
    """A single-connection pool with a redundant server list should fail
    over transparently: each corrupted connection fails once, yet every
    insert still succeeds.  Exercised with and without prefill."""
    for prefill in (True, False):
        recorder = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[recorder], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for attempt in range(1, 5):
            # Arrange for the pooled connection's next mutate to fail.
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()

            # The first insert attempt should fail, but failover should
            # occur and the insert should succeed.
            cf.insert('key', {'col': 'val%d' % attempt, 'col2': 'val'})
            assert_equal(recorder.failure_count, attempt)
            assert_equal(cf.get('key'),
                         {'col': 'val%d' % attempt, 'col2': 'val'})

        pool.dispose()
def test_server_list_func(self):
    """server_list may be a callable: it must be invoked exactly once
    and its result reported to the pool's listeners."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool('PycassaTestKeyspace',
                          prefill=False,
                          listeners=[stats_logger],
                          server_list=_get_list)
    assert_equal(stats_logger.serv_list, ['foo:bar'])
    assert_equal(stats_logger.stats['list'], 1)
    pool.dispose()
def test_server_list_func(self):
    """A callable server_list should be evaluated once, and the resulting
    host list should be visible to listeners.

    NOTE(review): this method shares its name with another
    test_server_list_func in this file; the later definition shadows the
    earlier one — confirm only one is intended to run.
    """
    recorder = _TestListener()
    pool = ConnectionPool('PycassaTestKeyspace',
                          prefill=False,
                          listeners=[recorder],
                          server_list=_get_list)
    assert_equal(recorder.serv_list, ['foo:bar'])
    assert_equal(recorder.list_count, 1)
    pool.dispose()
class TestDefaultValidators(unittest.TestCase):
    """Tests for a column family with a default validation class plus a
    column-specific validator (the "DefaultValidator" CF)."""

    def setUp(self):
        # NOTE(review): credentials appear scrubbed by a secret-masking
        # tool ("******"); confirm real test credentials are supplied.
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_def_valid = ColumnFamily(self.pool, "DefaultValidator")

    def tearDown(self):
        # Remove every row written by the test, then release connections.
        for key, cols in self.cf_def_valid.get_range():
            self.cf_def_valid.remove(key)
        self.pool.dispose()

    def test_default_validated_columns(self):
        key = "key1"
        col_cf = {"aaaaaa": 1L}      # matches the CF-wide default validator
        col_cm = {"subcol": TIME1}   # matches the 'subcol' column validator
        col_ncf = {"aaaaaa": TIME1}  # violates the CF-wide default validator
        col_ncm = {"subcol": 1L}     # violates the 'subcol' column validator

        # Both of these inserts work, as cf allows
        # longs and cm for 'subcol' allows TIMEUUIDs.
        self.cf_def_valid.insert(key, col_cf)
        self.cf_def_valid.insert(key, col_cm)
        assert self.cf_def_valid.get(key) == {"aaaaaa": 1L, "subcol": TIME1}

        # Type-mismatched values must be rejected client-side.
        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncf)
        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncm)
def test_queue_threadlocal_retry_limit(self):
    """With thread-local connections, an insert against all-broken
    connections must stop after max_retries (3), i.e. after the 4th
    recorded failure."""
    recorder = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Break all pooled connections before use.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException,
                  cf.insert, 'key', {'col': 'val', 'col2': 'val'})
    assert_equal(recorder.failure_count, 4)  # on the 4th failure, didn't retry
    pool.dispose()
def test_queue_threadlocal_failover(self):
    """Failover behavior with use_threadlocal=True.

    Part 1: a single-connection pool against a redundant server list;
    each corrupted connection fails once, fails over, and the insert
    still succeeds.  Part 2: five threads each drive a corrupted
    connection concurrently; all five failures are counted and every
    insert completes.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(1, 5):
        # Corrupt the pooled connection so its next mutate fails once.
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(stats_logger.stats['failed'], i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
    pool.dispose()

    stats_logger.reset()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Corrupt all five pooled connections.
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    # One insert per thread; each should fail over exactly once.
    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['failed'], 5)
    pool.dispose()
def create_cfs(self):
    """Create the Cassandra column families for log storage if missing.

    Opens a throwaway SystemManager/ConnectionPool pair and, for each
    required CF, probes it with ColumnFamily(); if the probe raises, the
    CF is assumed missing and is created with a TimeUUID comparator.
    Both resources are always released in the finally block.
    """
    sys_mgr = None
    pool = None
    try:
        sys_mgr = SystemManager()
        pool = ConnectionPool(settings.KEYSPACE,
                              server_list=settings.CASSANDRA_HOSTS)
        for cf_name in [CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST,
                        CF_LOGS_BY_SEVERITY]:
            try:
                cf = ColumnFamily(pool, cf_name)
            except Exception:
                # Narrowed from a bare "except:", which would also have
                # swallowed SystemExit and KeyboardInterrupt.
                logger.info("create_cfs(): Creating column family %s", cf_name)
                sys_mgr.create_column_family(settings.KEYSPACE, cf_name,
                                             comparator_type=TimeUUIDType())
                cf = ColumnFamily(pool, cf_name)
            # Touch the CF with a random key so a broken CF fails fast here.
            cf.get_count(str(uuid.uuid4()))
    finally:
        if pool:
            pool.dispose()
        if sys_mgr:
            sys_mgr.close()
def test_queue_pool_recycle(self):
    """With recycle=1, each pooled connection is replaced after a single
    operation: 10 inserts over 5 connections yield 5 recycle events,
    with and without thread-local connections."""
    recorder = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=True, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder], use_threadlocal=False)
    cf = ColumnFamily(pool, 'Standard1')
    columns = {'col1': 'val', 'col2': 'val'}
    for _ in range(10):
        cf.insert('key', columns)
    assert_equal(recorder.recycle_count, 5)
    pool.dispose()
    recorder.reset()

    # Try again with thread-local connections enabled.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=False, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder], use_threadlocal=True)
    cf = ColumnFamily(pool, 'Standard1')
    for _ in range(10):
        cf.insert('key', columns)
    pool.dispose()
    assert_equal(recorder.recycle_count, 5)
class TestValidators(unittest.TestCase):
    """One column family per validator type; each check inserts a value
    of the matching Python type and reads it back unchanged."""

    def setUp(self):
        # NOTE(review): credentials appear scrubbed by a secret-masking
        # tool ("******"); confirm real test credentials are supplied.
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_valid_long = ColumnFamily(self.pool, "ValidatorLong")
        self.cf_valid_int = ColumnFamily(self.pool, "ValidatorInt")
        self.cf_valid_time = ColumnFamily(self.pool, "ValidatorTime")
        self.cf_valid_lex = ColumnFamily(self.pool, "ValidatorLex")
        self.cf_valid_ascii = ColumnFamily(self.pool, "ValidatorAscii")
        self.cf_valid_utf8 = ColumnFamily(self.pool, "ValidatorUTF8")
        self.cf_valid_bytes = ColumnFamily(self.pool, "ValidatorBytes")

        # All CFs that tests may write to; tearDown cleans each one.
        self.cfs = [self.cf_valid_long, self.cf_valid_int,
                    self.cf_valid_time, self.cf_valid_lex,
                    self.cf_valid_ascii, self.cf_valid_utf8,
                    self.cf_valid_bytes]

    def tearDown(self):
        # Remove every row from every CF, then release connections.
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def test_validated_columns(self):
        key = "key1"

        # Long validator
        col = {"subcol": 1L}
        self.cf_valid_long.insert(key, col)
        assert self.cf_valid_long.get(key) == col

        # Integer validator
        col = {"subcol": 1}
        self.cf_valid_int.insert(key, col)
        assert self.cf_valid_int.get(key) == col

        # TimeUUID validator
        col = {"subcol": TIME1}
        self.cf_valid_time.insert(key, col)
        assert self.cf_valid_time.get(key) == col

        # LexicalUUID validator
        col = {"subcol": uuid.UUID(bytes="aaa aaa aaa aaaa")}
        self.cf_valid_lex.insert(key, col)
        assert self.cf_valid_lex.get(key) == col

        # ASCII validator
        col = {"subcol": "aaa"}
        self.cf_valid_ascii.insert(key, col)
        assert self.cf_valid_ascii.get(key) == col

        # UTF-8 validator
        col = {"subcol": u"a\u0020"}
        self.cf_valid_utf8.insert(key, col)
        assert self.cf_valid_utf8.get(key) == col

        # Bytes validator
        col = {"subcol": "aaa"}
        self.cf_valid_bytes.insert(key, col)
        assert self.cf_valid_bytes.get(key) == col
def test_queue_pool(self):
    """Exercise checkout/checkin/overflow/close accounting on a
    non-threadlocal queue pool (pool_size=5, max_overflow=5).

    Fixed: removed a leftover debug print of the last connection's
    id() that polluted test output.
    """
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.max_count, 1)

    # The first five go back to the queue; the five overflow
    # connections are closed on return.
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 0)
    assert_equal(listener.checkin_count, 5)

    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 5)
    assert_equal(listener.checkin_count, 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 15)
    assert_equal(listener.checkout_count, 20)

    assert_equal(listener.close_count, 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    # Returning an already-returned connection is an error and must not
    # change the counters.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    pool.dispose()
    assert_equal(listener.dispose_count, 1)
def test_queue_pool(self):
    """Exercise checkout/checkin/overflow/dispose accounting on a
    non-threadlocal queue pool (pool_size=5, max_overflow=5), using the
    StatsLogger listener.

    Fixed: removed a leftover debug print of the last connection's
    id() that polluted test output.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    # The first five go back to the queue; the five overflow
    # connections are disposed of on return.
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # A double return must not change the counters.
    # NOTE(review): the listener-based variant of this test expects
    # InvalidRequestError here; this version tolerates the call —
    # confirm which behavior the pool implementation guarantees.
    conns[-1].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
def test_pool_connection_failure(self):
    """Connection failures against a server list containing one dead
    host ('foobar:1') are counted in the listener stats, both with and
    without thread-local connections.

    The pool permutes the server list, so the exact failure count
    depends on which host is tried first; get_extra() compensates.
    """
    stats_logger = StatsLoggerWithListStorage()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        if stats_logger.serv_list[0] == 'localhost:9160':
            return 0
        else:
            return 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    # Prefilling 5 connections over a half-dead server list produces 4
    # failures (one more when the dead host is tried first).
    assert_equal(stats_logger.stats['failed'], 4 + get_extra())
    for i in range(0, 7):
        pool.get()
    assert_equal(stats_logger.stats['failed'], 6 + get_extra())
    pool.dispose()

    stats_logger.reset()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(stats_logger.stats['failed'], 4 + get_extra())
    # Same checkout pattern, but one get() per thread.
    threads = []
    for i in range(0, 7):
        threads.append(threading.Thread(target=pool.get))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['failed'], 6 + get_extra())
    pool.dispose()
def test_pool_invalid_request(self):
    """Errors that are the caller's fault (invalid requests, missing
    rows) must propagate immediately rather than being retried."""
    recorder = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=3,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder],
                          use_threadlocal=False,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')
    # Make sure the pool doesn't hide and retry invalid requests.
    assert_raises(InvalidRequestException, cf.add, 'key', 'col')
    assert_raises(NotFoundException, cf.get, 'none')
    pool.dispose()
def test_queue_threadlocal_failover(self):
    """Failover with use_threadlocal=True, via the _TestListener.

    Part 1: a single-connection pool against a redundant server list;
    each corrupted connection fails once, fails over, and the insert
    still succeeds.  Part 2: five threads each drive a corrupted
    connection concurrently; all five failures are counted.
    """
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for i in range(1, 5):
        # Corrupt the pooled connection so its next mutate fails once.
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(listener.failure_count, i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
    pool.dispose()

    listener.reset()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Corrupt all five pooled connections.
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    # One insert per thread; each should fail over exactly once.
    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.failure_count, 5)
    pool.dispose()
def test_pool_invalid_request(self):
    """Client-side errors (invalid request, missing row) must surface
    immediately instead of being masked by the retry machinery."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=3,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger],
                          use_threadlocal=False,
                          server_list=['localhost:9160'])
    standard1 = ColumnFamily(pool, 'Standard1')
    # The pool must not hide or retry invalid requests.
    assert_raises(InvalidRequestException, standard1.add, 'key', 'col')
    assert_raises(NotFoundException, standard1.get, 'none')
    pool.dispose()
def test_pool_connection_failure(self):
    """Connection failures against a server list containing one dead
    host ('foobar:1') are counted by the listener, both with and without
    thread-local connections.

    The pool permutes the server list, so the exact failure count
    depends on which host is tried first; get_extra() compensates.
    """
    listener = _TestListener()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        if listener.serv_list[0] == 'localhost:9160':
            return 0
        else:
            return 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    # Prefilling 5 connections over a half-dead server list produces 4
    # failures (one more when the dead host is tried first).
    assert_equal(listener.failure_count, 4 + get_extra())
    for i in range(0, 7):
        pool.get()
    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()

    listener.reset()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(listener.failure_count, 4 + get_extra())
    # Same checkout pattern, but one get() per thread.
    threads = []
    for i in range(0, 7):
        threads.append(threading.Thread(target=pool.get))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()
def test_queue_failure_with_no_retries(self):
    """Counter mutations are never retried: one failure yields an
    immediate MaximumRetryException and exactly one recorded failure,
    even with max_retries=3."""
    recorder = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[recorder], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Break every pooled connection before use.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    counter_cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException,
                  counter_cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(recorder.failure_count, 1)  # didn't retry at all
    pool.dispose()
def test_queue_threadlocal_retry_limit(self):
    """Thread-local variant of the retry-limit check: with every pooled
    connection broken, the insert stops after max_retries (3), i.e. on
    the 4th recorded failure."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Break all pooled connections before use.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException,
                  cf.insert, 'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # on the 4th failure, didn't retry
    pool.dispose()
def test_queue_pool_recycle(self):
    """recycle=1 forces each pooled connection to be replaced after one
    operation: 10 inserts over 5 connections produce 5 recycle events,
    with and without thread-local connections."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=True, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    cf = ColumnFamily(pool, 'Standard1')
    columns = {'col1': 'val', 'col2': 'val'}
    for _ in range(10):
        cf.insert('key', columns)
    assert_equal(stats_logger.stats['recycled'], 5)
    pool.dispose()
    stats_logger.reset()

    # Repeat with thread-local connections enabled.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=False, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True)
    cf = ColumnFamily(pool, 'Standard1')
    for _ in range(10):
        cf.insert('key', columns)
    pool.dispose()
    assert_equal(stats_logger.stats['recycled'], 5)
def create_cfs(self):
    """Create the Cassandra column families (if they do not exist).

    Probes each required CF with ColumnFamily(); on failure the CF is
    assumed missing and created with a TimeUUID comparator.  The
    SystemManager and ConnectionPool are always released in the finally
    block.
    """
    sys_mgr = None
    pool = None
    try:
        sys_mgr = SystemManager()
        pool = ConnectionPool(settings.KEYSPACE,
                              server_list=settings.CASSANDRA_HOSTS)
        for cf_name in [CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST,
                        CF_LOGS_BY_SEVERITY]:
            try:
                cf = ColumnFamily(pool, cf_name)
            except Exception:
                # Narrowed from a bare "except:", which would also have
                # swallowed SystemExit and KeyboardInterrupt.
                logger.info("create_cfs(): Creating column family %s", cf_name)
                sys_mgr.create_column_family(settings.KEYSPACE, cf_name,
                                             comparator_type=TimeUUIDType())
                cf = ColumnFamily(pool, cf_name)
            # Probe the CF with a random key so problems surface here.
            cf.get_count(str(uuid.uuid4()))
    finally:
        if pool:
            pool.dispose()
        if sys_mgr:
            sys_mgr.close()
def test_queue_failure_on_retry(self):
    """If every server keeps failing — including during connection
    replacement — the insert must give up with MaximumRetryException
    after the retry budget (3 retries, 4 attempts) is spent."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError

    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure.
    pool._replace_wrapper = raiser

    # Break every pooled connection so its first use fails.
    for _ in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException,
                  cf.insert, 'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry
    pool.dispose()
def test_queue_failover(self):
    """Single-connection pool with a redundant server list: each
    corrupted connection fails once, fails over, and the insert still
    succeeds.  Exercised with prefill both on and off."""
    for prefill in (True, False):
        recorder = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[recorder], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for attempt in range(1, 5):
            # Arrange for the pooled connection's next mutate to fail.
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()

            # The first insert attempt should fail, but failover should
            # occur and the insert should succeed.
            cf.insert('key', {'col': 'val%d' % attempt, 'col2': 'val'})
            assert_equal(recorder.failure_count, attempt)
            assert_equal(cf.get('key'),
                         {'col': 'val%d' % attempt, 'col2': 'val'})

        pool.dispose()
def create_cfs(self):
    """Create the Cassandra column families used by the logging system
    if they do not already exist.

    Each CF is probed with ColumnFamily(); if that raises, the CF is
    assumed missing and is created with the appropriate comparator.
    The SystemManager and ConnectionPool are always released in the
    finally block.
    """
    sys_mgr = None
    pool = None
    try:
        sys_mgr = SystemManager()
        pool = ConnectionPool(settings.KEYSPACE,
                              server_list=settings.CASSANDRA_HOSTS)

        try:
            cf = ColumnFamily(pool, CF_LOGS)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            logger.info("create_cfs(): Creating column family %s", CF_LOGS)
            #========================================
            # Column key -> CompositeType
            #========================================
            # 1. UUID + Timestamp
            # 2. Host / Origin
            # 3. Application
            # 4. Severity
            comparator = CompositeType(TimeUUIDType(),
                                       UTF8Type(),
                                       UTF8Type(),
                                       UTF8Type())
            sys_mgr.create_column_family(settings.KEYSPACE, CF_LOGS,
                                         comparator_type=comparator)
            cf = ColumnFamily(pool, CF_LOGS)
            # cf.get_count(str(uuid.uuid4()))

        try:
            cf = ColumnFamily(pool, CF_METADATA)
        except Exception:
            logger.info("create_cfs(): Creating column family %s", CF_METADATA)
            sys_mgr.create_column_family(settings.KEYSPACE, CF_METADATA,
                                         comparator_type=UTF8Type())
            cf = ColumnFamily(pool, CF_METADATA)
            cf.get_count(str(uuid.uuid4()))

        try:
            cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP)
        except Exception:
            logger.info("create_cfs(): Creating column family %s",
                        CF_TIMESTAMP_BITMAP)
            sys_mgr.create_column_family(settings.KEYSPACE,
                                         CF_TIMESTAMP_BITMAP,
                                         comparator_type=IntegerType())
            cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP)

        try:
            cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS)
        except Exception:
            logger.info("create_cfs(): Creating column family %s",
                        CF_MULTI_MESSAGELOGS)
            sys_mgr.create_column_family(settings.KEYSPACE,
                                         CF_MULTI_MESSAGELOGS,
                                         comparator_type=UTF8Type())
            cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS)
            # Secondary indexes are created together with the CF;
            # calling create_index on an existing index would fail.
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                                 'meta:host', UTF8_TYPE,
                                 index_name='multimsg_host_index')
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                                 'meta:application', UTF8_TYPE,
                                 index_name='multimsg_application_index')
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                                 'meta:status', UTF8_TYPE,
                                 index_name='multimsg_finish_status_index')
    finally:
        if pool:
            pool.dispose()
        if sys_mgr:
            sys_mgr.close()
class TestStandardCFs(unittest.TestCase):
    """End-to-end get/multiget/get_range tests against one standard
    column family per comparator type."""

    def setUp(self):
        # NOTE(review): credentials appear scrubbed by a secret-masking
        # tool ("******"); confirm real test credentials are supplied.
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf = ColumnFamily(self.pool, "Standard2")

        self.cf_long = ColumnFamily(self.pool, "StdLong")
        self.cf_int = ColumnFamily(self.pool, "StdInteger")
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")
        self.cf_lex = ColumnFamily(self.pool, "StdLexicalUUID")
        self.cf_ascii = ColumnFamily(self.pool, "StdAscii")
        self.cf_utf8 = ColumnFamily(self.pool, "StdUTF8")
        self.cf_bytes = ColumnFamily(self.pool, "StdBytes")

        # CFs the tests write to; tearDown cleans each of these.
        self.cfs = [self.cf_long, self.cf_int, self.cf_time,
                    self.cf_lex, self.cf_ascii, self.cf_utf8,
                    self.cf_bytes]

    def tearDown(self):
        # Remove every row from every typed CF, then release connections.
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def make_group(self, cf, cols):
        """Bundle a CF with its three column names and the expected
        {column: value} dict built from the shared VALS fixture."""
        diction = {cols[0]: VALS[0],
                   cols[1]: VALS[1],
                   cols[2]: VALS[2]}
        return {"cf": cf, "cols": cols, "dict": diction}

    def test_standard_column_family(self):
        # For each data type, create a group that includes its column
        # family, a set of column names, and a dictionary that maps from
        # the column names to values.
        type_groups = []

        long_cols = [1111111111111111L, 2222222222222222L, 3333333333333333L]
        type_groups.append(self.make_group(self.cf_long, long_cols))

        int_cols = [1, 2, 3]
        type_groups.append(self.make_group(self.cf_int, int_cols))

        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_group(self.cf_time, time_cols))

        lex_cols = [uuid.UUID(bytes="aaa aaa aaa aaaa"),
                    uuid.UUID(bytes="bbb bbb bbb bbbb"),
                    uuid.UUID(bytes="ccc ccc ccc cccc")]
        type_groups.append(self.make_group(self.cf_lex, lex_cols))

        ascii_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_ascii, ascii_cols))

        utf8_cols = [u"a\u0020", u"b\u0020", u"c\u0020"]
        type_groups.append(self.make_group(self.cf_utf8, utf8_cols))

        bytes_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_bytes, bytes_cols))

        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get("cf")
            gdict = group.get("dict")
            gcols = group.get("cols")

            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)

            # Check each column individually
            for i in range(3):
                assert_equal(cf.get(KEYS[0], columns=[gcols[i]]),
                             {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            assert_equal(cf.get(KEYS[0], columns=gcols[:]), gdict)
            # The same thing with a start and end instead
            assert_equal(cf.get(KEYS[0], column_start=gcols[0],
                                column_finish=gcols[2]), gdict)
            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=gcols[0],
                                column_finish=gcols[0]),
                         {gcols[0]: VALS[0]})

            assert_equal(cf.get_count(KEYS[0]), 3)

            # Test removing rows
            cf.remove(KEYS[0], columns=gcols[:1])
            assert_equal(cf.get_count(KEYS[0]), 2)

            cf.remove(KEYS[0], columns=gcols[1:])
            assert_equal(cf.get_count(KEYS[0]), 0)

            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)

            ### multiget() tests ###

            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[2:])
            assert_equal(res.get(KEYS[2]), gdict)

            # Check each column individually
            for i in range(3):
                res = cf.multiget(KEYS[:], columns=[gcols[i]])
                for j in range(3):
                    assert_equal(res.get(KEYS[j]), {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            res = cf.multiget(KEYS[:], columns=gcols[:])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # The same thing with a start and end instead
            res = cf.multiget(KEYS[:], column_start=gcols[0],
                              column_finish=gcols[2])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # A start and end that are the same
            res = cf.multiget(KEYS[:], column_start=gcols[0],
                              column_finish=gcols[0])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), {gcols[0]: VALS[0]})

            ### get_range() tests ###

            res = cf.get_range(start=KEYS[0])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], column_start=gcols[0],
                               column_finish=gcols[2])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], columns=gcols[:])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)
class TestSuperSubCFs(unittest.TestCase): def setUp(self): credentials = {"username": "******", "password": "******"} self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1", credentials=credentials) self.cf_suplong_sublong = ColumnFamily(self.pool, "SuperLongSubLong") self.cf_suplong_subint = ColumnFamily(self.pool, "SuperLongSubInt") self.cf_suplong_subtime = ColumnFamily(self.pool, "SuperLongSubTime") self.cf_suplong_sublex = ColumnFamily(self.pool, "SuperLongSubLex") self.cf_suplong_subascii = ColumnFamily(self.pool, "SuperLongSubAscii") self.cf_suplong_subutf8 = ColumnFamily(self.pool, "SuperLongSubUTF8") self.cf_suplong_subbytes = ColumnFamily(self.pool, "SuperLongSubBytes") self.cfs = [ self.cf_suplong_subint, self.cf_suplong_subint, self.cf_suplong_subtime, self.cf_suplong_sublex, self.cf_suplong_subascii, self.cf_suplong_subutf8, self.cf_suplong_subbytes, ] def tearDown(self): for cf in self.cfs: for key, cols in cf.get_range(): cf.remove(key) self.pool.dispose() def make_sub_group(self, cf, cols): diction = {123L: {cols[0]: VALS[0], cols[1]: VALS[1], cols[2]: VALS[2]}} return {"cf": cf, "cols": cols, "dict": diction} def test_super_column_family_subs(self): # For each data type, create a group that includes its column family, # a set of column names, and a dictionary that maps from the column # names to values. 
type_groups = [] long_cols = [1111111111111111L, 2222222222222222L, 3333333333333333L] type_groups.append(self.make_sub_group(self.cf_suplong_sublong, long_cols)) int_cols = [1, 2, 3] type_groups.append(self.make_sub_group(self.cf_suplong_subint, int_cols)) time_cols = [TIME1, TIME2, TIME3] type_groups.append(self.make_sub_group(self.cf_suplong_subtime, time_cols)) lex_cols = [ uuid.UUID(bytes="aaa aaa aaa aaaa"), uuid.UUID(bytes="bbb bbb bbb bbbb"), uuid.UUID(bytes="ccc ccc ccc cccc"), ] type_groups.append(self.make_sub_group(self.cf_suplong_sublex, lex_cols)) ascii_cols = ["aaaa", "bbbb", "cccc"] type_groups.append(self.make_sub_group(self.cf_suplong_subascii, ascii_cols)) utf8_cols = [u"a\u0020", u"b\u0020", u"c\u0020"] type_groups.append(self.make_sub_group(self.cf_suplong_subutf8, utf8_cols)) bytes_cols = ["aaaa", "bbbb", "cccc"] type_groups.append(self.make_sub_group(self.cf_suplong_subbytes, bytes_cols)) # Begin the actual inserting and getting for group in type_groups: cf = group.get("cf") gdict = group.get("dict") cf.insert(KEYS[0], gdict) assert_equal(cf.get(KEYS[0]), gdict) assert_equal(cf.get(KEYS[0], columns=[123L]), gdict) # A start and end that are the same assert_equal(cf.get(KEYS[0], column_start=123L, column_finish=123L), gdict) assert_equal(cf.get_count(KEYS[0]), 1) # Test removing rows cf.remove(KEYS[0], super_column=123L) assert_equal(cf.get_count(KEYS[0]), 0) # Insert more than one row now cf.insert(KEYS[0], gdict) cf.insert(KEYS[1], gdict) cf.insert(KEYS[2], gdict) ### multiget() tests ### res = cf.multiget(KEYS[:]) for i in range(3): assert_equal(res.get(KEYS[i]), gdict) res = cf.multiget(KEYS[2:]) assert_equal(res.get(KEYS[2]), gdict) res = cf.multiget(KEYS[:], columns=[123L]) for i in range(3): assert_equal(res.get(KEYS[i]), gdict) res = cf.multiget(KEYS[:], super_column=123L) for i in range(3): assert_equal(res.get(KEYS[i]), gdict.get(123L)) res = cf.multiget(KEYS[:], column_start=123L, column_finish=123L) for j in range(3): 
assert_equal(res.get(KEYS[j]), gdict) ### get_range() tests ### res = cf.get_range(start=KEYS[0]) for sub_res in res: assert_equal(sub_res[1], gdict) res = cf.get_range(start=KEYS[0], column_start=123L, column_finish=123L) for sub_res in res: assert_equal(sub_res[1], gdict) res = cf.get_range(start=KEYS[0], columns=[123L]) for sub_res in res: assert_equal(sub_res[1], gdict) res = cf.get_range(start=KEYS[0], super_column=123L) for sub_res in res: assert_equal(sub_res[1], gdict.get(123L))
def test_basic_pools(self):
    """Smoke test: a default ConnectionPool can service one insert
    against Standard1 and then be shut down cleanly."""
    conn_pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
    standard1 = ColumnFamily(conn_pool, 'Standard1')
    standard1.insert('key1', {'col': 'val'})
    conn_pool.dispose()
def test_queue_pool_threadlocal(self):
    """Exercise ConnectionPool with use_threadlocal=True: repeated get()
    calls on a single thread hand back one shared connection (a single
    checkout/checkin event), while separate worker threads each check out
    their own pooled connection. Pool events are observed through a
    StatsLoggerWithListStorage listener."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True)
    conns = []

    # prefill=True opens pool_size connections up front
    assert_equal(stats_logger.stats['created']['success'], 5)

    # These connections should all be the same
    for i in range(10):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    # threadlocal: all 10 gets map to one connection -> one checkout event
    assert_equal(stats_logger.stats['checked_out'], 1)

    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['checked_in'], 1)
    for i in range(5, 10):
        pool.return_conn(conns[i])
    # re-returning the same underlying connection does not check in again
    assert_equal(stats_logger.stats['checked_in'], 1)

    conns = []

    assert_equal(stats_logger.stats['created']['success'], 5)
    # A single connection should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    assert_equal(stats_logger.stats['checked_out'], 2)

    for conn in conns:
        pool.return_conn(conn)

    conns = []
    threads = []
    # reset counters so the threaded phase is measured from zero
    stats_logger.reset()

    def checkout_return():
        # one checkout + checkin per worker thread
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['created']['success'], 0)  # Still 5 connections in pool
    assert_equal(stats_logger.stats['checked_out'], 5)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # These should come from the pool
    threads = []
    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(stats_logger.stats['created']['success'], 0)
    assert_equal(stats_logger.stats['checked_out'], 10)
    assert_equal(stats_logger.stats['checked_in'], 10)

    pool.dispose()
def create_cfs(self):
    """Create the Cassandra column families used by the application, if
    they do not already exist.

    For each CF, instantiating ColumnFamily() is used as the existence
    probe: if it raises, the CF is created via SystemManager and the
    ColumnFamily handle is re-opened. The SystemManager and ConnectionPool
    are always released in the ``finally`` block.

    Side effects: may create CF_LOGS, CF_METADATA, CF_TIMESTAMP_BITMAP
    and CF_MULTI_MESSAGELOGS (plus secondary indexes on the latter).
    """
    sys_mgr = None
    pool = None
    try:
        sys_mgr = SystemManager()
        pool = ConnectionPool(settings.KEYSPACE,
            server_list=settings.CASSANDRA_HOSTS)

        try:
            cf = ColumnFamily(pool, CF_LOGS)
        except Exception:
            # ColumnFamily() raises when the CF is missing; create it.
            # (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            logger.info("create_cfs(): Creating column family %s", CF_LOGS)
            #========================================
            # Column key -> CompositeType
            #========================================
            # 1. UUID + Timestamp
            # 2. Host / Origin
            # 3. Application
            # 4. Severity
            comparator = CompositeType(
                TimeUUIDType(),
                UTF8Type(),
                UTF8Type(),
                UTF8Type())
            sys_mgr.create_column_family(settings.KEYSPACE, CF_LOGS,
                comparator_type=comparator)
            cf = ColumnFamily(pool, CF_LOGS)
            # cf.get_count(str(uuid.uuid4()))

        try:
            cf = ColumnFamily(pool, CF_METADATA)
        except Exception:
            logger.info("create_cfs(): Creating column family %s", CF_METADATA)
            sys_mgr.create_column_family(settings.KEYSPACE, CF_METADATA,
                comparator_type=UTF8Type())
            cf = ColumnFamily(pool, CF_METADATA)
            # Touch the new CF to force a round-trip.
            cf.get_count(str(uuid.uuid4()))

        try:
            cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP)
        except Exception:
            logger.info("create_cfs(): Creating column family %s",
                CF_TIMESTAMP_BITMAP)
            sys_mgr.create_column_family(settings.KEYSPACE,
                CF_TIMESTAMP_BITMAP, comparator_type=IntegerType())
            cf = ColumnFamily(pool, CF_TIMESTAMP_BITMAP)

        try:
            cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS)
        except Exception:
            logger.info("create_cfs(): Creating column family %s",
                CF_MULTI_MESSAGELOGS)
            sys_mgr.create_column_family(settings.KEYSPACE,
                CF_MULTI_MESSAGELOGS, comparator_type=UTF8Type())
            cf = ColumnFamily(pool, CF_MULTI_MESSAGELOGS)
            # Secondary indexes used to filter multi-message logs.
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                'meta:host', UTF8_TYPE,
                index_name='multimsg_host_index')
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                'meta:application', UTF8_TYPE,
                index_name='multimsg_application_index')
            sys_mgr.create_index(settings.KEYSPACE, CF_MULTI_MESSAGELOGS,
                'meta:status', UTF8_TYPE,
                index_name='multimsg_finish_status_index')
    finally:
        if pool:
            pool.dispose()
        if sys_mgr:
            sys_mgr.close()
def test_queue_pool_threadlocal(self):
    """Verify threadlocal pool semantics via a _TestListener: repeated
    gets on one thread share a single connection, while each worker
    thread performs its own checkout/checkin."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)

    # The prefilled pool has already opened its five connections.
    assert_equal(listener.connect_count, 5)

    # Ten gets on this thread all yield the same threadlocal connection.
    checked_out = [pool.get() for _ in range(10)]
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 1)

    for conn in checked_out[:5]:
        pool.return_conn(conn)
    assert_equal(listener.checkin_count, 1)
    for conn in checked_out[5:]:
        pool.return_conn(conn)
    # Re-returning the same underlying connection does not check in again.
    assert_equal(listener.checkin_count, 1)

    assert_equal(listener.connect_count, 5)
    # A second batch of gets still maps onto one pooled connection.
    checked_out = [pool.get() for _ in range(5)]
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 2)
    for conn in checked_out:
        pool.return_conn(conn)

    # Measure the threaded phase from a clean slate.
    listener.reset()

    def checkout_return():
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    # Each worker thread checks out its own connection from the pool.
    workers = []
    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    assert_equal(listener.connect_count, 0)  # still the original 5 connections
    assert_equal(listener.checkout_count, 5)
    assert_equal(listener.checkin_count, 5)

    # A second wave of threads reuses the pooled connections as well.
    workers = []
    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    assert_equal(listener.connect_count, 0)
    assert_equal(listener.checkout_count, 10)
    assert_equal(listener.checkin_count, 10)

    pool.dispose()
def test_queue_pool(self):
    """Exercise a non-threadlocal pool: overflow connection creation,
    exhaustion (NoConnectionAvailable), disposal of overflow connections
    on check-in, and InvalidRequestError on double-return.

    Fix: removed a leftover debug statement
    (``print "in test:", id(conns[-1])``) that polluted test output.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    # Check out the 5 pooled plus 5 overflow connections.
    for i in range(10):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    # The first five returns refill the pool; nothing is disposed yet.
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # The next five exceed pool_size, so the overflow conns are disposed.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # Returning an already-returned connection fails and changes no stats.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
class TestTimeUUIDs(unittest.TestCase):
    """Tests automatic conversion of datetime / Unix-time slice bounds to
    TimeUUIDs on a TimeUUIDType-compared column family.

    Fix: each of ``test_datetime_to_uuid`` and ``test_time_to_uuid``
    asserted the identical ``column_start=timeline[0],
    column_finish=timeline[2]`` slice twice in a row (copy-paste
    duplication); the redundant assertion is removed.
    """

    def setUp(self):
        credentials = {"username": "******", "password": "******"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1",
                                   credentials=credentials)
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")

    def tearDown(self):
        # Remove every row written by the tests, then release the pool.
        for key, cols in self.cf_time.get_range():
            self.cf_time.remove(key)
        self.pool.dispose()

    def test_datetime_to_uuid(self):
        """datetime objects used as column_start/column_finish bounds
        should slice the TimeUUID columns correctly."""
        key = "key1"

        timeline = []
        timeline.append(datetime.now())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(1)

        timeline.append(datetime.now())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(1)

        timeline.append(datetime.now())

        cols = {time1: "0", time2: "1"}
        # Open-ended and fully-bounded slices return both columns...
        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        # ...while tighter bounds isolate each column.
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[0],
                                      column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[1],
                                      column_finish=timeline[2]), col2)

    def test_time_to_uuid(self):
        """Unix timestamps (floats) used as slice bounds should convert
        to TimeUUIDs as well."""
        key = "key1"

        timeline = []
        timeline.append(time.time())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(0.1)

        timeline.append(time.time())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(0.1)

        timeline.append(time.time())

        cols = {time1: "0", time2: "1"}
        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[0],
                                      column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[0],
                                      column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key,
                                      column_start=timeline[1],
                                      column_finish=timeline[2]), col2)

    def test_auto_time_to_uuid1(self):
        """A float time used as a column name on insert is stored as a
        TimeUUID that converts back to (approximately) that time."""
        key = "key"
        t = time.time()
        col = {t: "foo"}
        self.cf_time.insert(key, col)
        uuid_res = self.cf_time.get(key).keys()[0]  # py2: keys() is a list
        timestamp = convert_uuid_to_time(uuid_res)
        assert_almost_equal(timestamp, t, places=3)
def test_basic_pools(self):
    """Round-trip sanity check: open a pool against the test keyspace,
    write one column, and dispose of the pool."""
    test_pool = ConnectionPool('PycassaTestKeyspace', credentials=_credentials)
    cf_standard1 = ColumnFamily(test_pool, 'Standard1')
    cf_standard1.insert('key1', {'col': 'val'})
    test_pool.dispose()
# Multiget count: columns per row for several keys at once.
# ``count``, ``author_cf``, ``pool`` and ``sys`` (a SystemManager) are
# defined earlier in this script, before this chunk.
print count
count=author_cf.multiget_count(["sacharya1","sacharya2"])
print count

################################## REMOVE #####################################
# Remove the column for the row key and column key
print "Removing the column last_name for row key sacharya1"
author_cf.remove('sacharya1', columns=['last_name'])

# Deletes are asynchronous; wait before reading back.
time.sleep(5)

# NOTE(review): the column above was removed from row 'sacharya1', but this
# reads row 'sacharya' -- confirm the key mismatch is intentional.
authors = author_cf.get('sacharya')
print authors

# REMOVE the entire row
author_cf.remove('sacharya')

try:
    time.sleep(5)
    print "Getting object already deleted"
    # get() on a fully removed row raises (NotFoundException in pycassa);
    # the demo prints the error and continues.
    author_cf.get('sacharya')
except Exception as e:
    print e

# Delete all data from column family
author_cf.truncate()

############################### DROP KEYSPACE #################################
sys.drop_keyspace('entries')
pool.dispose()