def test_queue_threadlocal_failover(self):
    """Failover on a threadlocal pool.

    First: a one-connection pool whose only connection is repeatedly
    sabotaged; each insert must fail over and still succeed.  Second: a
    five-connection pool whose connections are all sabotaged before five
    threads insert concurrently; every thread must trigger one failure.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for attempt in range(1, 5):
        # Sabotage the pool's only connection so its next mutate raises.
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % attempt, 'col2': 'val'})
        assert_equal(stats_logger.stats['failed'], attempt)
        assert_equal(cf.get('key'), {'col': 'val%d' % attempt, 'col2': 'val'})
    pool.dispose()
    stats_logger.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Corrupt every pooled connection before the worker threads start.
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    insert_args = ('key', {'col': 'val', 'col2': 'val'})
    workers = [threading.Thread(target=cf.insert, args=insert_args)
               for _ in range(5)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    assert_equal(stats_logger.stats['failed'], 5)
    pool.dispose()
def test_queue_pool(self):
    """Exercise checkout/checkin accounting on a non-threadlocal queue pool.

    Verifies: overflow connections are created on demand, the pool reports
    at-max when exhausted, overflow connections are closed on checkin, and
    returning an already-returned connection raises without altering the
    counters.
    """
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.max_count, 1)

    # The first five go back into the pool proper...
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 0)
    assert_equal(listener.checkin_count, 5)

    # ...and the overflow five are closed on checkin.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 5)
    assert_equal(listener.checkin_count, 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 15)
    assert_equal(listener.checkout_count, 20)

    assert_equal(listener.close_count, 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    # A double checkin must raise and leave the counters untouched.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    # NOTE: removed a leftover Python 2 debug statement
    # (`print "in test:", id(conns[-1])`) -- it was a SyntaxError on
    # Python 3 and asserted nothing.
    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)

    pool.dispose()
    assert_equal(listener.dispose_count, 1)
def test_queue_pool(self):
    """Exercise checkout/checkin accounting on a non-threadlocal queue pool
    (StatsLogger variant).

    Verifies: overflow connections are created on demand, the pool reports
    at-max when exhausted, overflow connections are disposed on checkin,
    and a double checkin of a pooled connection raises without changing
    the counters.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    # The first five go back into the pool proper...
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # ...and the overflow five are disposed on checkin.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # A double checkin must raise and leave the counters untouched.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # NOTE: removed a leftover debug `print("in test:", id(conns[-1]))`
    # statement; it asserted nothing and only polluted test output.
    conns[-1].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
def test_pool_connection_failure(self):
    """Connection failures against a partially-bad server list are counted
    correctly for both non-threadlocal and threadlocal pools."""
    stats_logger = StatsLoggerWithListStorage()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        return 0 if stats_logger.serv_list[0] == 'localhost:9160' else 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    # Prefill alone already hits the bad host several times.
    assert_equal(stats_logger.stats['failed'], 4 + get_extra())

    for _ in range(0, 7):
        pool.get()
    assert_equal(stats_logger.stats['failed'], 6 + get_extra())
    pool.dispose()

    stats_logger.reset()

    # Same scenario, but with a threadlocal pool and concurrent checkouts.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(stats_logger.stats['failed'], 4 + get_extra())

    workers = [threading.Thread(target=pool.get) for _ in range(0, 7)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    assert_equal(stats_logger.stats['failed'], 6 + get_extra())
    pool.dispose()
def test_queue_threadlocal_failover(self):
    """Failover on a threadlocal pool (listener variant).

    A single-connection pool is sabotaged repeatedly and each insert must
    fail over and succeed; then five threads insert against a five-
    connection pool whose connections were all sabotaged beforehand.
    """
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    for attempt in range(1, 5):
        # Break the pool's sole connection so its next mutate raises once.
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % attempt, 'col2': 'val'})
        assert_equal(listener.failure_count, attempt)
        assert_equal(cf.get('key'), {'col': 'val%d' % attempt, 'col2': 'val'})
    pool.dispose()
    listener.reset()

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')

    # Corrupt every pooled connection before the worker threads start.
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    insert_args = ('key', {'col': 'val', 'col2': 'val'})
    workers = [threading.Thread(target=cf.insert, args=insert_args)
               for _ in range(5)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    assert_equal(listener.failure_count, 5)
    pool.dispose()
def test_queue_threadlocal_retry_limit(self):
    """With max_retries=3 and every connection broken, the 4th failure
    must raise MaximumRetryException instead of retrying again."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_failover(self):
    """Failover on a non-threadlocal single-connection pool, run once with
    prefill enabled and once without."""
    for prefill in (True, False):
        listener = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace',
                              credentials=_credentials,
                              listeners=[listener], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')

        for attempt in range(1, 5):
            # Break the pool's only connection so the next mutate raises.
            broken = pool.get()
            broken.send_batch_mutate = broken._fail_once
            broken._should_fail = True
            broken.return_to_pool()

            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % attempt, 'col2': 'val'})
            assert_equal(listener.failure_count, attempt)
            assert_equal(cf.get('key'),
                         {'col': 'val%d' % attempt, 'col2': 'val'})

        pool.dispose()
def test_failure_connection_info(self):
    """On failure, the listener's connection info must carry the original
    request: method name, positional args, and keyword args."""
    stats_logger = StatsLoggerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')

    # Corrupt the connection
    broken = pool.get()
    broken.send_get = broken._fail_once
    broken._should_fail = True
    broken.return_to_pool()

    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])

    # The failure event must expose the request that triggered it.
    assert_true('request' in stats_logger.failure_dict['connection'].info)
    request = stats_logger.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'],
                 ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
def test_queue_failure_on_retry(self):
    """A failure while replacing a dead connection counts as a regular
    connection failure and is subject to the same retry limit."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError

    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure
    pool._replace_wrapper = raiser

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_failure_with_no_retries(self):
    """Counter mutations must not be retried: a single failure raises
    MaximumRetryException immediately even though max_retries=3."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(stats_logger.stats['failed'], 1)  # didn't retry at all

    pool.dispose()
def test_pool_connection_failure(self):
    """Connection failures against a partially-bad server list are counted
    correctly for both non-threadlocal and threadlocal pools (listener
    variant)."""
    listener = _TestListener()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        return 0 if listener.serv_list[0] == 'localhost:9160' else 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])

    # Prefill alone already hits the bad host several times.
    assert_equal(listener.failure_count, 4 + get_extra())

    for _ in range(0, 7):
        pool.get()
    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()

    listener.reset()

    # Same scenario, but with a threadlocal pool and concurrent checkouts.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])

    assert_equal(listener.failure_count, 4 + get_extra())

    workers = [threading.Thread(target=pool.get) for _ in range(0, 7)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()
def test_queue_threadlocal_retry_limit(self):
    """With max_retries=3 and every connection broken, the 4th failure
    must raise MaximumRetryException instead of retrying again
    (StatsLogger variant)."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_failure_with_no_retries(self):
    """Counter mutations must not be retried: a single failure raises
    MaximumRetryException immediately even though max_retries=3
    (listener variant)."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(listener.failure_count, 1)  # didn't retry at all

    pool.dispose()
def test_failure_connection_info(self):
    """On failure, the listener's connection info must carry the original
    request: method name, positional args, and keyword args (listener
    variant)."""
    listener = _TestListenerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')

    # Corrupt the connection
    broken = pool.get()
    broken.send_get = broken._fail_once
    broken._should_fail = True
    broken.return_to_pool()

    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])

    # The failure event must expose the request that triggered it.
    assert_true('request' in listener.failure_dict['connection'].info)
    request = listener.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'],
                 ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
def test_queue_failure_on_retry(self):
    """A failure while replacing a dead connection counts as a regular
    connection failure and is subject to the same retry limit
    (StatsLogger variant)."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True,
                          max_retries=3,  # allow 3 retries
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError

    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure
    pool._replace_wrapper = raiser

    # Corrupt all of the connections
    for _ in range(5):
        broken = pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert,
                  'key', {'col': 'val', 'col2': 'val'})
    assert_equal(stats_logger.stats['failed'], 4)  # On the 4th failure, didn't retry

    pool.dispose()
def test_queue_failover(self): for prefill in (True, False): listener = _TestListener() pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000, prefill=prefill, timeout=1, keyspace='PycassaTestKeyspace', credentials=_credentials, listeners=[listener], use_threadlocal=False, server_list=['localhost:9160', 'localhost:9160']) cf = ColumnFamily(pool, 'Standard1') for i in range(1,5): conn = pool.get() setattr(conn, 'send_batch_mutate', conn._fail_once) conn._should_fail = True conn.return_to_pool() # The first insert attempt should fail, but failover should occur # and the insert should succeed cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'}) assert_equal(listener.failure_count, i) assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'}) pool.dispose()
def test_queue_pool(self):
    """Exercise checkout/checkin accounting on a non-threadlocal queue pool
    (StatsLogger variant with double-checkin assertions).

    Verifies: overflow connections are created on demand, the pool reports
    at-max when exhausted, overflow connections are disposed on checkin,
    and double checkins raise without altering the counters.
    """
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    for i in range(10):
        conns.append(pool.get())

    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)

    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)

    # The first five go back into the pool proper...
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # ...and the overflow five are disposed on checkin.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)

    conns = []

    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)

    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)

    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # A double checkin must raise and leave the counters untouched.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    # NOTE: removed a leftover Python 2 debug statement
    # (`print "in test:", id(conns[-1])`) -- it was a SyntaxError on
    # Python 3 and asserted nothing.
    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)

    pool.dispose()
def test_queue_pool_threadlocal(self):
    """A threadlocal pool hands the same connection back to one thread,
    and distinct threads each check out and return their own connection."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)
    conns = []
    assert_equal(listener.connect_count, 5)

    # These connections should all be the same
    for _ in range(10):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 1)

    # Returning the same threadlocal connection repeatedly only counts once.
    for same_conn in conns[:5]:
        pool.return_conn(same_conn)
    assert_equal(listener.checkin_count, 1)
    for same_conn in conns[5:]:
        pool.return_conn(same_conn)
    assert_equal(listener.checkin_count, 1)

    conns = []
    assert_equal(listener.connect_count, 5)

    # A single connection should come from the pool
    for _ in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 2)

    for same_conn in conns:
        pool.return_conn(same_conn)

    conns = []
    threads = []
    listener.reset()

    def checkout_return():
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()

    assert_equal(listener.connect_count, 0)  # Still 5 connections in pool
    assert_equal(listener.checkout_count, 5)
    assert_equal(listener.checkin_count, 5)

    # These should come from the pool
    threads = []
    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()

    assert_equal(listener.connect_count, 0)
    assert_equal(listener.checkout_count, 10)
    assert_equal(listener.checkin_count, 10)

    pool.dispose()
def test_queue_pool_threadlocal(self):
    """A threadlocal pool hands the same connection back to one thread,
    and distinct threads each check out and return their own connection
    (StatsLogger variant)."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace',
                          credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=True)
    conns = []
    assert_equal(stats_logger.stats['created']['success'], 5)

    # These connections should all be the same
    for _ in range(10):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    assert_equal(stats_logger.stats['checked_out'], 1)

    # Returning the same threadlocal connection repeatedly only counts once.
    for same_conn in conns[:5]:
        pool.return_conn(same_conn)
    assert_equal(stats_logger.stats['checked_in'], 1)
    for same_conn in conns[5:]:
        pool.return_conn(same_conn)
    assert_equal(stats_logger.stats['checked_in'], 1)

    conns = []
    assert_equal(stats_logger.stats['created']['success'], 5)

    # A single connection should come from the pool
    for _ in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 5)
    assert_equal(stats_logger.stats['checked_out'], 2)

    for same_conn in conns:
        pool.return_conn(same_conn)

    conns = []
    threads = []
    stats_logger.reset()

    def checkout_return():
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()

    assert_equal(stats_logger.stats['created']['success'], 0)  # Still 5 connections in pool
    assert_equal(stats_logger.stats['checked_out'], 5)
    assert_equal(stats_logger.stats['checked_in'], 5)

    # These should come from the pool
    threads = []
    for _ in range(5):
        worker = threading.Thread(target=checkout_return)
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()

    assert_equal(stats_logger.stats['created']['success'], 0)
    assert_equal(stats_logger.stats['checked_out'], 10)
    assert_equal(stats_logger.stats['checked_in'], 10)

    pool.dispose()