def test_max_pool_size_with_connection_failure(self):
    # The pool takes its semaphore before it tries to connect; verify that
    # the semaphore is released again when the connection attempt fails.
    options = PoolOptions(
        max_pool_size=1,
        connect_timeout=1,
        socket_timeout=1,
        wait_queue_timeout=1)
    test_pool = Pool(('somedomainthatdoesntexist.org', 27017), options)
    test_pool.ready()

    # The first get_socket call fails; if the pool leaked its semaphore the
    # second call would raise "ConnectionFailure: Timed out waiting for
    # socket from pool" instead of AutoReconnect.
    for _ in range(2):
        with self.assertRaises(AutoReconnect) as context:
            with test_pool.get_socket({}):
                pass

        # Asserting AutoReconnect rather than ConnectionFailure is enough to
        # catch a semaphore leak today, but that seems fragile, so check the
        # message as well.
        self.assertNotIn('waiting for socket from pool',
                         str(context.exception))
def __init__(self, client, pair, *args, **kwargs):
    # MockPool receives a 'client' argument that regular pools don't. Keep
    # it behind a weakref proxy to avoid a reference cycle with __del__,
    # which produced ResourceWarnings on Python 3.3.
    self.client = weakref.proxy(client)
    self.mock_host, self.mock_port = pair

    # Connect to the real default server instead of the mocked address.
    Pool.__init__(
        self, (client_context.host, client_context.port), *args, **kwargs)
def __init__(self, client, pair, *args, **kwargs):
    # Unlike regular pools, MockPool is given a 'client' argument. Store a
    # weakref proxy so we don't create a cycle with __del__ (that cycle
    # caused ResourceWarnings on Python 3.3).
    self.client = weakref.proxy(client)
    self.mock_host, self.mock_port = pair

    # Ignore the mocked address and really connect to the default server.
    Pool.__init__(self, (default_host, default_port), *args, **kwargs)
def create_pool(self, pair=(client_context.host, client_context.port),
                *args, **kwargs):
    # Borrow the live client's SSL and server-API settings so the pool can
    # actually reach the test server.
    live_options = client_context.client._topology_settings.pool_options
    kwargs['ssl_context'] = live_options.ssl_context
    kwargs['ssl_match_hostname'] = live_options.ssl_match_hostname
    kwargs['server_api'] = live_options.server_api

    pool = Pool(pair, PoolOptions(*args, **kwargs))
    pool.ready()
    return pool
def __init__(self, client, pair, *args, **kwargs):
    # MockPool gets a 'client' arg; regular pools don't.
    self.client = client
    self.mock_host, self.mock_port = pair

    # Really connect to the default server, not the mocked address.
    Pool.__init__(
        self,
        pair=(default_host, default_port),
        max_size=None,
        net_timeout=None,
        conn_timeout=20,
        use_ssl=False,
        use_greenlets=False)
def __init__(self, client, pair, *args, **kwargs):
    # Regular pools don't take a 'client' argument; MockPool does.
    self.client = client
    self.mock_host, self.mock_port = pair

    # Establish a real connection to the default server rather than using
    # the mocked pair.
    Pool.__init__(self,
                  pair=(default_host, default_port),
                  max_size=None,
                  net_timeout=None,
                  conn_timeout=20,
                  use_ssl=False,
                  use_greenlets=False)
def __init__(self, pair, *args, **kwargs):
    if pair:
        # The replica-set client hands 'pair' to Pool's constructor.
        self.mock_host, self.mock_port = pair
    else:
        # MongoClient passes the pair to get_socket() instead.
        self.mock_host = self.mock_port = None

    # Connect to the real default server regardless of the mocked address.
    Pool.__init__(self,
                  pair=(default_host, default_port),
                  max_size=None,
                  net_timeout=None,
                  conn_timeout=20,
                  use_ssl=False,
                  use_greenlets=False)
def test_pool_reuses_open_socket(self):
    # Pool's _check_closed() must not close a healthy socket.
    cx_pool = Pool((host, port), 10, None, None, False)
    sock_info = cx_pool.get_socket()
    cx_pool.return_socket(sock_info)

    # _check_closed only runs on sockets that have been idle for at least
    # a second, so wait long enough to trigger it.
    time.sleep(1)
    new_sock_info = cx_pool.get_socket()
    self.assertEqual(sock_info, new_sock_info)
    del sock_info, new_sock_info

    # The socket should have been returned to the pool exactly once.
    force_reclaim_sockets(cx_pool, 1)
    self.assertEqual(1, len(cx_pool.sockets))
def test_pool_removes_dead_socket(self):
    # Pool must discard a dead socket instead of handing it out again,
    # see PYTHON-344.
    cx_pool = Pool((host, port), 10, None, None, False)
    sock_info = cx_pool.get_socket()

    # Close the raw socket without telling the SocketInfo it's closed.
    sock_info.sock.close()
    self.assertTrue(pymongo.pool._closed(sock_info.sock))

    cx_pool.return_socket(sock_info)
    time.sleep(1)  # Let _check_closed notice the dead socket.
    new_sock_info = cx_pool.get_socket()
    self.assertEqual(0, len(cx_pool.sockets))
    self.assertNotEqual(sock_info, new_sock_info)
    del sock_info, new_sock_info

    # Only new_sock_info goes back to the pool; the dead one is dropped.
    force_reclaim_sockets(cx_pool, 1)
    self.assertEqual(1, len(cx_pool.sockets))
def test_max_pool_size_with_connection_failure(self):
    # The semaphore is acquired before the pool attempts to connect; make
    # sure a failed connection releases it again.
    test_pool = Pool(
        ("example.com", 27017),
        PoolOptions(max_pool_size=1,
                    connect_timeout=1,
                    socket_timeout=1,
                    wait_queue_timeout=1))

    # The first get_socket call fails; if the pool doesn't release its
    # semaphore, the second call raises "ConnectionFailure: Timed out
    # waiting for socket from pool" rather than AutoReconnect.
    for _ in range(2):
        with self.assertRaises(AutoReconnect) as context:
            with test_pool.get_socket({}, checkout=True):
                pass

        # Expecting AutoReconnect rather than ConnectionFailure is enough
        # to catch a semaphore leak for now, but that seems error-prone, so
        # verify the message too.
        self.assertNotIn("waiting for socket from pool",
                         str(context.exception))
def get_socket(self, force=False):
    # Simulate a down host, otherwise delegate to the real pool and tag the
    # returned socket with the mocked address.
    client = self.client
    address = '%s:%s' % (self.mock_host, self.mock_port)
    if address in client.mock_down_hosts:
        raise socket.error('mock error')

    known_hosts = (client.mock_standalones
                   + client.mock_members
                   + client.mock_mongoses)
    assert address in known_hosts, "bad host: %s" % address

    sock_info = Pool.get_socket(self, force)
    sock_info.mock_host = self.mock_host
    sock_info.mock_port = self.mock_port
    return sock_info
def get_socket(self, all_credentials, checkout=False):
    # Simulate a down host, otherwise check out a real socket and tag it
    # with the mocked address before yielding it.
    client = self.client
    address = '%s:%s' % (self.mock_host, self.mock_port)
    if address in client.mock_down_hosts:
        raise AutoReconnect('mock error')

    known_hosts = (client.mock_standalones
                   + client.mock_members
                   + client.mock_mongoses)
    assert address in known_hosts, "bad host: %s" % address

    with Pool.get_socket(self, all_credentials) as sock_info:
        sock_info.mock_host = self.mock_host
        sock_info.mock_port = self.mock_port
        yield sock_info
def test_request(self):
    # Outside a request, two get_socket() calls yield two different sockets
    # -- the pool no longer puts us in a request automatically.
    cx_pool = Pool(pair=(host, port),
                   max_size=10,
                   net_timeout=1000,
                   conn_timeout=1000,
                   use_ssl=False)

    sock0 = cx_pool.get_socket()
    sock1 = cx_pool.get_socket()
    self.assertNotEqual(sock0, sock1)

    # Inside a request, the same socket is handed out on each call.
    cx_pool.start_request()
    sock2 = cx_pool.get_socket()
    sock3 = cx_pool.get_socket()
    self.assertEqual(sock2, sock3)

    # The pool kept no reference to sock0 or sock1, so sock2/sock3 are new.
    self.assertNotEqual(sock0, sock2)
    self.assertNotEqual(sock1, sock2)

    # Ending the request returns the request socket to the pool.
    cx_pool.end_request()
    sock4 = cx_pool.get_socket()
    sock5 = cx_pool.get_socket()

    # No longer in a request: distinct sockets again.
    self.assertNotEqual(sock4, sock5)

    # end_request() put sock2 back, so it's the first one re-checked out.
    self.assertEqual(sock4, sock2)
def test_pool_removes_dead_socket_after_request(self):
    # The pool must cope with the death of a socket that *used* to be the
    # request socket.
    cx_pool = Pool((host, port), 10, None, None, False)
    cx_pool.start_request()

    # Grab the request socket.
    sock_info = cx_pool.get_socket()
    self.assertEqual(sock_info, cx_pool._get_request_state())

    # Finish the request; the socket goes back to the pool.
    cx_pool.end_request()
    self.assertEqual(1, len(cx_pool.sockets))

    # Kill the former request socket.
    sock_info.sock.close()
    del sock_info
    time.sleep(1)  # Let _check_closed run.

    # The dead socket is detected and discarded.
    new_sock_info = cx_pool.get_socket()
    self.assertEqual(0, len(cx_pool.sockets))
    self.assertFalse(pymongo.pool._closed(new_sock_info.sock))
def get_pool(self, *args, **kwargs):
    # Build a Pool that honors this test case's greenlet setting.
    kwargs['use_greenlets'] = self.use_greenlets
    return Pool(*args, **kwargs)
def test_pool_removes_dead_request_socket(self):
    # A socket dying mid-request must not end the request itself.
    cx_pool = Pool((host, port), 10, None, None, False)
    cx_pool.start_request()

    # Check out the request socket.
    sock_info = cx_pool.get_socket()
    self.assertEqual(0, len(cx_pool.sockets))
    self.assertEqual(sock_info, cx_pool._get_request_state())

    # Kill it and hand it back.
    sock_info.sock.close()
    cx_pool.return_socket(sock_info)
    time.sleep(1)  # Let _check_closed run.

    # The request socket died, but we're still in a request -- now with a
    # fresh socket.
    new_sock_info = cx_pool.get_socket()
    self.assertNotEqual(sock_info, new_sock_info)
    self.assertEqual(new_sock_info, cx_pool._get_request_state())

    cx_pool.return_socket(new_sock_info)
    self.assertEqual(new_sock_info, cx_pool._get_request_state())
    self.assertEqual(0, len(cx_pool.sockets))

    cx_pool.end_request()
    self.assertEqual(1, len(cx_pool.sockets))
def get_socket(self, pair=None, force=False):
    # Check a real socket out of the default server's pool, then record
    # the mocked host on it (falling back to the caller's pair).
    sock_info = Pool.get_socket(self, (default_host, default_port), force)
    sock_info.host = self.mock_host or pair[0]
    return sock_info
def create_pool(self, pair=(client_context.host, client_context.port),
                *args, **kwargs):
    # Wrap the remaining arguments in PoolOptions and build the pool.
    return Pool(pair, PoolOptions(*args, **kwargs))
def test_reset_and_request(self):
    # reset() happens after a fork or a socket error. If a request was in
    # progress when reset() ran, a new request must be begun; otherwise no
    # request should be started.
    pool = Pool((host, port), 10, None, None, False)
    self.assertFalse(pool.in_request())

    pool.start_request()
    self.assertTrue(pool.in_request())
    pool.reset()
    self.assertTrue(pool.in_request())  # Still in a request after reset.

    pool.end_request()
    self.assertFalse(pool.in_request())
    pool.reset()
    self.assertFalse(pool.in_request())  # No request was resurrected.
def create_pool(self, pair=(host, port), *args, **kwargs):
    # Bundle the extra arguments into PoolOptions and return a new Pool.
    return Pool(pair, PoolOptions(*args, **kwargs))