def test_multiple_connections(self):
    """Request state on one client must not leak into another client's pool.

    Tracks socket counts in each client's private pool as requests are
    started and ended, then verifies each client gets its own socket back.
    """
    a = self.get_connection(auto_start_request=False)
    b = self.get_connection(auto_start_request=False)
    # Each freshly-connected client holds exactly one idle socket.
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))

    a.start_request()
    a.pymongo_test.test.find_one()
    # The socket is bound to a's request, so it left the idle pool.
    self.assertEqual(0, len(a._MongoClient__pool.sockets))
    a.end_request()
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))
    a_sock = one(a._MongoClient__pool.sockets)

    # end_request() on b without a prior start_request() changes nothing.
    b.end_request()
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))

    b.start_request()
    b.pymongo_test.test.find_one()
    # Only b's socket is checked out; a's pool is untouched.
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(0, len(b._MongoClient__pool.sockets))

    b.end_request()
    b_sock = one(b._MongoClient__pool.sockets)
    b.pymongo_test.test.find_one()
    a.pymongo_test.test.find_one()
    # Each pool still hands back its own original socket.
    self.assertEqual(
        b_sock, b._MongoClient__pool.get_socket((b.host, b.port)))
    self.assertEqual(
        a_sock, a._MongoClient__pool.get_socket((a.host, a.port)))

    a_sock.close()
    b_sock.close()
def test_multiple_connections(self):
    """Request state on one client must not leak into another client's pool.

    Same scenario as the name-mangled variant, but reaches the pool via the
    get_pool() test helper.
    """
    a = self.get_client(auto_start_request=False)
    b = self.get_client(auto_start_request=False)
    # Each freshly-connected client holds exactly one idle socket.
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))

    a.start_request()
    a.pymongo_test.test.find_one()
    # The socket is bound to a's request, so it left the idle pool.
    self.assertEqual(0, len(get_pool(a).sockets))
    a.end_request()
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))
    a_sock = one(get_pool(a).sockets)

    # end_request() on b without a prior start_request() changes nothing.
    b.end_request()
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(1, len(get_pool(b).sockets))

    b.start_request()
    b.pymongo_test.test.find_one()
    # Only b's socket is checked out; a's pool is untouched.
    self.assertEqual(1, len(get_pool(a).sockets))
    self.assertEqual(0, len(get_pool(b).sockets))

    b.end_request()
    b_sock = one(get_pool(b).sockets)
    b.pymongo_test.test.find_one()
    a.pymongo_test.test.find_one()
    # Each pool still hands back its own original socket.
    self.assertEqual(b_sock, get_pool(b).get_socket())
    self.assertEqual(a_sock, get_pool(a).get_socket())

    a_sock.close()
    b_sock.close()
def run_mongo_thread(self):
    """Follow one socket from the idle pool into a request and back.

    Runs in a worker thread; asserts the pool's only socket becomes this
    thread's request socket after it is first used, then returns to the
    pool on end_request().
    """
    pool = get_pool(self.client)
    assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
        len(pool.sockets))
    sock_info = one(pool.sockets)

    self.client.start_request()

    # start_request() hasn't yet moved the socket from the general pool into
    # the request
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info

    self.client[DB].test.find_one()

    # find_one() causes the socket to be used in the request, so now it's
    # bound to this thread
    assert len(pool.sockets) == 0
    assert pool._get_request_state() == sock_info
    self.client.end_request()

    # The socket is back in the pool
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info
def test_multiple_connections(self):
    """Request state on one client must not leak into another client's pool.

    Uses the name-mangled _MongoClient__pool attribute directly to inspect
    each client's private connection pool.
    """
    a = self.get_client(auto_start_request=False)
    b = self.get_client(auto_start_request=False)
    # Each freshly-connected client holds exactly one idle socket.
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))

    a.start_request()
    a.pymongo_test.test.find_one()
    # The socket is bound to a's request, so it left the idle pool.
    self.assertEqual(0, len(a._MongoClient__pool.sockets))
    a.end_request()
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))
    a_sock = one(a._MongoClient__pool.sockets)

    # end_request() on b without a prior start_request() changes nothing.
    b.end_request()
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(1, len(b._MongoClient__pool.sockets))

    b.start_request()
    b.pymongo_test.test.find_one()
    # Only b's socket is checked out; a's pool is untouched.
    self.assertEqual(1, len(a._MongoClient__pool.sockets))
    self.assertEqual(0, len(b._MongoClient__pool.sockets))

    b.end_request()
    b_sock = one(b._MongoClient__pool.sockets)
    b.pymongo_test.test.find_one()
    a.pymongo_test.test.find_one()
    # Each pool still hands back its own original socket.
    self.assertEqual(
        b_sock, b._MongoClient__pool.get_socket((b.host, b.port)))
    self.assertEqual(
        a_sock, a._MongoClient__pool.get_socket((a.host, a.port)))

    a_sock.close()
    b_sock.close()
def run_mongo_thread(self):
    """Follow one socket from the idle pool into a request and back.

    Runs in a worker thread; asserts the pool's only socket becomes this
    thread's request socket once used, then returns on end_request().
    """
    pool = get_pool(self.client)
    assert len(pool.sockets) == 1, "Expected 1 socket, found %d" % (
        len(pool.sockets)
    )
    sock_info = one(pool.sockets)

    self.client.start_request()

    # start_request() hasn't yet moved the socket from the general pool into
    # the request
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info

    self.client[DB].test.find_one()

    # find_one() causes the socket to be used in the request, so now it's
    # bound to this thread
    assert len(pool.sockets) == 0
    assert pool._get_request_state() == sock_info
    self.client.end_request()

    # The socket is back in the pool
    assert len(pool.sockets) == 1
    assert one(pool.sockets) == sock_info
def test_atexit_hook(self):
    """Dropping the last client reference lets its monitor executor unregister.

    The del ordering matters: both the local `executor` name and the client
    must be released before the weakref in _EXECUTORS can die.
    """
    client = single_client(client_context.host, client_context.port)
    executor = one(client._topology._servers.values())._monitor._executor
    connected(client)

    # The executor stores a weakref to itself in _EXECUTORS.
    ref = one([r for r in _EXECUTORS.copy() if r() is executor])

    del executor
    del client

    # GC timing is nondeterministic, so poll rather than assert immediately.
    wait_until(partial(unregistered, ref), 'unregister executor', timeout=5)
def test_timeout_does_not_mark_member_down(self):
    # If a query times out, the RS client shouldn't mark the member "down".
    c = self._get_client(socketTimeoutMS=3000)
    collection = c.pymongo_test.test
    collection.insert({}, w=self.w)

    # Query the primary: the delayed $where exceeds socketTimeoutMS.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one, {'$where': delay(5)})

    # primary_member returns None if primary is marked "down".
    rs_state = c._MongoReplicaSetClient__rs_state
    self.assertTrue(rs_state.primary_member)

    collection.find_one()  # No error.

    # Query the secondary with the same delayed $where.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one, {'$where': delay(5)},
        read_preference=SECONDARY)

    # Re-fetch rs_state: it may have been replaced since the last read.
    rs_state = c._MongoReplicaSetClient__rs_state
    secondary_host = one(rs_state.secondaries)
    self.assertTrue(rs_state.get(secondary_host))
    collection.find_one(read_preference=SECONDARY)  # No error.
def test_auth_network_error(self):
    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    auth_client = self._get_client()
    if not server_started_with_auth(auth_client):
        raise SkipTest('Authentication is not enabled on server')

    auth_client.admin.add_user('admin', 'password')
    auth_client.admin.authenticate('admin', 'password')
    try:
        # Get a client with one socket so we detect if it's leaked.
        c = self._get_client(max_pool_size=1, waitQueueTimeoutMS=1)

        # Simulate an authenticate() call on a different socket.
        credentials = auth._build_credentials_tuple(
            'MONGODB-CR', 'admin', unicode('admin'), unicode('password'), {})
        c._cache_credentials('test', credentials, connect=False)

        # Cause a network error on the actual socket.
        pool = get_pool(c)
        socket_info = one(pool.sockets)
        socket_info.sock.close()

        # In __check_auth, the client authenticates its socket with the
        # new credential, but gets a socket.error. Should be reraised as
        # AutoReconnect.
        self.assertRaises(AutoReconnect, c.test.collection.find_one)

        # No semaphore leak, the pool is allowed to make a new socket.
        c.test.collection.find_one()
    finally:
        remove_all_users(auth_client.admin)
def test_stepdown_triggers_refresh(self): c_find_one = yield motor.MotorReplicaSetClient( self.seed, replicaSet=self.name).open() # We've started the primary and one secondary primary = ha_tools.get_primary() secondary = ha_tools.get_secondaries()[0] self.assertEqual(one(c_find_one.secondaries), _partition_node(secondary)) ha_tools.stepdown_primary() # Make sure the stepdown completes yield self.pause(1) # Trigger a refresh with assert_raises(AutoReconnect): yield c_find_one.test.test.find_one() # Wait for the immediate refresh to complete - we're not waiting for # the periodic refresh, which has been disabled yield self.pause(1) # We've detected the stepdown self.assertTrue(not c_find_one.primary or primary != c_find_one.primary)
def test_timeout_does_not_mark_member_down(self):
    # If a query times out, the RS client shouldn't mark the member "down".
    c = self._get_client(socketTimeoutMS=3000)
    collection = c.pymongo_test.test
    collection.insert({}, w=self.w)

    # Query the primary: the delayed $where exceeds socketTimeoutMS.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one, {'$where': delay(5)})

    # primary_member returns None if primary is marked "down".
    rs_state = c._MongoReplicaSetClient__rs_state
    self.assertTrue(rs_state.primary_member)

    collection.find_one()  # No error.

    # Query the secondary with the same delayed $where.
    self.assertRaises(
        ConnectionFailure,
        collection.find_one, {'$where': delay(5)},
        read_preference=SECONDARY)

    # Re-fetch rs_state: it may have been replaced since the last read.
    rs_state = c._MongoReplicaSetClient__rs_state
    secondary_host = one(rs_state.secondaries)
    self.assertTrue(rs_state.get(secondary_host))
    collection.find_one(read_preference=SECONDARY)  # No error.
def test_auth_network_error(self):
    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    # Get a client with one socket so we detect if it's leaked.
    c = connected(rs_or_single_client(maxPoolSize=1,
                                      waitQueueTimeoutMS=1))

    # Simulate an authenticate() call on a different socket.
    credentials = auth._build_credentials_tuple(
        'DEFAULT', 'admin', db_user, db_pwd, {})
    c._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = get_pool(c)
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # SocketInfo.check_auth logs in with the new credential, but gets a
    # socket.error. Should be reraised as AutoReconnect.
    self.assertRaises(AutoReconnect, c.test.collection.find_one)

    # No semaphore leak, the pool is allowed to make a new socket.
    c.test.collection.find_one()
def test_get_default_database_with_authsource(self):
    """The database in the URI path wins over the authSource option."""
    node_host, node_port = one(self.hosts)
    uri = "mongodb://%s:%d/foo?replicaSet=%s&authSource=src" % (
        node_host, node_port, self.name)
    client = MongoReplicaSetClient(uri, _connect=False)
    self.assertEqual(Database(client, "foo"), client.get_default_database())
def test_get_default_database_error(self):
    """get_default_database raises when the URI names no database."""
    node_host, node_port = one(self.hosts)
    # Build a URI whose path component is empty.
    uri = "mongodb://%s:%d/?replicaSet=%s" % (node_host, node_port, self.name)
    client = MongoReplicaSetClient(uri, _connect=False)
    self.assertRaises(ConfigurationError, client.get_default_database)
def test_auth_network_error(self):
    if not test.env.auth:
        raise SkipTest('Authentication is not enabled on server')

    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    # Get a client with one socket so we detect if it's leaked.
    c = self.motor_rsc(max_pool_size=1, waitQueueTimeoutMS=1)
    yield c.open()

    # Simulate an authenticate() call on a different socket.
    credentials = pymongo.auth._build_credentials_tuple(
        'DEFAULT', 'admin', text_type(db_user), text_type(db_password), {})
    c.delegate._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = c._get_primary_pool()
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # In __check_auth, the client authenticates its socket with the
    # new credential, but gets a socket.error. Should be reraised as
    # AutoReconnect.
    with self.assertRaises(pymongo.errors.AutoReconnect):
        yield c.test.collection.find_one()

    # No semaphore leak, the pool is allowed to make a new socket.
    yield c.test.collection.find_one()
def test_recovering_member_triggers_refresh(self):
    # To test that find_one() and count() trigger immediate refreshes,
    # we'll create a separate client for each
    self.c_find_one, self.c_count = yield [
        motor.MotorReplicaSetClient(
            self.seed, replicaSet=self.name, read_preference=SECONDARY
        ).open() for _ in xrange(2)]

    # We've started the primary and one secondary
    primary = ha_tools.get_primary()
    secondary = ha_tools.get_secondaries()[0]

    # Pre-condition: just make sure they all connected OK
    for c in self.c_find_one, self.c_count:
        self.assertEqual(one(c.secondaries), _partition_node(secondary))

    # Put the only secondary into maintenance mode.
    ha_tools.set_maintenance(secondary, True)

    # Trigger a refresh in various ways
    with assert_raises(AutoReconnect):
        yield self.c_find_one.test.test.find_one()

    with assert_raises(AutoReconnect):
        yield self.c_count.test.test.count()

    # Wait for the immediate refresh to complete - we're not waiting for
    # the periodic refresh, which has been disabled
    yield self.pause(1)

    # Both clients dropped the secondary and still see the primary.
    for c in self.c_find_one, self.c_count:
        self.assertFalse(c.secondaries)
        self.assertEqual(_partition_node(primary), c.primary)
def test_get_default_database(self):
    """The database named in the URI path is the default database."""
    node_host, node_port = one(self.hosts)
    uri = "mongodb://%s:%d/foo?replicaSet=%s" % (
        node_host, node_port, self.name)
    client = MongoReplicaSetClient(uri, _connect=False)
    self.assertEqual(Database(client, 'foo'), client.get_default_database())
def test_auth_network_error(self):
    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    # Get a client with one socket so we detect if it's leaked.
    # Generous wait queue timeout in case the main thread contends
    # with the monitor, though -- a semaphore leak will be detected
    # eventually, even with a long timeout.
    c = self._get_client(max_pool_size=1, waitQueueTimeoutMS=10000)

    # Simulate an authenticate() call on a different socket.
    credentials = auth._build_credentials_tuple(
        'DEFAULT', 'admin', unicode(db_user), unicode(db_pwd), {})
    c._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = get_pool(c)
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # In __check_auth, the client authenticates its socket with the
    # new credential, but gets a socket.error. Reraised as AutoReconnect,
    # unless periodic monitoring or Pool._check prevent the error.
    try:
        c.test.collection.find_one()
    except AutoReconnect:
        # Expected, but not guaranteed -- see comment above.
        pass

    # No semaphore leak, the pool is allowed to make a new socket.
    c.test.collection.find_one()
def test_init_disconnected_with_auth(self):
    """Lazily-connecting clients authenticate from URI credentials on first op.

    NOTE(review): the URI credentials appear redacted ("*****") in this
    copy of the source -- confirm against the original test.
    """
    c = self._get_client()
    c.admin.system.users.remove({})
    c.pymongo_test.system.users.remove({})
    try:
        c.admin.add_user("admin", "pass")
        c.admin.authenticate("admin", "pass")
        c.pymongo_test.add_user("user", "pass")

        # Auth with lazy connection.
        host = one(self.hosts)
        uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        authenticated_client = MongoReplicaSetClient(uri, _connect=False)
        authenticated_client.pymongo_test.test.find_one()

        # Wrong password.
        bad_uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        bad_client = MongoReplicaSetClient(bad_uri, _connect=False)
        self.assertRaises(OperationFailure,
                          bad_client.pymongo_test.test.find_one)
    finally:
        # Clean up.
        c.admin.system.users.remove({})
        c.pymongo_test.system.users.remove({})
def test_init_disconnected_with_auth(self):
    """Lazily-connecting clients authenticate from URI credentials on first op.

    NOTE(review): the URI credentials appear redacted ("*****") in this
    copy of the source -- confirm against the original test.
    """
    c = self._get_client()
    if not server_started_with_auth(c):
        raise SkipTest('Authentication is not enabled on server')

    c.admin.add_user("admin", "pass")
    c.admin.authenticate("admin", "pass")
    try:
        c.pymongo_test.add_user(
            "user", "pass", roles=['readWrite', 'userAdmin'])

        # Auth with lazy connection.
        host = one(self.hosts)
        uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        authenticated_client = MongoReplicaSetClient(uri, _connect=False)
        authenticated_client.pymongo_test.test.find_one()

        # Wrong password.
        bad_uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        bad_client = MongoReplicaSetClient(bad_uri, _connect=False)
        self.assertRaises(
            OperationFailure, bad_client.pymongo_test.test.find_one)
    finally:
        # Clean up.
        remove_all_users(c.pymongo_test)
        remove_all_users(c.admin)
def test_recovering_member_triggers_refresh(self):
    # To test that find_one() and count() trigger immediate refreshes,
    # we'll create a separate client for each
    self.c_find_one, self.c_count = yield [
        motor.MotorReplicaSetClient(self.seed,
                                    replicaSet=self.name,
                                    read_preference=SECONDARY).open()
        for _ in range(2)
    ]

    # We've started the primary and one secondary
    primary = ha_tools.get_primary()
    secondary = ha_tools.get_secondaries()[0]

    # Pre-condition: just make sure they all connected OK
    for c in self.c_find_one, self.c_count:
        self.assertEqual(one(c.secondaries), _partition_node(secondary))

    # Put the only secondary into maintenance mode.
    ha_tools.set_maintenance(secondary, True)

    # Trigger a refresh in various ways
    with assert_raises(AutoReconnect):
        yield self.c_find_one.test.test.find_one()

    with assert_raises(AutoReconnect):
        yield self.c_count.test.test.count()

    # Wait for the immediate refresh to complete - we're not waiting for
    # the periodic refresh, which has been disabled
    yield self.pause(1)

    # Both clients dropped the secondary and still see the primary.
    for c in self.c_find_one, self.c_count:
        self.assertFalse(c.secondaries)
        self.assertEqual(_partition_node(primary), c.primary)
def test_stepdown_triggers_refresh(self): c_find_one = MongoReplicaSetClient(self.seed, replicaSet=self.name, use_greenlets=use_greenlets) # We've started the primary and one secondary primary = ha_tools.get_primary() secondary = ha_tools.get_secondaries()[0] self.assertEqual(one(c_find_one.secondaries), _partition_node(secondary)) ha_tools.stepdown_primary() # Make sure the stepdown completes sleep(1) # Trigger a refresh self.assertRaises(AutoReconnect, c_find_one.test.test.find_one) # Wait for the immediate refresh to complete - we're not waiting for # the periodic refresh, which has been disabled sleep(1) # We've detected the stepdown self.assertTrue(not c_find_one.primary or primary != _partition_node(c_find_one.primary))
def test_init_disconnected_with_auth(self):
    """Lazily-connecting clients authenticate from URI credentials on first op.

    NOTE(review): the URI credentials appear redacted ("*****") in this
    copy of the source -- confirm against the original test.
    """
    c = self._get_client()
    c.admin.system.users.remove({})
    c.pymongo_test.system.users.remove({})
    try:
        c.admin.add_user("admin", "pass")
        c.admin.authenticate("admin", "pass")
        c.pymongo_test.add_user("user", "pass")

        # Auth with lazy connection.
        host = one(self.hosts)
        uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        authenticated_client = MongoReplicaSetClient(uri, _connect=False)
        authenticated_client.pymongo_test.test.find_one()

        # Wrong password.
        bad_uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        bad_client = MongoReplicaSetClient(bad_uri, _connect=False)
        self.assertRaises(
            OperationFailure, bad_client.pymongo_test.test.find_one)
    finally:
        # Clean up.
        c.admin.system.users.remove({})
        c.pymongo_test.system.users.remove({})
def test_get_default_database(self):
    """The database named in the URI path is the default database."""
    node_host, node_port = one(self.hosts)
    uri = "mongodb://%s:%d/foo?replicaSet=%s" % (node_host, node_port,
                                                 self.name)
    client = MongoReplicaSetClient(uri, _connect=False)
    self.assertEqual(Database(client, 'foo'), client.get_default_database())
def test_auth_network_error(self):
    if not test.env.auth:
        raise SkipTest('Authentication is not enabled on server')

    # Make sure there's no semaphore leak if we get a network error
    # when authenticating a new socket with cached credentials.
    # Get a client with one socket so we detect if it's leaked.
    c = self.motor_rsc(maxPoolSize=1, waitQueueTimeoutMS=1)
    # Force the client to connect.
    yield c.admin.command('ismaster')

    # Simulate an authenticate() call on a different socket.
    credentials = pymongo.auth._build_credentials_tuple(
        'DEFAULT', 'admin',
        text_type(db_user), text_type(db_password), {}, 'admin')
    c.delegate._cache_credentials('test', credentials, connect=False)

    # Cause a network error on the actual socket.
    pool = get_primary_pool(c)
    socket_info = one(pool.sockets)
    socket_info.sock.close()

    # In __check_auth, the client authenticates its socket with the
    # new credential, but gets a socket.error. Should be reraised as
    # AutoReconnect.
    with self.assertRaises(pymongo.errors.AutoReconnect):
        yield c.test.collection.find_one()

    # No semaphore leak, the pool is allowed to make a new socket.
    yield c.test.collection.find_one()
def test_stepdown_triggers_refresh(self, done): c_find_one = motor.MotorReplicaSetClient( self.seed, replicaSet=self.name).open_sync() # We've started the primary and one secondary primary = ha_tools.get_primary() secondary = ha_tools.get_secondaries()[0] self.assertEqual( one(c_find_one.secondaries), _partition_node(secondary)) ha_tools.stepdown_primary() # Make sure the stepdown completes yield gen.Task(IOLoop.instance().add_timeout, time.time() + 1) # Trigger a refresh yield AssertRaises(AutoReconnect, c_find_one.test.test.find_one) # Wait for the immediate refresh to complete - we're not waiting for # the periodic refresh, which has been disabled yield gen.Task(IOLoop.instance().add_timeout, time.time() + 1) # We've detected the stepdown self.assertTrue( not c_find_one.primary or primary != _partition_node(c_find_one.primary)) done()
def test_init_disconnected_with_auth(self):
    """Lazily-connecting clients authenticate from URI credentials on first op.

    NOTE(review): the URI credentials appear redacted ("*****") in this
    copy of the source -- confirm against the original test.
    """
    c = self._get_client()
    if not server_started_with_auth(c):
        raise SkipTest('Authentication is not enabled on server')

    c.admin.add_user("admin", "pass")
    c.admin.authenticate("admin", "pass")
    try:
        c.pymongo_test.add_user(
            "user", "pass", roles=['readWrite', 'userAdmin'])

        # Auth with lazy connection.
        host = one(self.hosts)
        uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        authenticated_client = MongoReplicaSetClient(uri, _connect=False)
        authenticated_client.pymongo_test.test.find_one()

        # Wrong password.
        bad_uri = "mongodb://*****:*****@%s:%d/pymongo_test?replicaSet=%s" % (
            host[0], host[1], self.name)
        bad_client = MongoReplicaSetClient(bad_uri, _connect=False)
        self.assertRaises(
            OperationFailure, bad_client.pymongo_test.test.find_one)
    finally:
        # Clean up.
        remove_all_users(c.pymongo_test)
        remove_all_users(c.admin)
def test_stepdown_triggers_refresh(self): c_find_one = yield motor.MotorReplicaSetClient( self.seed, replicaSet=self.name).open() # We've started the primary and one secondary primary = ha_tools.get_primary() secondary = ha_tools.get_secondaries()[0] self.assertEqual( one(c_find_one.secondaries), _partition_node(secondary)) ha_tools.stepdown_primary() # Make sure the stepdown completes yield self.pause(1) # Trigger a refresh with assert_raises(AutoReconnect): yield c_find_one.test.test.find_one() # Wait for the immediate refresh to complete - we're not waiting for # the periodic refresh, which has been disabled yield self.pause(1) # We've detected the stepdown self.assertTrue( not c_find_one.primary or primary != _partition_node(c_find_one.primary))
def test_stepdown_triggers_refresh(self): c_find_one = MongoReplicaSetClient( self.seed, replicaSet=self.name, use_greenlets=use_greenlets) # We've started the primary and one secondary primary = ha_tools.get_primary() secondary = ha_tools.get_secondaries()[0] self.assertEqual( one(c_find_one.secondaries), _partition_node(secondary)) ha_tools.stepdown_primary() # Make sure the stepdown completes sleep(1) # Trigger a refresh self.assertRaises(AutoReconnect, c_find_one.test.test.find_one) # Wait for the immediate refresh to complete - we're not waiting for # the periodic refresh, which has been disabled sleep(1) # We've detected the stepdown self.assertTrue( not c_find_one.primary or _partition_node(primary) != c_find_one.primary)
def test_get_default_database_with_authsource(self):
    """The database in the URI path wins over the authSource option."""
    node_host, node_port = one(self.hosts)
    uri = "mongodb://%s:%d/foo?replicaSet=%s&authSource=src" % (
        node_host, node_port, self.name)
    client = MongoReplicaSetClient(uri, _connect=False)
    self.assertEqual(Database(client, 'foo'), client.get_default_database())
def test_exhaust_query_server_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    client = yield self._get_client(max_pool_size=1).open()
    collection = client.motor_test.test
    pool = client._get_primary_pool()
    sock_info = one(pool.sockets)

    # This will cause OperationFailure in all mongo versions since
    # the value for $orderby must be a document.
    cursor = collection.find(
        SON([('$query', {}), ('$orderby', True)]), exhaust=True)

    with self.assertRaises(pymongo.errors.OperationFailure):
        yield cursor.fetch_next

    # The socket must survive the server error and return to the pool.
    self.assertFalse(sock_info.closed)
    self.assertEqual(sock_info, one(pool.sockets))
def test_exhaust_query_server_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    client = yield self._get_client(max_pool_size=1).open()
    collection = client.motor_test.test
    pool = client._get_primary_pool()
    sock_info = one(pool.sockets)

    # This will cause OperationFailure in all mongo versions since
    # the value for $orderby must be a document.
    cursor = collection.find(SON([('$query', {}), ('$orderby', True)]),
                             exhaust=True)

    with self.assertRaises(pymongo.errors.OperationFailure):
        yield cursor.fetch_next

    # The socket must survive the server error and return to the pool.
    self.assertFalse(sock_info.closed)
    self.assertEqual(sock_info, one(pool.sockets))
def test_check_socket(self):
    # Test that MotorPool._check(socket_info) replaces a closed socket
    # and doesn't leak a counter.
    yield from self.cx.open()
    pool = self.cx._get_primary_pool()
    pool._check_interval_seconds = 0  # Always check.
    counter = pool.motor_sock_counter
    sock_info = one(pool.sockets)
    # Close the raw socket behind the pool's back, then return it.
    sock_info.sock.close()
    pool.maybe_return_socket(sock_info)

    # New socket replaces closed one.
    yield from self.cx.server_info()
    sock_info2 = one(pool.sockets)
    self.assertNotEqual(sock_info, sock_info2)

    # Counter isn't leaked.
    self.assertEqual(counter, pool.motor_sock_counter)
def test_not_master_error(self):
    """Inserts on a direct secondary connection raise NotMasterError.

    Both acknowledged (default) and unacknowledged (w=0) writes must fail.
    """
    sec_host, sec_port = one(self.secondaries)
    direct_client = single_client(sec_host, sec_port)

    with self.assertRaises(NotMasterError):
        direct_client.pymongo_test.collection.insert_one({})

    unacked_db = direct_client.get_database(
        "pymongo_test", write_concern=WriteConcern(w=0))
    with self.assertRaises(NotMasterError):
        unacked_db.collection.insert_one({})
def _test_exhaust_query_server_error(self, rs):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    server = self.primary_or_standalone(rs=rs)
    client = motor_asyncio.AsyncIOMotorClient(server.uri, max_pool_size=1,
                                              io_loop=self.loop)
    yield from client.open()
    pool = client._get_primary_pool()
    sock_info = one(pool.sockets)
    cursor = client.db.collection.find(exhaust=True)
    fetch_next = self.fetch_next(cursor)
    # Intercept the OP_QUERY on the mock server and make it fail.
    request = yield from self.run_thread(server.receives, OpQuery)
    request.fail()

    with self.assertRaises(pymongo.errors.OperationFailure):
        yield from fetch_next

    # The socket must survive the server error and return to the pool.
    self.assertFalse(sock_info.closed)
    self.assertEqual(sock_info, one(pool.sockets))
def test_not_master_error(self):
    """Inserts on a direct secondary connection raise NotMasterError.

    Both acknowledged (default) and unacknowledged (w=0) writes must fail.
    """
    sec_host, sec_port = one(self.secondaries)
    direct_client = single_client(sec_host, sec_port)

    with self.assertRaises(NotMasterError):
        direct_client.pymongo_test.collection.insert_one({})

    unacked_db = direct_client.get_database(
        "pymongo_test", write_concern=WriteConcern(w=0))
    with self.assertRaises(NotMasterError):
        unacked_db.collection.insert_one({})
def _test_exhaust_query_server_error(self, rs):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    server = self.primary_or_standalone(rs=rs)
    client = motor.MotorClient(server.uri, maxPoolSize=1)
    yield client.admin.command('ismaster')
    pool = get_primary_pool(client)
    sock_info = one(pool.sockets)
    cursor = client.db.collection.find(cursor_type=CursorType.EXHAUST)

    # With Tornado, simply accessing fetch_next starts the fetch.
    fetch_next = cursor.fetch_next
    # Intercept the OP_QUERY on the mock server and make it fail.
    request = yield self.run_thread(server.receives, OpQuery)
    request.fail()

    with self.assertRaises(pymongo.errors.OperationFailure):
        yield fetch_next

    # The socket must survive the server error and return to the pool.
    self.assertFalse(sock_info.closed)
    self.assertEqual(sock_info, one(pool.sockets))
def test_gridfs_secondary_lazy(self):
    """A lazily-connecting GridFS on a secondary must not build indexes."""
    sec_host, sec_port = one(self.secondaries)
    lazy_client = single_client(sec_host, sec_port,
                                read_preference=ReadPreference.SECONDARY,
                                connect=False)
    # No connection has been made yet.
    fs = gridfs.GridFS(lazy_client.test_gridfs_secondary_lazy)
    # The read connects (and must not create an index); the write fails.
    self.assertRaises(NoFile, fs.get_last_version)
    self.assertRaises(ConnectionFailure, fs.put, "data")
def test_gridfs_secondary(self):
    """A GridFS put through a secondary-only client raises NotMasterError."""
    sec_host, sec_port = one(self.client.secondaries)
    sec_client = single_client(sec_host, sec_port,
                               read_preference=ReadPreference.SECONDARY)
    # GridFS should detect it's on a secondary and skip index creation.
    fs = gridfs.GridFS(sec_client.gfsreplica, 'gfssecondarytest')
    # The write itself cannot detect the secondary in advance; it errors.
    self.assertRaises(NotMasterError, fs.put, b'foo')
def test_gridfs_secondary(self):
    """A GridFSBucket upload via a secondary-only client raises NotPrimaryError."""
    sec_host, sec_port = one(self.client.secondaries)
    sec_client = single_client(sec_host, sec_port,
                               read_preference=ReadPreference.SECONDARY)
    # GridFSBucket should detect it's on a secondary and skip index creation.
    bucket = gridfs.GridFSBucket(
        sec_client.gfsbucketreplica, 'gfsbucketsecondarytest')
    # The upload itself cannot detect the secondary in advance; it errors.
    self.assertRaises(NotPrimaryError, bucket.upload_from_stream,
                      "test_filename", b'foo')
def test_gridfs_secondary_lazy(self):
    """A lazily-connecting GridFS on a secondary must not build indexes."""
    sec_host, sec_port = one(self.client.secondaries)
    lazy_client = single_client(sec_host, sec_port,
                                read_preference=ReadPreference.SECONDARY,
                                connect=False)
    # No connection has been made yet.
    fs = gridfs.GridFS(lazy_client.gfsreplica, 'gfssecondarylazytest')
    # The read connects (and must not create an index); the write fails.
    self.assertRaises(NoFile, fs.get_last_version)
    self.assertRaises(NotMasterError, fs.put, 'data')
def test_gridfs_secondary(self):
    """A GridFS put through a secondary-only client raises ConnectionFailure."""
    pri_host, pri_port = self.primary
    # Kept for its connection side effect, matching the original test.
    pri_client = single_client(pri_host, pri_port)

    sec_host, sec_port = one(self.secondaries)
    sec_client = single_client(sec_host, sec_port,
                               read_preference=ReadPreference.SECONDARY)
    # GridFS should detect it's on a secondary and skip index creation.
    fs = gridfs.GridFS(sec_client.gfsreplica, 'gfssecondarytest')
    # The write itself cannot detect the secondary in advance; it errors.
    self.assertRaises(ConnectionFailure, fs.put, b'foo')
def test_gridfs_secondary(self):
    """A GridFS put through a secondary-only client raises ConnectionFailure."""
    pri_host, pri_port = self.primary
    pri_client = single_client(pri_host, pri_port)

    sec_host, sec_port = one(self.secondaries)
    sec_client = single_client(sec_host, sec_port,
                               read_preference=ReadPreference.SECONDARY)

    # Start from a clean GridFS namespace on the primary.
    pri_client.pymongo_test.drop_collection("fs.files")
    pri_client.pymongo_test.drop_collection("fs.chunks")

    # GridFS should detect it's on a secondary and skip index creation.
    fs = gridfs.GridFS(sec_client.pymongo_test)
    # The write itself cannot detect the secondary in advance; it errors.
    self.assertRaises(ConnectionFailure, fs.put, b"foo")
def test_gridfs_secondary_lazy(self):
    """A lazily-connecting GridFSBucket on a secondary must not build indexes."""
    sec_host, sec_port = one(self.secondaries)
    lazy_client = single_client(sec_host, sec_port,
                                read_preference=ReadPreference.SECONDARY,
                                connect=False)
    # No connection has been made yet.
    bucket = gridfs.GridFSBucket(lazy_client.test_gridfs_secondary_lazy)
    # The read connects (and must not create an index); the write fails.
    self.assertRaises(NoFile, bucket.open_download_stream_by_name,
                      "test_filename")
    self.assertRaises(ConnectionFailure, bucket.upload_from_stream,
                      "test_filename", b'data')
def test_gridfs_secondary(self):
    """A GridFS put through a secondary-only client raises ConnectionFailure."""
    pri_host, pri_port = self.primary
    pri_client = single_client(pri_host, pri_port)

    sec_host, sec_port = one(self.secondaries)
    sec_client = single_client(sec_host, sec_port,
                               read_preference=ReadPreference.SECONDARY)

    # Start from a clean GridFS namespace on the primary.
    pri_client.pymongo_test.drop_collection("fs.files")
    pri_client.pymongo_test.drop_collection("fs.chunks")

    # GridFS should detect it's on a secondary and skip index creation.
    fs = gridfs.GridFS(sec_client.pymongo_test)
    # The write itself cannot detect the secondary in advance; it errors.
    self.assertRaises(ConnectionFailure, fs.put, b'foo')
def test_exhaust_query_network_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid counter leak.
    client = yield self._get_client(max_pool_size=1).open()
    collection = client.motor_test.test
    pool = client._get_primary_pool()
    pool._check_interval_seconds = None  # Never check.

    # Cause a network error.
    sock_info = one(pool.sockets)
    sock_info.sock.close()
    cursor = collection.find(exhaust=True)

    with self.assertRaises(pymongo.errors.ConnectionFailure):
        yield cursor.fetch_next

    # The dead socket was closed, not returned to the pool.
    self.assertTrue(sock_info.closed)
    del cursor
    self.assertNotIn(sock_info, pool.sockets)
def test_gridfs_secondary_lazy(self):
    """Lazily-connecting client on a secondary: no index, upload fails."""
    # Should detect it's connected to secondary and not attempt to
    # create index.
    secondary_address = one(self.secondaries)
    client = single_client(
        secondary_address[0],
        secondary_address[1],
        read_preference=ReadPreference.SECONDARY,
        connect=False)

    # Still no connection.
    bucket = gridfs.GridFSBucket(client.test_gridfs_secondary_lazy)

    # Connects, doesn't create index.
    self.assertRaises(
        NoFile, bucket.open_download_stream_by_name, "test_filename")
    self.assertRaises(
        ConnectionFailure,
        bucket.upload_from_stream,
        "test_filename",
        b'data')
def test_exhaust_query_server_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid semaphore leaks.
    client = connected(rs_or_single_client(maxPoolSize=1))
    collection = client.pymongo_test.test
    pool = get_pool(client)
    checked_out = one(pool.sockets)

    # This will cause OperationFailure in all mongo versions since
    # the value for $orderby must be a document.
    bad_spec = SON([("$query", {}), ("$orderby", True)])
    cursor = collection.find(bad_spec, cursor_type=CursorType.EXHAUST)
    self.assertRaises(OperationFailure, cursor.next)

    # Server-side error: socket is still healthy, not closed.
    self.assertFalse(checked_out.closed)

    # The socket was checked in and the semaphore was decremented.
    self.assertIn(checked_out, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_query_network_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid semaphore leaks.
    client = connected(rs_or_single_client(maxPoolSize=1))
    collection = client.pymongo_test.test
    pool = get_pool(client)
    pool._check_interval_seconds = None  # Never check.

    # Cause a network error by closing the pooled socket out from
    # under the pool.
    broken = one(pool.sockets)
    broken.sock.close()

    cursor = collection.find(cursor_type=CursorType.EXHAUST)
    self.assertRaises(ConnectionFailure, cursor.next)
    self.assertTrue(broken.closed)

    # The socket was closed and the semaphore was decremented.
    self.assertNotIn(broken, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_query_server_error(self):
    # When doing an exhaust query, the socket stays checked out on success
    # but must be checked in on error to avoid semaphore leaks.
    client = connected(rs_or_single_client(maxPoolSize=1))
    collection = client.pymongo_test.test
    pool = get_pool(client)
    pooled_sock = one(pool.sockets)

    # This will cause OperationFailure in all mongo versions since
    # the value for $orderby must be a document.
    cursor = collection.find(
        SON([('$query', {}), ('$orderby', True)]),
        cursor_type=CursorType.EXHAUST)
    self.assertRaises(OperationFailure, cursor.next)

    # A server error, unlike a network error, leaves the socket open.
    self.assertFalse(pooled_sock.closed)

    # The socket was checked in and the semaphore was decremented.
    self.assertIn(pooled_sock, pool.sockets)
    self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_recovering_member_triggers_refresh(self):
    # To test that find_one() and count() trigger immediate refreshes,
    # we'll create a separate client for each.
    clients = []
    for _ in xrange(2):
        clients.append(MongoClient(
            self.seed,
            replicaSet=self.name,
            read_preference=SECONDARY,
            serverSelectionTimeoutMS=self.server_selection_timeout))

    self.c_find_one, self.c_count = clients

    # We've started the primary and one secondary.
    primary = ha_tools.get_primary()
    secondary = ha_tools.get_secondaries()[0]

    # Pre-condition: just make sure they all connected OK.
    for c in self.c_find_one, self.c_count:
        wait_until(
            lambda: c.primary == partition_node(primary),
            'connect to the primary')
        wait_until(
            lambda: one(c.secondaries) == partition_node(secondary),
            'connect to the secondary')

    ha_tools.set_maintenance(secondary, True)

    # Trigger a refresh in various ways.
    self.assertRaises(AutoReconnect, self.c_find_one.test.test.find_one)
    self.assertRaises(AutoReconnect, self.c_count.test.test.count)

    # Wait for the immediate refresh to complete - we're not waiting for
    # the periodic refresh, which has been disabled.
    time.sleep(1)

    self.assertFalse(self.c_find_one.secondaries)
    self.assertEqual(partition_node(primary), self.c_find_one.primary)
    self.assertFalse(self.c_count.secondaries)
    self.assertEqual(partition_node(primary), self.c_count.primary)