def test(self):
    server = MockupDB()
    self.addCleanup(server.stop)
    server.run()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=6)

    pref = make_read_preference(read_pref_mode_from_name(mode),
                                tag_sets=None)

    client = MongoClient(server.uri, read_preference=pref)
    self.addCleanup(client.close)

    with going(operation.function, client):
        request = server.receive()
        request.reply(operation.reply)

    if operation.op_type == 'always-use-secondary':
        self.assertEqual(ReadPreference.SECONDARY.document,
                         request.doc.get('$readPreference'))
        slave_ok = mode != 'primary'
    elif operation.op_type == 'must-use-primary':
        slave_ok = False
    elif operation.op_type == 'may-use-secondary':
        slave_ok = mode != 'primary'
        self.assertEqual(pref.document,
                         request.doc.get('$readPreference'))
    else:
        self.fail('unrecognized op_type %r' % operation.op_type)

    if slave_ok:
        self.assertTrue(request.slave_ok, 'SlaveOkay not set')
    else:
        self.assertFalse(request.slave_ok, 'SlaveOkay set')
class TestMixedVersionSharded(unittest.TestCase):
    def setup_server(self, upgrade):
        self.mongos_old, self.mongos_new = MockupDB(), MockupDB()

        # Collect queries to either server in one queue.
        self.q = Queue()
        for server in self.mongos_old, self.mongos_new:
            server.subscribe(self.q.put)
            server.autoresponds('getlasterror')
            server.run()
            self.addCleanup(server.stop)

        # Max wire version is too old for the upgraded operation.
        self.mongos_old.autoresponds('ismaster', ismaster=True,
                                     msg='isdbgrid',
                                     maxWireVersion=upgrade.wire_version - 1)

        # Up-to-date max wire version.
        self.mongos_new.autoresponds('ismaster', ismaster=True,
                                     msg='isdbgrid',
                                     maxWireVersion=upgrade.wire_version)

        self.mongoses_uri = 'mongodb://%s,%s' % (
            self.mongos_old.address_string,
            self.mongos_new.address_string)

        self.client = MongoClient(self.mongoses_uri)

    def tearDown(self):
        if hasattr(self, 'client') and self.client:
            self.client.close()
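# A test built on setup_server() might drive both mongoses and assert that
# each one receives the wire-protocol form matching its advertised version.
# This is a sketch only: the `upgrade` object's fields (function, old, new,
# wire_version) are assumed from the surrounding suite, and the operation is
# assumed to be an unacknowledged legacy write, so its acknowledgment is
# handled by the 'getlasterror' autoresponder rather than an explicit reply.
def create_mixed_version_sharded_test(upgrade):
    def test(self):
        self.setup_server(upgrade)
        servers_used = set()
        # Keep running the operation until both mongoses have served it.
        while len(servers_used) < 2:
            go(upgrade.function, self.client)
            request = self.q.get(timeout=1)
            servers_used.add(request.server)
            request.assert_matches(upgrade.old
                                   if request.server is self.mongos_old
                                   else upgrade.new)
    return test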
def test_auto_dequeue(self):
    server = MockupDB(auto_ismaster=True)
    server.run()
    client = MongoClient(server.uri)
    future = go(client.admin.command, 'ping')
    server.autoresponds('ping')  # Should dequeue the request.
    future()
def test_autoresponds_case_insensitive(self):
    server = MockupDB(auto_ismaster=True)
    # Register 'fooBar' but send 'Foobar'. Note this is only
    # case-insensitive because the matcher is a Command.
    server.autoresponds(Command('fooBar'), foo='bar')
    server.run()
    response = MongoClient(server.uri).admin.command('Foobar')
    self.assertEqual('bar', response['foo'])
def test_query_and_read_mode_sharded_op_msg(self):
    """Test OP_MSG sends non-primary $readPreference and never $query."""
    server = MockupDB()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=6)
    server.run()
    self.addCleanup(server.stop)

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    read_prefs = (
        Primary(),
        SecondaryPreferred(),
        PrimaryPreferred(),
        Secondary(),
        Nearest(),
        SecondaryPreferred([{'tag': 'value'}]),)

    for query in ({'a': 1}, {'$query': {'a': 1}},):
        for mode in read_prefs:
            collection = client.db.get_collection('test',
                                                  read_preference=mode)
            cursor = collection.find(query.copy())
            with going(next, cursor):
                request = server.receives()
                # Command is not nested in $query.
                request.assert_matches(OpMsg(
                    SON([('find', 'test'),
                         ('filter', {'a': 1}),
                         ('$readPreference', mode.document)])))

                request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}})
def test_auth_recovering_member(self):
    # Test that we don't attempt auth against a recovering RS member.
    server = MockupDB()
    server.autoresponds(
        'ismaster', {
            'minWireVersion': 2,
            'maxWireVersion': 6,
            'ismaster': False,
            'secondary': False,
            'setName': 'rs'
        })

    server.run()
    self.addCleanup(server.stop)

    client = MongoClient(server.uri,
                         replicaSet='rs',
                         serverSelectionTimeoutMS=100,
                         socketTimeoutMS=100)

    self.addCleanup(client.close)

    # Should see there's no primary or secondary and raise selection timeout
    # error. If it raises AutoReconnect we know it actually tried the
    # server, and that's wrong.
    with self.assertRaises(ServerSelectionTimeoutError):
        client.db.authenticate('user', 'password')
def test_aggregate(self):
    server = MockupDB()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=6)
    self.addCleanup(server.stop)
    server.run()

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    collection = client.test.collection
    with going(collection.aggregate, []):
        command = server.receives(aggregate='collection', pipeline=[])
        self.assertFalse(command.slave_ok, 'SlaveOkay set')
        command.ok(result=[{}])

    secondary_collection = collection.with_options(
        read_preference=ReadPreference.SECONDARY)

    with going(secondary_collection.aggregate, []):
        command = server.receives(
            OpMsg({"aggregate": "collection",
                   "pipeline": [],
                   '$readPreference': {'mode': 'secondary'}}))
        command.ok(result=[{}])
        self.assertTrue(command.slave_ok, 'SlaveOkay not set')
def test_autoresponds_case_insensitive(self):
    server = MockupDB()
    # Little M. Note this is only case-insensitive because it's a Command.
    server.autoresponds(Command('ismaster'), foo='bar')
    server.run()
    response = MongoClient(server.uri).admin.command('isMaster')  # Big M.
    self.assertEqual('bar', response['foo'])
def test_aggregate(self):
    server = MockupDB()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=5)
    self.addCleanup(server.stop)
    server.run()

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    collection = client.test.collection
    with going(collection.aggregate, []):
        command = server.receives(aggregate='collection', pipeline=[])
        self.assertFalse(command.slave_ok, 'SlaveOkay set')
        self.assertNotIn('$readPreference', command)
        command.ok(result=[{}])

    secondary_collection = collection.with_options(
        read_preference=ReadPreference.SECONDARY)

    with going(secondary_collection.aggregate, []):
        command = server.receives({
            '$query': SON([('aggregate', 'collection'),
                           ('pipeline', []),
                           ('cursor', {})]),
            '$readPreference': {'mode': 'secondary'}
        })
        command.ok(result=[{}])
        self.assertTrue(command.slave_ok, 'SlaveOkay not set')
def cluster_time_conversation(self, callback, replies):
    cluster_time = Timestamp(0, 0)
    server = MockupDB()

    # First test all commands include $clusterTime with wire version 6.
    responder = server.autoresponds(
        'ismaster', {
            'minWireVersion': 0,
            'maxWireVersion': 6,
            '$clusterTime': {'clusterTime': cluster_time}
        })

    server.run()
    self.addCleanup(server.stop)

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    with going(callback, client):
        for reply in replies:
            request = server.receives()
            self.assertIn('$clusterTime', request)
            self.assertEqual(request['$clusterTime']['clusterTime'],
                             cluster_time)
            cluster_time = Timestamp(cluster_time.time,
                                     cluster_time.inc + 1)
            reply['$clusterTime'] = {'clusterTime': cluster_time}
            request.reply(reply)

    # Now test that no commands include $clusterTime with wire version 5,
    # even though the isMaster reply still has $clusterTime.
    server.cancel_responder(responder)
    server.autoresponds(
        'ismaster', {
            'minWireVersion': 0,
            'maxWireVersion': 5,
            '$clusterTime': {'clusterTime': cluster_time}
        })

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    with going(callback, client):
        for reply in replies:
            request = server.receives()
            self.assertNotIn('$clusterTime', request)
            request.reply(reply)
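# A concrete test would pass cluster_time_conversation() a callback that
# issues one command per expected reply. A minimal sketch, assuming a single
# 'ping' round trip (the command and reply document are illustrative):
def test_ping(self):
    def callback(client):
        client.db.command('ping')

    self.cluster_time_conversation(callback, [{'ok': 1}])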
def client():
    server = MockupDB(auto_ismaster=True)
    server.run()
    server.autoresponds(
        OpMsg("find", "todos"),
        cursor={"id": 0, "firstBatch": items.json_data})
    mongo_uri = f"{server.uri}/test"

    # Create the new app.
    os.environ["SECRET_KEY"] = "SECRET_KEY"
    os.environ["LOGIN_DISABLED"] = "1"
    test_app = app.create_app(mongo_uri)

    # Use the app to create a test_client that can be used in our tests.
    with test_app.test_client() as client:
        yield client
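# Hypothetical usage of the fixture above, assuming the generator is
# registered as a pytest fixture (e.g. decorated with @pytest.fixture) and
# that the Flask app exposes a route backed by the mocked `find` on the
# "todos" collection; the route path and assertion are illustrative.
def test_list_todos(client):
    response = client.get("/todos/")
    assert response.status_code == 200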
def test_mongos(self):
    mongos = MockupDB()
    mongos.autoresponds('ismaster', maxWireVersion=5,
                        ismaster=True, msg='isdbgrid')
    mongos.run()
    self.addCleanup(mongos.stop)

    # No maxStalenessSeconds.
    uri = 'mongodb://localhost:%d/?readPreference=secondary' % mongos.port

    client = MongoClient(uri)
    self.addCleanup(client.close)
    with going(client.db.coll.find_one) as future:
        request = mongos.receives()
        self.assertNotIn('maxStalenessSeconds',
                         request.doc['$readPreference'])

        self.assertTrue(request.slave_okay)
        request.ok(cursor={'firstBatch': [], 'id': 0})

    # find_one succeeds with no result.
    self.assertIsNone(future())

    # Set maxStalenessSeconds to 1. Client has no minimum with mongos,
    # we let mongos enforce the 90-second minimum and return an error:
    # SERVER-27146.
    uri = 'mongodb://localhost:%d/?readPreference=secondary' \
          '&maxStalenessSeconds=1' % mongos.port

    client = MongoClient(uri)
    self.addCleanup(client.close)
    with going(client.db.coll.find_one) as future:
        request = mongos.receives()
        self.assertEqual(
            1, request.doc['$readPreference']['maxStalenessSeconds'])

        self.assertTrue(request.slave_okay)
        request.ok(cursor={'firstBatch': [], 'id': 0})

    self.assertIsNone(future())
class TestSlaveOkayRS(unittest.TestCase):
    def setup_server(self, wire_version):
        self.primary, self.secondary = MockupDB(), MockupDB()
        for server in self.primary, self.secondary:
            server.run()
            self.addCleanup(server.stop)

        hosts = [server.address_string
                 for server in (self.primary, self.secondary)]
        self.primary.autoresponds('ismaster',
                                  ismaster=True,
                                  setName='rs',
                                  hosts=hosts,
                                  maxWireVersion=wire_version)
        self.secondary.autoresponds('ismaster',
                                    ismaster=False,
                                    secondary=True,
                                    setName='rs',
                                    hosts=hosts,
                                    maxWireVersion=wire_version)
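# Sketch of a test built on setup_server(); the wire version and the specific
# read operation are illustrative, not taken from the original suite. A
# default (primary) read should land on the primary with SlaveOkay unset.
def test_primary_read(self):
    self.setup_server(wire_version=6)
    client = MongoClient(self.primary.uri, replicaSet='rs')
    self.addCleanup(client.close)
    with going(client.db.coll.find_one):
        request = self.primary.receives()
        request.reply({'cursor': {'id': 0, 'firstBatch': []}})

    self.assertFalse(request.slave_ok, 'SlaveOkay set')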
def test_query_and_read_mode_sharded_op_query(self):
    server = MockupDB()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=5)
    server.run()
    self.addCleanup(server.stop)

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    modes_without_query = (
        Primary(),
        SecondaryPreferred(),)

    modes_with_query = (
        PrimaryPreferred(),
        Secondary(),
        Nearest(),
        SecondaryPreferred([{'tag': 'value'}]),)

    find_command = SON([('find', 'test'), ('filter', {'a': 1})])
    for query in ({'a': 1}, {'$query': {'a': 1}},):
        for mode in modes_with_query + modes_without_query:
            collection = client.db.get_collection('test',
                                                  read_preference=mode)
            cursor = collection.find(query.copy())
            with going(next, cursor):
                request = server.receives()
                if mode in modes_without_query:
                    # Filter is hoisted out of $query.
                    request.assert_matches(Command(find_command))
                    self.assertFalse('$readPreference' in request)
                else:
                    # Command is nested in $query.
                    request.assert_matches(Command(
                        SON([('$query', find_command),
                             ('$readPreference', mode.document)])))

                request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}})
def test_query_and_read_mode_sharded(self):
    server = MockupDB()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid')
    server.run()
    self.addCleanup(server.stop)

    client = MongoClient(server.uri)
    self.addCleanup(client.close)

    modes_without_query = (
        Primary(),
        SecondaryPreferred(),)

    modes_with_query = (
        PrimaryPreferred(),
        Secondary(),
        Nearest(),
        SecondaryPreferred([{'tag': 'value'}]),)

    for query in ({'a': 1}, {'$query': {'a': 1}},):
        for mode in modes_with_query + modes_without_query:
            collection = client.db.get_collection('test',
                                                  read_preference=mode)
            cursor = collection.find(query.copy())
            with going(next, cursor):
                request = server.receives(OpQuery)
                if mode in modes_without_query:
                    # Query is not edited: {'a': 1} is not nested in $query,
                    # {'$query': {'a': 1}} is not hoisted.
                    request.assert_matches(query)
                    self.assertFalse('$readPreference' in request)
                else:
                    # {'a': 1} is *always* nested in $query.
                    request.assert_matches({
                        '$query': {'a': 1},
                        '$readPreference': mode.document
                    })

                request.replies({})
class TestResetAndRequestCheck(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestResetAndRequestCheck, self).__init__(*args, **kwargs)
        self.ismaster_time = 0
        self.client = None
        self.server = None

    def setup_server(self):
        self.server = MockupDB()

        def responder(request):
            self.ismaster_time = time.time()
            return request.ok(ismaster=True, minWireVersion=2,
                              maxWireVersion=6)

        self.server.autoresponds('ismaster', responder)
        self.server.run()
        self.addCleanup(self.server.stop)

        kwargs = {'socketTimeoutMS': 100}
        # Disable retryable reads when pymongo supports it.
        kwargs['retryReads'] = False
        self.client = MongoClient(self.server.uri, **kwargs)
        wait_until(lambda: self.client.nodes, 'connect to standalone')

    def tearDown(self):
        if hasattr(self, 'client') and self.client:
            self.client.close()

    def _test_disconnect(self, operation):
        # Application operation fails. Test that client resets server
        # description and does *not* schedule immediate check.
        self.setup_server()

        # Network error on application operation.
        with self.assertRaises(ConnectionFailure):
            with going(operation.function, self.client):
                self.server.receives().hangup()

        # Server is Unknown.
        topology = self.client._topology
        with self.assertRaises(ConnectionFailure):
            topology.select_server_by_address(self.server.address, 0)

        time.sleep(0.5)
        after = time.time()

        # Demand a reconnect.
        with going(self.client.db.command, 'buildinfo'):
            self.server.receives('buildinfo').ok()

        last = self.ismaster_time
        self.assertGreaterEqual(last, after, 'called ismaster before needed')

    def _test_timeout(self, operation):
        # Application operation times out. Test that client does *not* reset
        # server description and does *not* schedule immediate check.
        self.setup_server()

        with self.assertRaises(ConnectionFailure):
            with going(operation.function, self.client):
                self.server.receives()
                before = self.ismaster_time
                time.sleep(0.5)

        # Server is *not* Unknown.
        topology = self.client._topology
        server = topology.select_server_by_address(self.server.address, 0)
        self.assertEqual(SERVER_TYPE.Standalone,
                         server.description.server_type)

        after = self.ismaster_time
        self.assertEqual(after, before, 'unneeded ismaster call')

    def _test_not_master(self, operation):
        # Application operation gets a "not master" error.
        self.setup_server()

        with self.assertRaises(ConnectionFailure):
            with going(operation.function, self.client):
                request = self.server.receives()
                before = self.ismaster_time
                request.replies(operation.not_master)
                time.sleep(1)

        # Server is rediscovered.
        topology = self.client._topology
        server = topology.select_server_by_address(self.server.address, 0)
        self.assertEqual(SERVER_TYPE.Standalone,
                         server.description.server_type)

        after = self.ismaster_time
        self.assertGreater(after, before, 'ismaster not called')
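# Concrete tests dispatch to the _test_* helpers above with an `operation`
# object that knows how to run itself against a client and what a "not
# master" reply looks like. A hypothetical stand-in (the real suite generates
# these elsewhere; Operation uses collections.namedtuple):
Operation = namedtuple('Operation', ['function', 'not_master'])

find_one_op = Operation(
    function=lambda client: client.db.collection.find_one(),
    not_master={'ok': 0, 'errmsg': 'not master'})


class TestFindOneResetAndRequestCheck(TestResetAndRequestCheck):
    def test_disconnect(self):
        self._test_disconnect(find_one_op)

    def test_not_master(self):
        self._test_not_master(find_one_op)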
class ManagerTestCase(unittest.TestCase):
    maxDiff = None  # unittest: show full diff on assertion failure

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.server = MockupDB(auto_ismaster={"maxWireVersion": 6})
        self.server.run()
        self.server.autoresponds(
            Command("find", "switch_collection",
                    namespace="topology_database"),
            {
                "cursor": {
                    "id": 0,
                    "firstBatch": [
                        {**d, "_id": i}
                        for i, d in enumerate(TOPOLOGY_DATABASE_DATA)
                    ],
                }
            },
        )

        self._stack = AsyncExitStack()
        td = self._stack.enter_context(tempfile.TemporaryDirectory())
        self.rpc_unix_sock = os.path.join(td, "l.sock")

        self._stack.enter_context(
            patch.object(settings, "REMOTE_DATABASE_MONGO_URI",
                         self.server.uri))
        self._stack.enter_context(
            patch.object(settings, "NEGOTIATOR_RPC_UNIX_SOCK_PATH",
                         self.rpc_unix_sock))
        self._stack.enter_context(
            patch("agile_mesh_network.ryu.amn_manager.OVSManager",
                  DummyOVSManager))
        self._stack.enter_context(
            # To avoid automatic connection to a relay.
            patch.object(settings, "IS_RELAY", True))
        self._stack.enter_context(
            patch.object(events_scheduler, "RyuAppEventLoopScheduler"))
        self.ryu_ev_loop_scheduler = events_scheduler.RyuAppEventLoopScheduler()
        self._stack.enter_context(self.ryu_ev_loop_scheduler)

        async def command_cb(session, msg):
            assert isinstance(msg, RPCCommand)
            await self._rpc_command_cb(msg)

        self.rpc_server = self.loop.run_until_complete(
            self._stack.enter_async_context(
                RPCUnixServer(self.rpc_unix_sock, command_cb)))

    async def _rpc_command_cb(self, msg: RPCCommand):
        self.assertEqual(msg.name, "dump_tunnels_state")
        await msg.respond({"tunnels": []})

    def tearDown(self):
        self.loop.run_until_complete(self._stack.aclose())
        self.loop.run_until_complete(self.loop.shutdown_asyncgens())
        self.loop.close()
        self.server.stop()

    def test_topology_database_sync(self):
        async def f():
            async with AgileMeshNetworkManager(
                ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
            ) as manager:
                manager.start_initialization()

                topology_database = manager.topology_database
                local_database = topology_database.local
                await local_database.is_filled_event.wait()
                self.assertTrue(local_database.is_filled)

                self.assertListEqual(
                    topology_database.find_random_relay_switches(),
                    [SwitchEntity.from_dict(SWITCH_ENTITY_RELAY_DATA)],
                )
                with self.assertRaises(KeyError):
                    topology_database.find_switch_by_mac(UNK_MAC)
                self.assertEqual(
                    topology_database.find_switch_by_mac(
                        SWITCH_ENTITY_BOARD_DATA["mac"]),
                    SwitchEntity.from_dict(SWITCH_ENTITY_BOARD_DATA),
                )

                self.assertListEqual(
                    topology_database.find_switches_by_mac_list([]), [])
                self.assertListEqual(
                    topology_database.find_switches_by_mac_list([UNK_MAC]), [])
                self.assertListEqual(
                    topology_database.find_switches_by_mac_list(
                        [UNK_MAC, SWITCH_ENTITY_BOARD_DATA["mac"]]),
                    [SwitchEntity.from_dict(SWITCH_ENTITY_BOARD_DATA)],
                )

                # TODO after resync extra tunnels/flows are destroyed

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))

    def test_rpc(self):
        async def f():
            rpc_responses = iter([
                ("dump_tunnels_state",
                 {"tunnels": [TUNNEL_MODEL_BOARD_DATA]}),
                (
                    "create_tunnel",
                    {
                        "tunnel": TUNNEL_MODEL_RELAY_DATA,
                        "tunnels": [
                            TUNNEL_MODEL_BOARD_DATA,
                            TUNNEL_MODEL_RELAY_DATA,
                        ],
                    },
                ),
                (
                    "create_tunnel",
                    {
                        "tunnel": TUNNEL_MODEL_BOARD_DATA,
                        "tunnels": [TUNNEL_MODEL_BOARD_DATA],
                    },
                ),
            ])

            async def _rpc_command_cb(msg: RPCCommand):
                name, resp = next(rpc_responses)
                self.assertEqual(msg.name, name)
                await msg.respond(resp)

            with ExitStack() as stack:
                stack.enter_context(
                    patch.object(self, "_rpc_command_cb", _rpc_command_cb))
                stack.enter_context(patch.object(settings, "IS_RELAY", False))

                async with AgileMeshNetworkManager(
                    ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
                ) as manager:
                    manager.start_initialization()
                    await manager._initialization_task
                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Don't attempt to connect to unknown macs.
                    manager.ask_for_tunnel(UNK_MAC)
                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Connect to a switch, ensure that the task is cleaned up.
                    manager.ask_for_tunnel(SECOND_MAC)
                    await next(iter(manager._tunnel_creation_tasks.values()))
                    self.assertDictEqual({}, manager._tunnel_creation_tasks)

                    # Send a broadcast
                    await next(iter(self.rpc_server.sessions)).issue_broadcast(
                        "tunnel_created",
                        {
                            "tunnel": TUNNEL_MODEL_RELAY_DATA,
                            "tunnels": [TUNNEL_MODEL_RELAY_DATA],
                        },
                    )
                    await asyncio.sleep(0.001)

                    # TODO unknown tunnels after resync are dropped via RPC

                    expected_event_calls = [
                        # Initialization list:
                        [TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA)],
                        # Initialization relay tunnel:
                        [
                            TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA),
                            TunnelModel.from_dict(TUNNEL_MODEL_RELAY_DATA),
                        ],
                        # ask_for_tunnel:
                        [TunnelModel.from_dict(TUNNEL_MODEL_BOARD_DATA)],
                        # Broadcast:
                        [TunnelModel.from_dict(TUNNEL_MODEL_RELAY_DATA)],
                    ]
                    for (args, kwargs), ev_expected in zip_equal(
                        self.ryu_ev_loop_scheduler.send_event_to_observers
                            .call_args_list,
                        expected_event_calls,
                    ):
                        ev = args[0]
                        self.assertListEqual(
                            sorted(t for t, _ in ev.mac_to_tunswitch.values()),
                            sorted(ev_expected),
                        )

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))

    def test_flows(self):
        async def f():
            async with AgileMeshNetworkManager(
                ryu_ev_loop_scheduler=self.ryu_ev_loop_scheduler
            ) as manager:
                manager.start_initialization()
                # TODO missing flows from RPC sync are added
                # TODO after packet in a tunnel creation request is sent
                # TODO after tunnel creation a flow is set up
                pass

        self.loop.run_until_complete(asyncio.wait_for(f(), timeout=3))