def run_scenario(self):
    """Run one max-staleness spec test.

    Closes over ``scenario_def`` (a parsed JSON spec file) from the
    enclosing test-generation scope.  Builds a mock topology from the
    scenario's server descriptions, then checks that selection with the
    scenario's read preference raises, or selects exactly the servers in
    ``in_latency_window``.
    """
    # heartbeatFrequencyMS is milliseconds; TopologySettings wants seconds.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    topology = Topology(
        TopologySettings(seeds=seeds,
                         monitor_class=MockMonitor,
                         pool_class=MockPool,
                         heartbeat_frequency=frequency))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Create server selector.
    # Make first letter lowercase to match read_pref's modes.
    pref_def = scenario_def['read_preference']
    mode_string = pref_def.get('mode', 'primary')
    mode_string = mode_string[:1].lower() + mode_string[1:]
    mode = read_preferences.read_pref_mode_from_name(mode_string)
    # -1 is the "no max staleness" sentinel used by this spec runner.
    max_staleness = pref_def.get('maxStalenessSeconds', -1)
    tag_sets = pref_def.get('tag_sets')

    if scenario_def.get('error'):
        with self.assertRaises(ConfigurationError):
            # Error can be raised when making Read Pref or selecting.
            pref = read_preferences.make_read_preference(
                mode, tag_sets=tag_sets, max_staleness=max_staleness)
            topology.select_server(pref)
        return

    expected_addrs = set([
        server['address'] for server in scenario_def['in_latency_window']])

    # Select servers.
    pref = read_preferences.make_read_preference(
        mode, tag_sets=tag_sets, max_staleness=max_staleness)
    if not expected_addrs:
        # No server is eligible: selection must fail immediately.
        with self.assertRaises(ConnectionFailure):
            topology.select_servers(pref, server_selection_timeout=0)
        return

    servers = topology.select_servers(pref, server_selection_timeout=0)
    actual_addrs = set(['%s:%d' % s.description.address for s in servers])

    for unexpected in actual_addrs - expected_addrs:
        self.fail("'%s' shouldn't have been selected, but was" % unexpected)

    for unselected in expected_addrs - actual_addrs:
        self.fail("'%s' should have been selected, but wasn't" % unselected)
def run_scenario(self):
    """Run one max-staleness spec test (maxStalenessMS-era spec format).

    Identical flow to the maxStalenessSeconds variant, except the spec
    file expresses staleness in milliseconds, converted to seconds here
    (absent key yields 0, i.e. no staleness bound in this format).
    Closes over ``scenario_def`` from the enclosing scope.
    """
    # heartbeatFrequencyMS is milliseconds; TopologySettings wants seconds.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    topology = Topology(
        TopologySettings(seeds=seeds,
                         monitor_class=MockMonitor,
                         pool_class=MockPool,
                         heartbeat_frequency=frequency))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Create server selector.
    # Make first letter lowercase to match read_pref's modes.
    pref_def = scenario_def['read_preference']
    mode_string = pref_def.get('mode', 'primary')
    mode_string = mode_string[:1].lower() + mode_string[1:]
    mode = read_preferences.read_pref_mode_from_name(mode_string)
    # Spec value is milliseconds; convert to seconds.
    max_staleness = pref_def.get('maxStalenessMS', 0) / 1000.0
    tag_sets = pref_def.get('tag_sets')

    if scenario_def.get('error'):
        with self.assertRaises(ConfigurationError):
            # Error can be raised when making Read Pref or selecting.
            pref = read_preferences.make_read_preference(
                mode, tag_sets=tag_sets, max_staleness=max_staleness)
            topology.select_server(pref)
        return

    expected_addrs = set([
        server['address'] for server in scenario_def['in_latency_window']])

    # Select servers.
    pref = read_preferences.make_read_preference(
        mode, tag_sets=tag_sets, max_staleness=max_staleness)
    if not expected_addrs:
        # No server is eligible: selection must fail immediately.
        with self.assertRaises(ConnectionFailure):
            topology.select_servers(pref, server_selection_timeout=0)
        return

    servers = topology.select_servers(pref, server_selection_timeout=0)
    actual_addrs = set(['%s:%d' % s.description.address for s in servers])

    for unexpected in actual_addrs - expected_addrs:
        self.fail("'%s' shouldn't have been selected, but was" % unexpected)

    for unselected in expected_addrs - actual_addrs:
        self.fail("'%s' should have been selected, but wasn't" % unselected)
def test(self):
    """Check the SlaveOkay wire flag for one operation/mode combination.

    Closes over ``operation``, ``mode``, ``server_type`` and ``ismaster``
    from the enclosing test-generation scope; ``self.server`` is
    presumably a MockupDB server (TODO confirm against the class setup).
    """
    # Respond to ismaster with the wire version this operation targets.
    ismaster_with_version = ismaster.copy()
    ismaster_with_version['maxWireVersion'] = operation.wire_version
    self.server.autoresponds('ismaster', **ismaster_with_version)
    # Decide whether the driver should set SlaveOkay for this op_type.
    if operation.op_type == 'always-use-secondary':
        slave_ok = True
    elif operation.op_type == 'may-use-secondary':
        slave_ok = mode != 'primary' or server_type != 'mongos'
    elif operation.op_type == 'must-use-primary':
        slave_ok = server_type != 'mongos'
    else:
        assert False, 'unrecognized op_type %r' % operation.op_type
    pref = make_read_preference(read_pref_mode_from_name(mode),
                                tag_sets=None)
    client = MongoClient(self.server.uri, read_preference=pref)
    self.addCleanup(client.close)
    # Run the operation concurrently while we play the server side.
    with going(operation.function, client):
        request = self.server.receive()
        request.reply(operation.reply)
    # Direct connection to a single server: topology type must be Single.
    self.assertEqual(topology_type_name(client), 'Single')
    if slave_ok:
        self.assertTrue(request.slave_ok, 'SlaveOkay not set')
    else:
        self.assertFalse(request.slave_ok, 'SlaveOkay set')
def test(self):
    """Check $readPreference and SlaveOkay sent to a mongos.

    Closes over ``operation`` and ``mode`` from the enclosing
    test-generation scope.  The mock server advertises itself as a
    mongos (msg='isdbgrid').
    """
    server = MockupDB()
    self.addCleanup(server.stop)
    server.run()
    server.autoresponds('ismaster', ismaster=True, msg='isdbgrid',
                        minWireVersion=2, maxWireVersion=6)
    pref = make_read_preference(read_pref_mode_from_name(mode),
                                tag_sets=None)
    client = MongoClient(server.uri, read_preference=pref)
    self.addCleanup(client.close)
    # Run the operation concurrently while we play the server side.
    with going(operation.function, client):
        request = server.receive()
        request.reply(operation.reply)
    if operation.op_type == 'always-use-secondary':
        self.assertEqual(ReadPreference.SECONDARY.document,
                         request.doc.get('$readPreference'))
        slave_ok = mode != 'primary'
    elif operation.op_type == 'must-use-primary':
        slave_ok = False
    elif operation.op_type == 'may-use-secondary':
        slave_ok = mode != 'primary'
        self.assertEqual(pref.document,
                         request.doc.get('$readPreference'))
    else:
        self.fail('unrecognized op_type %r' % operation.op_type)
    if slave_ok:
        self.assertTrue(request.slave_ok, 'SlaveOkay not set')
    else:
        self.assertFalse(request.slave_ok, 'SlaveOkay set')
def test(self):
    """Check which server receives the operation and what
    $readPreference it carries.

    Closes over ``operation`` and ``mode``; ``self.primary`` /
    ``self.secondary`` are presumably mock servers created by
    ``setup_client`` — TODO confirm against the class setup.
    """
    pref = make_read_preference(read_pref_mode_from_name(mode),
                                tag_sets=None)
    client = self.setup_client(read_preference=pref)
    # Work out which server should be targeted and which preference
    # document should be sent, based on the operation class.
    if operation.op_type == 'always-use-secondary':
        expected_server = self.secondary
        expected_pref = ReadPreference.SECONDARY
    elif operation.op_type == 'must-use-primary':
        expected_server = self.primary
        expected_pref = ReadPreference.PRIMARY
    elif operation.op_type == 'may-use-secondary':
        if mode in ('primary', 'primaryPreferred'):
            expected_server = self.primary
        else:
            expected_server = self.secondary
        expected_pref = pref
    else:
        self.fail('unrecognized op_type %r' % operation.op_type)
    # For single mongod we send primaryPreferred instead of primary.
    if expected_pref == ReadPreference.PRIMARY and self.single_mongod:
        expected_pref = ReadPreference.PRIMARY_PREFERRED
    with going(operation.function, client) as future:
        request = expected_server.receive()
        request.reply(operation.reply)
    future()  # No error.
    self.assertEqual(expected_pref.document,
                     request.doc.get('$readPreference'))
    self.assertNotIn('$query', request.doc)
def _parse_read_preference(options): """Parse read preference options.""" if 'read_preference' in options: return options['read_preference'] mode = options.get('readpreference', 0) tags = options.get('readpreferencetags') return make_read_preference(mode, tags)
def _parse_read_preference(options): """Parse read preference options.""" if "read_preference" in options: return options["read_preference"] mode = options.get("readpreference", 0) tags = options.get("readpreferencetags") return make_read_preference(mode, tags)
def parse_read_preference(pref):
    """Convert a spec-test read preference document into a read
    preference object.

    ``pref`` is a dict from a JSON spec file; -1 is the "no max
    staleness" sentinel.
    """
    # Spec files capitalize the mode's first letter (e.g. "Nearest");
    # lower it to match read_preferences' mode names.
    raw_mode = pref.get('mode', 'primary')
    normalized = raw_mode[:1].lower() + raw_mode[1:]
    return read_preferences.make_read_preference(
        read_preferences.read_pref_mode_from_name(normalized),
        tag_sets=pref.get('tag_sets'),
        max_staleness=pref.get('maxStalenessSeconds', -1))
def _parse_read_preference(options): """Parse read preference options.""" if 'read_preference' in options: return options['read_preference'] mode = options.get('readpreference', 0) tags = options.get('readpreferencetags') max_staleness = options.get('maxstalenessseconds', -1) return make_read_preference(mode, tags, max_staleness)
def _parse_read_preference(options): """Parse read preference options.""" if 'read_preference' in options: return options['read_preference'] name = options.get('readpreference', 'primary') mode = read_pref_mode_from_name(name) tags = options.get('readpreferencetags') max_staleness = options.get('maxstalenessseconds', -1) return make_read_preference(mode, tags, max_staleness)
def _parse_read_preference(options): """Parse read preference options.""" if 'read_preference' in options: return options['read_preference'] mode = options.get('readpreference', 0) tags = options.get('readpreferencetags') # common.validate() has converted from ms to seconds. max_staleness = options.get('maxstalenessms') return make_read_preference(mode, tags, max_staleness)
def _parse_read_preference(options): """Parse read preference options.""" if 'read_preference' in options: return options['read_preference'] mode = options.get('readpreference', 0) tags = options.get('readpreferencetags') # common.validate() has converted from ms to seconds. max_staleness = options.get('maxstalenessms', 0) return make_read_preference(mode, tags, max_staleness)
def _parse_read_preference(options): """Parse read preference options.""" if "read_preference" in options: return options["read_preference"] name = options.get("readpreference", "primary") mode = read_pref_mode_from_name(name) tags = options.get("readpreferencetags") max_staleness = options.get("maxstalenessseconds", -1) return make_read_preference(mode, tags, max_staleness)
def test(self):
    """Check the SlaveOkay wire flag for one operation/mode combination
    against mock mongoses.

    Closes over ``operation`` and ``mode``; ``self.mongoses_uri`` and
    the request queue ``self.q`` are presumably created by
    ``setup_server`` — TODO confirm against the class setup.
    """
    self.setup_server(operation.wire_version)
    # Decide whether the driver should set SlaveOkay for this op_type.
    if operation.op_type == 'always-use-secondary':
        slave_ok = True
    elif operation.op_type == 'may-use-secondary':
        slave_ok = mode != 'primary'
    elif operation.op_type == 'must-use-primary':
        slave_ok = False
    else:
        assert False, 'unrecognized op_type %r' % operation.op_type
    pref = make_read_preference(read_pref_mode_from_name(mode),
                                tag_sets=None)
    client = MongoClient(self.mongoses_uri, read_preference=pref)
    self.addCleanup(client.close)
    # Run the operation concurrently while we play the server side.
    with going(operation.function, client):
        request = self.q.get(timeout=1)
        request.reply(operation.reply)
    if slave_ok:
        self.assertTrue(request.slave_ok, 'SlaveOkay not set')
    else:
        self.assertFalse(request.slave_ok, 'SlaveOkay set')
def run_scenario(self):
    """Run one server-selection spec test.

    Closes over ``scenario_def`` from the enclosing test-generation
    scope.  Builds two mock topologies — one with the default latency
    window, one with an effectively unbounded window — and compares the
    servers each selects against the scenario's ``suitable_servers``
    and ``in_latency_window`` lists.
    """
    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = Topology(
        TopologySettings(seeds=seeds,
                         monitor_class=MockMonitor,
                         pool_class=MockPool))
    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window.
    top_suitable = Topology(
        TopologySettings(seeds=seeds,
                         local_threshold_ms=1000000,
                         monitor_class=MockMonitor,
                         pool_class=MockPool))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def["operation"] == "write":
        instance = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        mode_string = scenario_def['read_preference']['mode']
        if mode_string:
            mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        tag_sets = None
        if scenario_def['read_preference']['tag_sets'][0]:
            tag_sets = scenario_def['read_preference']['tag_sets']
        instance = read_preferences.make_read_preference(mode, tag_sets)

    # Select servers.
    if not scenario_def['suitable_servers']:
        # No server is suitable: selection must fail immediately.
        self.assertRaises(AutoReconnect, top_suitable.select_server,
                          instance, server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        self.assertRaises(AutoReconnect, top_latency.select_server,
                          instance, server_selection_timeout=0)
        return

    actual_suitable_s = top_suitable.select_servers(
        instance, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        instance, server_selection_timeout=0)

    # Compare the suitable set, keyed by "host:port" address.
    expected_suitable_servers = {}
    for server in scenario_def['suitable_servers']:
        server_description = make_server_description(server, hosts)
        expected_suitable_servers[server['address']] = server_description

    actual_suitable_servers = {}
    for s in actual_suitable_s:
        actual_suitable_servers[
            "%s:%d" % (s.description.address[0],
                       s.description.address[1])] = s.description

    self.assertEqual(len(actual_suitable_servers),
                     len(expected_suitable_servers))
    for k, actual in actual_suitable_servers.items():
        expected = expected_suitable_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)

    # Compare the in-latency-window set the same way.
    expected_latency_servers = {}
    for server in scenario_def['in_latency_window']:
        server_description = make_server_description(server, hosts)
        expected_latency_servers[server['address']] = server_description

    actual_latency_servers = {}
    for s in actual_latency_s:
        actual_latency_servers[
            "%s:%d" % (s.description.address[0],
                       s.description.address[1])] = s.description

    self.assertEqual(len(actual_latency_servers),
                     len(expected_latency_servers))
    for k, actual in actual_latency_servers.items():
        expected = expected_latency_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)
def run_scenario(self):
    """Run one server-selection spec test (staleness-aware variant).

    Closes over ``scenario_def`` from the enclosing test-generation
    scope.  Like the basic variant it builds two mock topologies (normal
    vs. unbounded latency window), but additionally honors the spec's
    ``heartbeatFrequencyMS``, ``maxStalenessSeconds`` and ``error``
    fields.
    """
    # Initialize topologies.
    # heartbeatFrequencyMS is milliseconds; TopologySettings wants seconds.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    settings = dict(
        monitor_class=MockMonitor,
        heartbeat_frequency=frequency,
        pool_class=MockPool)

    settings['seeds'], hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = Topology(TopologySettings(**settings))

    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window.
    settings['local_threshold_ms'] = 1000000
    top_suitable = Topology(TopologySettings(**settings))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def.get("operation") == "write":
        pref = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        mode_string = pref_def.get('mode', 'primary')
        mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        # -1 is the "no max staleness" sentinel used by this spec runner.
        max_staleness = pref_def.get('maxStalenessSeconds', -1)
        tag_sets = pref_def.get('tag_sets')

        if scenario_def.get('error'):
            with self.assertRaises((ConfigurationError, ValueError)):
                # Error can be raised when making Read Pref or selecting.
                pref = read_preferences.make_read_preference(
                    mode, tag_sets=tag_sets, max_staleness=max_staleness)
                top_latency.select_server(pref)
            return

        pref = read_preferences.make_read_preference(
            mode, tag_sets=tag_sets, max_staleness=max_staleness)

    # Select servers.
    if not scenario_def.get('suitable_servers'):
        # No server is suitable: selection must fail immediately.
        with self.assertRaises(AutoReconnect):
            top_suitable.select_server(pref, server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        with self.assertRaises(AutoReconnect):
            top_latency.select_server(pref, server_selection_timeout=0)
        return

    actual_suitable_s = top_suitable.select_servers(
        pref, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        pref, server_selection_timeout=0)

    # Compare the suitable set, keyed by "host:port" address.
    expected_suitable_servers = {}
    for server in scenario_def['suitable_servers']:
        server_description = make_server_description(server, hosts)
        expected_suitable_servers[server['address']] = server_description

    actual_suitable_servers = {}
    for s in actual_suitable_s:
        actual_suitable_servers[
            "%s:%d" % (s.description.address[0],
                       s.description.address[1])] = s.description

    self.assertEqual(len(actual_suitable_servers),
                     len(expected_suitable_servers))
    for k, actual in actual_suitable_servers.items():
        expected = expected_suitable_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)

    # Compare the in-latency-window set the same way.
    expected_latency_servers = {}
    for server in scenario_def['in_latency_window']:
        server_description = make_server_description(server, hosts)
        expected_latency_servers[server['address']] = server_description

    actual_latency_servers = {}
    for s in actual_latency_s:
        actual_latency_servers[
            "%s:%d" % (s.description.address[0],
                       s.description.address[1])] = s.description

    self.assertEqual(len(actual_latency_servers),
                     len(expected_latency_servers))
    for k, actual in actual_latency_servers.items():
        expected = expected_latency_servers[k]
        self.assertEqual(expected.address, actual.address)
        self.assertEqual(expected.server_type, actual.server_type)
        self.assertEqual(expected.round_trip_time, actual.round_trip_time)
        self.assertEqual(expected.tags, actual.tags)
        self.assertEqual(expected.all_hosts, actual.all_hosts)
def run_scenario(self):
    """Run one command-monitoring spec test.

    Closes over ``scenario_def`` and ``test`` from the enclosing
    test-generation scope; ``self.client`` and ``self.listener`` are
    presumably set up by the test class — TODO confirm.  Executes the
    test's operation against a real collection, then matches the
    listener's recorded command events against the test's expectations.
    """
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']
    coll = self.client[dbname][collname]
    coll.drop()
    coll.insert_many(scenario_def['data'])
    self.listener.results.clear()
    name = camel_to_snake(test['operation']['name'])
    if 'read_preference' in test['operation']:
        mode = read_pref_mode_from_name(
            test['operation']['read_preference']['mode'])
        coll = coll.with_options(
            read_preference=make_read_preference(mode, None))

    test_args = test['operation']['arguments']
    if 'writeConcern' in test_args:
        concern = test_args.pop('writeConcern')
        coll = coll.with_options(write_concern=WriteConcern(**concern))
    # Convert camelCase spec argument names to snake_case keyword args.
    args = {}
    for arg in test_args:
        args[camel_to_snake(arg)] = test_args[arg]

    if name == 'bulk_write':
        # Translate spec request documents into pymongo request objects
        # (e.g. "insertOne" -> pymongo.InsertOne).
        bulk_args = []
        for request in args['requests']:
            opname = next(iter(request))
            klass = opname[0:1].upper() + opname[1:]
            arg = getattr(pymongo, klass)(**request[opname])
            bulk_args.append(arg)
        try:
            coll.bulk_write(bulk_args, args.get('ordered', True))
        except OperationFailure:
            pass
    elif name == 'find':
        if 'sort' in args:
            args['sort'] = list(args['sort'].items())
        # JSON numbers may parse as floats; find() needs ints here.
        for arg in 'skip', 'limit':
            if arg in args:
                args[arg] = int(args[arg])
        try:
            # Iterate the cursor.
            tuple(coll.find(**args))
        except OperationFailure:
            pass
        # Wait for the killCursors thread to run if necessary.
        if 'limit' in args and client_context.version[:2] < (3, 1):
            self.client._kill_cursors_executor.wake()
            started = self.listener.results['started']
            succeeded = self.listener.results['succeeded']
            wait_until(lambda: started[-1].command_name == 'killCursors',
                       "publish a start event for killCursors.")
            wait_until(lambda: succeeded[-1].command_name == 'killCursors',
                       "publish a succeeded event for killCursors.")
    else:
        try:
            getattr(coll, name)(**args)
        except OperationFailure:
            pass

    res = self.listener.results
    for expectation in test['expectations']:
        event_type = next(iter(expectation))
        if event_type == "command_started_event":
            # Peek (not pop): the started event is popped only when its
            # matching succeeded/failed event is found below.
            event = res['started'][0] if len(res['started']) else None
            if event is not None:
                # The tests substitute 42 for any number other than 0.
                if (event.command_name == 'getMore'
                        and event.command['getMore']):
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
        elif event_type == "command_succeeded_event":
            event = (res['succeeded'].pop(0)
                     if len(res['succeeded']) else None)
            if event is not None:
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
                # Found succeeded event. Pop related started event.
                res['started'].pop(0)
        elif event_type == "command_failed_event":
            event = res['failed'].pop(0) if len(res['failed']) else None
            if event is not None:
                # Found failed event. Pop related started event.
                res['started'].pop(0)
        else:
            self.fail("Unknown event type")

        if event is None:
            event_name = event_type.split('_')[1]
            self.fail("Expected %s event for %s command. Actual "
                      "results:%s" % (
                          event_name,
                          expectation[event_type]['command_name'],
                          format_actual_results(res)))

        # Compare each expected attribute; nested dicts are compared
        # key-by-key so extra server-added fields are ignored.
        for attr, expected in expectation[event_type].items():
            actual = getattr(event, attr)
            if isinstance(expected, dict):
                for key, val in expected.items():
                    self.assertEqual(val, actual[key])
            else:
                self.assertEqual(actual, expected)
def run_scenario(self):
    """Run every test in one command-monitoring spec file.

    Closes over ``scenario_def`` from the enclosing test-generation
    scope; ``self.client`` and ``self.listener`` are presumably set up
    by the test class — TODO confirm.  For each test, executes the
    operation and matches the listener's recorded command events
    against the expectations.
    """
    self.assertTrue(scenario_def['tests'], "tests cannot be empty")
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']
    # Clear the kill cursors queue.
    self.client._kill_cursors_executor.wake()

    for test in scenario_def['tests']:
        coll = self.client[dbname][collname]
        coll.drop()
        coll.insert_many(scenario_def['data'])
        self.listener.results.clear()
        name = camel_to_snake(test['operation']['name'])
        args = test['operation']['arguments']
        # Don't send $readPreference to mongos before 2.4.
        if (client_context.version.at_least(2, 4, 0)
                and 'readPreference' in args):
            pref = make_read_preference(
                args['readPreference']['mode'], None)
            coll = coll.with_options(read_preference=pref)
        if 'writeConcern' in args:
            coll = coll.with_options(
                write_concern=WriteConcern(**args['writeConcern']))
        # Convert camelCase spec argument names to snake_case keyword
        # args.  Iterate over a snapshot of the keys: popping and
        # re-inserting while iterating the dict itself raises
        # RuntimeError ("dictionary changed size during iteration").
        for arg in list(args):
            args[camel_to_snake(arg)] = args.pop(arg)

        if name == 'bulk_write':
            # Translate spec request documents into pymongo request
            # objects (e.g. "insertOne" -> pymongo.InsertOne).
            bulk_args = []
            for request in args['requests']:
                opname = next(iter(request))
                klass = opname[0:1].upper() + opname[1:]
                arg = getattr(pymongo, klass)(**request[opname])
                bulk_args.append(arg)
            try:
                coll.bulk_write(bulk_args, args.get('ordered', True))
            except OperationFailure:
                pass
        elif name == 'find':
            if 'sort' in args:
                args['sort'] = list(args['sort'].items())
            try:
                # Iterate the cursor.
                tuple(coll.find(**args))
            except OperationFailure:
                pass
            # Wait for the killCursors thread to run.
            if 'limit' in args:
                started = self.listener.results['started']
                wait_until(
                    lambda: started[-1].command_name == 'killCursors',
                    "publish a start event for killCursors.")
        else:
            try:
                getattr(coll, name)(**args)
            except OperationFailure:
                pass

        for expectation in test['expectations']:
            event_type = next(iter(expectation))
            if event_type == "command_started_event":
                event = self.listener.results['started'].pop(0)
                # The tests substitute 42 for any number other than 0.
                if event.command_name == 'getMore':
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
            elif event_type == "command_succeeded_event":
                event = self.listener.results['succeeded'].pop(0)
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
            elif event_type == "command_failed_event":
                event = self.listener.results['failed'].pop(0)
            else:
                self.fail("Unknown event type")

            # Compare each expected attribute; nested dicts are compared
            # key-by-key so extra server-added fields are ignored.
            for attr, expected in expectation[event_type].items():
                actual = getattr(event, attr)
                if isinstance(expected, dict):
                    for key, val in expected.items():
                        self.assertEqual(val, actual[key])
                else:
                    self.assertEqual(actual, expected)
def run_scenario(self):
    """Run one command-monitoring spec test (server-version-gated
    read-preference variant).

    Closes over ``scenario_def`` and ``test`` from the enclosing
    test-generation scope; ``self.client`` and ``self.listener`` are
    presumably set up by the test class — TODO confirm.
    """
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']
    coll = self.client[dbname][collname]
    coll.drop()
    coll.insert_many(scenario_def['data'])
    self.listener.results.clear()
    name = camel_to_snake(test['operation']['name'])
    # Don't send $readPreference to mongos before 2.4.
    if (client_context.version.at_least(2, 4, 0)
            and 'read_preference' in test['operation']):
        mode = read_pref_mode_from_name(
            test['operation']['read_preference']['mode'])
        coll = coll.with_options(
            read_preference=make_read_preference(mode, None))

    test_args = test['operation']['arguments']
    if 'writeConcern' in test_args:
        concern = test_args.pop('writeConcern')
        coll = coll.with_options(
            write_concern=WriteConcern(**concern))
    # Convert camelCase spec argument names to snake_case keyword args.
    args = {}
    for arg in test_args:
        args[camel_to_snake(arg)] = test_args[arg]

    if name == 'bulk_write':
        # Translate spec request documents into pymongo request objects
        # (e.g. "insertOne" -> pymongo.InsertOne).
        bulk_args = []
        for request in args['requests']:
            opname = next(iter(request))
            klass = opname[0:1].upper() + opname[1:]
            arg = getattr(pymongo, klass)(**request[opname])
            bulk_args.append(arg)
        try:
            coll.bulk_write(bulk_args, args.get('ordered', True))
        except OperationFailure:
            pass
    elif name == 'find':
        if 'sort' in args:
            args['sort'] = list(args['sort'].items())
        try:
            # Iterate the cursor.
            tuple(coll.find(**args))
        except OperationFailure:
            pass
        # Wait for the killCursors thread to run if necessary.
        if 'limit' in args and client_context.version[:2] < (3, 1):
            self.client._kill_cursors_executor.wake()
            started = self.listener.results['started']
            succeeded = self.listener.results['succeeded']
            wait_until(
                lambda: started[-1].command_name == 'killCursors',
                "publish a start event for killCursors.")
            wait_until(
                lambda: succeeded[-1].command_name == 'killCursors',
                "publish a succeeded event for killCursors.")
    else:
        try:
            getattr(coll, name)(**args)
        except OperationFailure:
            pass

    res = self.listener.results
    for expectation in test['expectations']:
        event_type = next(iter(expectation))
        if event_type == "command_started_event":
            # Peek (not pop): the started event is popped only when its
            # matching succeeded/failed event is found below.
            event = res['started'][0] if len(res['started']) else None
            if event is not None:
                # The tests substitute 42 for any number other than 0.
                if (event.command_name == 'getMore'
                        and event.command['getMore']):
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
        elif event_type == "command_succeeded_event":
            event = (
                res['succeeded'].pop(0) if len(res['succeeded']) else None)
            if event is not None:
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
                # Found succeeded event. Pop related started event.
                res['started'].pop(0)
        elif event_type == "command_failed_event":
            event = res['failed'].pop(0) if len(res['failed']) else None
            if event is not None:
                # Found failed event. Pop related started event.
                res['started'].pop(0)
        else:
            self.fail("Unknown event type")

        if event is None:
            event_name = event_type.split('_')[1]
            self.fail(
                "Expected %s event for %s command. Actual "
                "results:%s" % (
                    event_name,
                    expectation[event_type]['command_name'],
                    format_actual_results(res)))

        # Compare each expected attribute; nested dicts are compared
        # key-by-key so extra server-added fields are ignored.
        for attr, expected in expectation[event_type].items():
            actual = getattr(event, attr)
            if isinstance(expected, dict):
                for key, val in expected.items():
                    self.assertEqual(val, actual[key])
            else:
                self.assertEqual(actual, expected)
def run_scenario(self):
    """Run every test in one command-monitoring spec file
    (killCursors-filter variant).

    Closes over ``scenario_def`` from the enclosing test-generation
    scope; ``self.client`` and ``self.listener`` are presumably set up
    by the test class — TODO confirm.
    """
    self.assertTrue(scenario_def['tests'], "tests cannot be empty")
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']
    # Clear the kill cursors queue.
    self.client._kill_cursors_executor.wake()

    for test in scenario_def['tests']:
        coll = self.client[dbname][collname]
        coll.drop()
        coll.insert_many(scenario_def['data'])
        self.listener.results.clear()
        name = camel_to_snake(test['operation']['name'])
        args = test['operation']['arguments']
        # Don't send $readPreference to mongos before 2.4.
        if (client_context.version.at_least(2, 4, 0)
                and 'readPreference' in args):
            pref = make_read_preference(args['readPreference']['mode'],
                                        None)
            coll = coll.with_options(read_preference=pref)
        if 'writeConcern' in args:
            coll = coll.with_options(write_concern=WriteConcern(
                **args['writeConcern']))
        # Convert camelCase spec argument names to snake_case keyword
        # args.  Iterate over a snapshot of the keys: popping and
        # re-inserting while iterating the dict itself raises
        # RuntimeError ("dictionary changed size during iteration").
        for arg in list(args):
            args[camel_to_snake(arg)] = args.pop(arg)

        if name == 'bulk_write':
            # Translate spec request documents into pymongo request
            # objects (e.g. "insertOne" -> pymongo.InsertOne).
            bulk_args = []
            for request in args['requests']:
                opname = next(iter(request))
                klass = opname[0:1].upper() + opname[1:]
                arg = getattr(pymongo, klass)(**request[opname])
                bulk_args.append(arg)
            try:
                coll.bulk_write(bulk_args, args.get('ordered', True))
            except OperationFailure:
                pass
        elif name == 'find':
            if 'limit' in args:
                # XXX: Skip killCursors test when using the find command.
                if client_context.version.at_least(3, 1, 1):
                    continue
                self.listener.remove_command_filter('killCursors')
            if 'sort' in args:
                args['sort'] = list(args['sort'].items())
            try:
                # Iterate the cursor.
                tuple(coll.find(**args))
            except OperationFailure:
                pass
            # Wait for the killCursors thread to run.
            if 'limit' in args:
                started = self.listener.results['started']
                wait_until(
                    lambda: started[-1].command_name == 'killCursors',
                    "publish a start event for killCursors.")
                self.listener.add_command_filter('killCursors')
        else:
            try:
                getattr(coll, name)(**args)
            except OperationFailure:
                pass

        for expectation in test['expectations']:
            event_type = next(iter(expectation))
            if event_type == "command_started_event":
                event = self.listener.results['started'].pop(0)
                # The tests substitute 42 for any number other than 0.
                if event.command_name == 'getMore':
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
            elif event_type == "command_succeeded_event":
                event = self.listener.results['succeeded'].pop(0)
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
            elif event_type == "command_failed_event":
                event = self.listener.results['failed'].pop(0)
            else:
                self.fail("Unknown event type")

            # Compare each expected attribute; nested dicts are compared
            # key-by-key so extra server-added fields are ignored.
            for attr, expected in expectation[event_type].items():
                actual = getattr(event, attr)
                if isinstance(expected, dict):
                    for key, val in expected.items():
                        self.assertEqual(val, actual[key])
                else:
                    self.assertEqual(actual, expected)
def run_scenario(self):
    """Run one server-selection spec test against mock topologies."""
    # Initialize topologies.
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    # Default local threshold, so select_servers returns only the
    # servers that fall within the allowable latency window
    # ("in latency window" in the server selection spec).
    top_latency = Topology(
        TopologySettings(seeds=seeds,
                         monitor_class=MockMonitor,
                         pool_class=MockPool))

    # Huge local threshold, so select_servers returns every suitable
    # server regardless of round-trip time ("suitable servers" in the
    # server selection spec).
    top_suitable = Topology(
        TopologySettings(seeds=seeds, local_threshold_ms=1000000,
                         monitor_class=MockMonitor,
                         pool_class=MockPool))

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def["operation"] == "write":
        instance = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        mode_string = scenario_def['read_preference']['mode']
        if mode_string:
            mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        tag_sets = None
        if scenario_def['read_preference']['tag_sets'][0]:
            tag_sets = scenario_def['read_preference']['tag_sets']
        instance = read_preferences.make_read_preference(mode, tag_sets)

    # Select servers. Empty expectation lists mean selection must fail.
    if not scenario_def['suitable_servers']:
        self.assertRaises(AutoReconnect, top_suitable.select_server,
                          instance, server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        self.assertRaises(AutoReconnect, top_latency.select_server,
                          instance, server_selection_timeout=0)
        return

    def assert_servers_match(expected_defs, selected):
        # Compare the selected servers' descriptions, keyed by
        # "host:port", against the scenario's expected server list.
        expected_servers = {}
        for server in expected_defs:
            server_description = make_server_description(server, hosts)
            expected_servers[server['address']] = server_description

        actual_servers = {}
        for s in selected:
            address = "%s:%d" % (s.description.address[0],
                                 s.description.address[1])
            actual_servers[address] = s.description

        self.assertEqual(len(actual_servers), len(expected_servers))
        for address, actual in actual_servers.items():
            expected = expected_servers[address]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time,
                             actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

    assert_servers_match(
        scenario_def['suitable_servers'],
        top_suitable.select_servers(instance, server_selection_timeout=0))
    assert_servers_match(
        scenario_def['in_latency_window'],
        top_latency.select_servers(instance, server_selection_timeout=0))
def run_scenario(self):
    """Run one server-selection / max-staleness spec test against mock
    topologies.
    """
    # Initialize topologies.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    settings = dict(monitor_class=MockMonitor,
                    heartbeat_frequency=frequency,
                    pool_class=MockPool)
    settings['seeds'], hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    # Default local threshold, so select_servers returns only the
    # servers that fall within the allowable latency window
    # ("in latency window" in the server selection spec).
    top_latency = Topology(TopologySettings(**settings))
    top_latency.open()

    # Huge local threshold, so select_servers returns every suitable
    # server regardless of round-trip time ("suitable servers" in the
    # server selection spec).
    settings['local_threshold_ms'] = 1000000
    top_suitable = Topology(TopologySettings(**settings))
    top_suitable.open()

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)

    # Create server selector.
    if scenario_def.get("operation") == "write":
        pref = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        mode_string = pref_def.get('mode', 'primary')
        mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        max_staleness = pref_def.get('maxStalenessSeconds', -1)
        tag_sets = pref_def.get('tag_sets')

        if scenario_def.get('error'):
            with self.assertRaises((ConfigurationError, ValueError)):
                # Error can be raised when making Read Pref or selecting.
                pref = read_preferences.make_read_preference(
                    mode, tag_sets=tag_sets, max_staleness=max_staleness)
                top_latency.select_server(pref)
            return

        pref = read_preferences.make_read_preference(
            mode, tag_sets=tag_sets, max_staleness=max_staleness)

    # Select servers. Empty expectation lists mean selection must fail.
    if not scenario_def.get('suitable_servers'):
        with self.assertRaises(AutoReconnect):
            top_suitable.select_server(pref, server_selection_timeout=0)
        return

    if not scenario_def['in_latency_window']:
        with self.assertRaises(AutoReconnect):
            top_latency.select_server(pref, server_selection_timeout=0)
        return

    def assert_servers_match(expected_defs, selected):
        # Compare the selected servers' descriptions, keyed by
        # "host:port", against the scenario's expected server list.
        expected_servers = {}
        for server in expected_defs:
            server_description = make_server_description(server, hosts)
            expected_servers[server['address']] = server_description

        actual_servers = {}
        for s in selected:
            address = "%s:%d" % (s.description.address[0],
                                 s.description.address[1])
            actual_servers[address] = s.description

        self.assertEqual(len(actual_servers), len(expected_servers))
        for address, actual in actual_servers.items():
            expected = expected_servers[address]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time,
                             actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

    assert_servers_match(
        scenario_def['suitable_servers'],
        top_suitable.select_servers(pref, server_selection_timeout=0))
    assert_servers_match(
        scenario_def['in_latency_window'],
        top_latency.select_servers(pref, server_selection_timeout=0))
def run_scenario(self):
    """Run every test in a command-monitoring spec file.

    For each test: apply the file's skip directives, reset the
    collection and event listener, execute the operation described by
    the test, then compare the published command events against the
    file's expectations.
    """
    self.assertTrue(scenario_def['tests'], "tests cannot be empty")
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']

    for test in scenario_def['tests']:
        # Honor the spec file's skip directives for this server version
        # and topology type.
        ver = client_context.version[:2]
        if "ignore_if_server_version_greater_than" in test:
            version = test["ignore_if_server_version_greater_than"]
            if ver > tuple(map(int, version.split("."))):
                continue
        if "ignore_if_server_version_less_than" in test:
            version = test["ignore_if_server_version_less_than"]
            if ver < tuple(map(int, version.split("."))):
                continue
        if "ignore_if_topology_type" in test:
            types = set(test["ignore_if_topology_type"])
            if "sharded" in types and client_context.is_mongos:
                continue

        # Start each test from a known collection state and an empty
        # event log.
        coll = self.client[dbname][collname]
        coll.drop()
        coll.insert_many(scenario_def['data'])
        self.listener.results.clear()
        name = camel_to_snake(test['operation']['name'])

        # Don't send $readPreference to mongos before 2.4.
        if (client_context.version.at_least(2, 4, 0) and
                'read_preference' in test['operation']):
            mode = read_pref_mode_from_name(
                test['operation']['read_preference']['mode'])
            coll = coll.with_options(
                read_preference=make_read_preference(mode, None))

        test_args = test['operation']['arguments']
        if 'writeConcern' in test_args:
            # Popped so it isn't also forwarded to the operation as a
            # keyword argument below.
            concern = test_args.pop('writeConcern')
            coll = coll.with_options(write_concern=WriteConcern(**concern))
        # Rename the remaining camelCase arguments to snake_case,
        # building a new dict rather than mutating test_args.
        args = {}
        for arg in test_args:
            args[camel_to_snake(arg)] = test_args[arg]

        if name == 'bulk_write':
            bulk_args = []
            for request in args['requests']:
                opname = next(iter(request))
                # "insertOne" -> "InsertOne": the request class in pymongo.
                klass = opname[0:1].upper() + opname[1:]
                arg = getattr(pymongo, klass)(**request[opname])
                bulk_args.append(arg)
            try:
                coll.bulk_write(bulk_args, args.get('ordered', True))
            except OperationFailure:
                pass
        elif name == 'find':
            if 'sort' in args:
                # PyMongo takes sort as a list of (key, direction) pairs.
                args['sort'] = list(args['sort'].items())
            try:
                # Iterate the cursor.
                tuple(coll.find(**args))
            except OperationFailure:
                pass
            # Wait for the killCursors thread to run if necessary.
            if 'limit' in args and client_context.version[:2] < (3, 1):
                self.client._kill_cursors_executor.wake()
                started = self.listener.results['started']
                wait_until(
                    lambda: started[-1].command_name == 'killCursors',
                    "publish a start event for killCursors.")
        else:
            try:
                getattr(coll, name)(**args)
            except OperationFailure:
                pass

        for expectation in test['expectations']:
            event_type = next(iter(expectation))
            if event_type == "command_started_event":
                event = self.listener.results['started'].pop(0)
                # The tests substitute 42 for any number other than 0.
                if (event.command_name == 'getMore'
                        and event.command['getMore']):
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
            elif event_type == "command_succeeded_event":
                event = self.listener.results['succeeded'].pop(0)
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
            elif event_type == "command_failed_event":
                event = self.listener.results['failed'].pop(0)
            else:
                self.fail("Unknown event type")

            # Dict-valued expectations are compared key by key so extra
            # fields in the actual event are tolerated.
            for attr, expected in expectation[event_type].items():
                actual = getattr(event, attr)
                if isinstance(expected, dict):
                    for key, val in expected.items():
                        self.assertEqual(val, actual[key])
                else:
                    self.assertEqual(actual, expected)
def run_operation(self, sessions, collection, operation):
    """Run one operation from a transactions spec test and return its
    result.

    :Parameters:
      - `sessions`: mapping of spec session name to session object.
      - `collection`: the collection to operate on.
      - `operation`: the spec test's operation document (name,
        arguments, options).

    Cursors are exhausted into a list; for an aggregate with ``$out``
    the contents of the output collection are returned instead.
    """
    session = None
    name = camel_to_snake(operation['name'])
    self.transaction_test_debug(name)
    session_name = operation['arguments'].pop('session', None)
    if session_name:
        session = sessions[session_name]

    # Combine arguments with options and handle special cases.
    arguments = operation['arguments']
    arguments.update(arguments.pop("options", {}))
    pref = write_c = read_c = None
    if 'readPreference' in arguments:
        pref = make_read_preference(read_pref_mode_from_name(
            arguments.pop('readPreference')['mode']), tag_sets=None)
    if 'writeConcern' in arguments:
        write_c = WriteConcern(**dict(arguments.pop('writeConcern')))
    if 'readConcern' in arguments:
        read_c = ReadConcern(**dict(arguments.pop('readConcern')))

    # Pick the callable to invoke: a session method for transaction
    # control operations, otherwise a collection method with the
    # concerns/preference applied via with_options.
    if name == 'start_transaction':
        cmd = partial(session.start_transaction,
                      write_concern=write_c,
                      read_concern=read_c)
    elif name in ('commit_transaction', 'abort_transaction'):
        cmd = getattr(session, name)
    else:
        collection = collection.with_options(write_concern=write_c,
                                             read_concern=read_c,
                                             read_preference=pref)
        cmd = getattr(collection, name)
        arguments['session'] = session

    # Rename camelCase arguments to snake_case; iterate a snapshot of
    # the keys since the dict is mutated as we go.
    for arg_name in list(arguments):
        c2s = camel_to_snake(arg_name)
        # PyMongo accepts sort as list of tuples. Asserting len=1
        # because ordering dicts from JSON in 2.6 is unwieldy.
        if arg_name == "sort":
            sort_dict = arguments[arg_name]
            assert len(sort_dict) == 1, 'test can only have 1 sort key'
            arguments[arg_name] = list(iteritems(sort_dict))
        # Named "key" instead not fieldName.
        if arg_name == "fieldName":
            arguments["key"] = arguments.pop(arg_name)
        # Aggregate uses "batchSize", while find uses batch_size.
        elif arg_name == "batchSize" and name == "aggregate":
            continue
        # Requires boolean returnDocument.
        elif arg_name == "returnDocument":
            # Pop the camelCase key (as the fieldName branch does) so it
            # isn't also forwarded to the operation as a stray keyword
            # argument alongside the snake_case one.
            arguments[c2s] = arguments.pop(arg_name) == "After"
        elif c2s == "requests":
            # Parse each request into a bulk write model.
            requests = []
            for request in arguments["requests"]:
                bulk_model = camel_to_upper_camel(request["name"])
                bulk_class = getattr(operations, bulk_model)
                bulk_arguments = camel_to_snake_args(request["arguments"])
                requests.append(bulk_class(**dict(bulk_arguments)))
            arguments["requests"] = requests
        else:
            arguments[c2s] = arguments.pop(arg_name)

    result = cmd(**dict(arguments))

    if name == "aggregate":
        if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
            # $out writes the results to a collection; return its contents.
            out = collection.database[arguments["pipeline"][-1]["$out"]]
            return out.find()
    if isinstance(result, Cursor) or isinstance(result, CommandCursor):
        return list(result)
    return result