def parse_options(opts):
    """Convert camelCase spec-test options to snake_case PyMongo options.

    Mutates and returns a plain ``dict`` copy of *opts*. The concern
    options are rebuilt as real ``WriteConcern``/``ReadConcern`` objects.
    """
    if 'readPreference' in opts:
        raw_pref = opts.pop('readPreference')
        opts['read_preference'] = parse_read_preference(raw_pref)
    if 'writeConcern' in opts:
        raw_wc = opts.pop('writeConcern')
        opts['write_concern'] = WriteConcern(**dict(raw_wc))
    if 'readConcern' in opts:
        raw_rc = opts.pop('readConcern')
        opts['read_concern'] = ReadConcern(**dict(raw_rc))
    # Simple key renames: value passes through untouched.
    for camel, snake in (('maxTimeMS', 'max_time_ms'),
                         ('maxCommitTimeMS', 'max_commit_time_ms')):
        if camel in opts:
            opts[snake] = opts.pop(camel)
    return dict(opts)
def parse_options(opts):
    """Normalize camelCase spec-test options into PyMongo keyword options.

    Returns a plain ``dict`` copy of *opts* with concern options rebuilt
    as ``WriteConcern``/``ReadConcern`` objects and ``hint`` values
    normalized (both the top-level hint and the hints inside Bulk API
    ``requests`` arguments).
    """

    def _format_hint(value):
        # A string hint (index name) passes through unchanged; a document
        # hint becomes a list of (key, direction) pairs.
        if isinstance(value, string_type):
            return value
        return list(iteritems(value))

    if 'readPreference' in opts:
        raw_pref = opts.pop('readPreference')
        opts['read_preference'] = parse_read_preference(raw_pref)
    if 'writeConcern' in opts:
        raw_wc = opts.pop('writeConcern')
        opts['write_concern'] = WriteConcern(**dict(raw_wc))
    if 'readConcern' in opts:
        raw_rc = opts.pop('readConcern')
        opts['read_concern'] = ReadConcern(**dict(raw_rc))
    # Simple key renames: value passes through untouched.
    for camel, snake in (('maxTimeMS', 'max_time_ms'),
                         ('maxCommitTimeMS', 'max_commit_time_ms')):
        if camel in opts:
            opts[snake] = opts.pop(camel)
    if 'hint' in opts:
        opts['hint'] = _format_hint(opts.pop('hint'))
    # Properly format 'hint' arguments for the Bulk API tests.
    if 'requests' in opts:
        requests = opts.pop('requests')
        for request in requests:
            arguments = request.pop('arguments')
            if 'hint' in arguments:
                arguments['hint'] = _format_hint(arguments.pop('hint'))
            request['arguments'] = arguments
        opts['requests'] = requests
    return dict(opts)
def run_scenario(self):
    """Run one CRUD spec-test scenario and check command monitoring events.

    NOTE(review): ``scenario_def`` and ``test`` are free variables --
    presumably closure variables bound by a surrounding test-generator;
    confirm against the caller. ``self.client`` and ``self.listener``
    (an event listener with ``results['started'/'succeeded'/'failed']``
    lists) must be set up by the test fixture.
    """
    # Reset the target collection to the scenario's initial data.
    dbname = scenario_def['database_name']
    collname = scenario_def['collection_name']
    coll = self.client[dbname][collname]
    coll.drop()
    coll.insert_many(scenario_def['data'])
    self.listener.results.clear()
    # Operation names in the spec are camelCase; PyMongo methods are snake_case.
    name = camel_to_snake(test['operation']['name'])
    # Apply per-operation collection options (read preference / write concern).
    if 'read_preference' in test['operation']:
        coll = coll.with_options(read_preference=parse_read_preference(
            test['operation']['read_preference']))
    if 'collectionOptions' in test['operation']:
        colloptions = test['operation']['collectionOptions']
        if 'writeConcern' in colloptions:
            concern = colloptions['writeConcern']
            coll = coll.with_options(
                write_concern=WriteConcern(**concern))
    # Flatten a nested 'options' sub-document into the argument dict,
    # then convert all argument names to snake_case.
    test_args = test['operation']['arguments']
    if 'options' in test_args:
        options = test_args.pop('options')
        test_args.update(options)
    args = {}
    for arg in test_args:
        args[camel_to_snake(arg)] = test_args[arg]
    # Dispatch the operation. OperationFailure is swallowed deliberately:
    # the point of the test is the emitted events, not the command result.
    if name == 'count':
        self.skipTest('PyMongo does not support count')
    elif name == 'bulk_write':
        # Build pymongo request objects (e.g. InsertOne) from the spec's
        # lowerCamel operation names.
        bulk_args = []
        for request in args['requests']:
            opname = request['name']
            klass = opname[0:1].upper() + opname[1:]
            arg = getattr(pymongo, klass)(**request['arguments'])
            bulk_args.append(arg)
        try:
            coll.bulk_write(bulk_args, args.get('ordered', True))
        except OperationFailure:
            pass
    elif name == 'find':
        # find needs its document-valued args converted to key/value pairs
        # and numeric args coerced to int.
        if 'sort' in args:
            args['sort'] = list(args['sort'].items())
        if 'hint' in args:
            args['hint'] = list(args['hint'].items())
        for arg in 'skip', 'limit':
            if arg in args:
                args[arg] = int(args[arg])
        try:
            # Iterate the cursor.
            tuple(coll.find(**args))
        except OperationFailure:
            pass
    else:
        # Generic path: call the collection method by (snake_case) name.
        try:
            getattr(coll, name)(**args)
        except OperationFailure:
            pass
    # Compare recorded events against the scenario's expectations.
    # Started events are peeked (index 0) here and only popped when the
    # matching succeeded/failed event is consumed below.
    res = self.listener.results
    for expectation in test['expectations']:
        event_type = next(iter(expectation))
        if event_type == "command_started_event":
            event = res['started'][0] if len(res['started']) else None
            if event is not None:
                # The tests substitute 42 for any number other than 0.
                if (event.command_name == 'getMore'
                        and event.command['getMore']):
                    event.command['getMore'] = 42
                elif event.command_name == 'killCursors':
                    event.command['cursors'] = [42]
                elif event.command_name == 'update':
                    # TODO: remove this once PYTHON-1744 is done.
                    # Add upsert and multi fields back into
                    # expectations.
                    updates = expectation[event_type]['command'][
                        'updates']
                    for update in updates:
                        update.setdefault('upsert', False)
                        update.setdefault('multi', False)
        elif event_type == "command_succeeded_event":
            event = (
                res['succeeded'].pop(0) if len(res['succeeded']) else None)
            if event is not None:
                reply = event.reply
                # The tests substitute 42 for any number other than 0,
                # and "" for any error message.
                if 'writeErrors' in reply:
                    for doc in reply['writeErrors']:
                        # Remove any new fields the server adds. The tests
                        # only have index, code, and errmsg.
                        diff = set(doc) - set(['index', 'code', 'errmsg'])
                        for field in diff:
                            doc.pop(field)
                        doc['code'] = 42
                        doc['errmsg'] = ""
                elif 'cursor' in reply:
                    if reply['cursor']['id']:
                        reply['cursor']['id'] = 42
                elif event.command_name == 'killCursors':
                    # Make the tests continue to pass when the killCursors
                    # command is actually in use.
                    if 'cursorsKilled' in reply:
                        reply.pop('cursorsKilled')
                    reply['cursorsUnknown'] = [42]
                # Found succeeded event. Pop related started event.
                res['started'].pop(0)
        elif event_type == "command_failed_event":
            event = res['failed'].pop(0) if len(res['failed']) else None
            if event is not None:
                # Found failed event. Pop related started event.
                res['started'].pop(0)
        else:
            self.fail("Unknown event type")
        if event is None:
            event_name = event_type.split('_')[1]
            self.fail(
                "Expected %s event for %s command. Actual "
                "results:%s" % (
                    event_name,
                    expectation[event_type]['command_name'],
                    format_actual_results(res)))
        # Compare each expected event attribute against the actual event.
        for attr, expected in expectation[event_type].items():
            # NOTE(review): when `expected` is a string (e.g. the
            # command_name attribute), `'options' in expected` is a
            # substring check -- presumably always False in practice;
            # confirm against the spec-test files.
            if 'options' in expected:
                options = expected.pop('options')
                expected.update(options)
            actual = getattr(event, attr)
            if isinstance(expected, dict):
                # Subset match: only the expected keys are compared.
                for key, val in expected.items():
                    self.assertEqual(val, actual[key])
            else:
                self.assertEqual(actual, expected)
def run_scenario(self):
    """Run one server selection spec-test scenario.

    Builds two topologies -- one using the default latency window and one
    with an effectively unlimited window -- selects servers from each, and
    compares the selections against the scenario's ``suitable_servers``
    and ``in_latency_window`` expectations.

    NOTE(review): ``scenario_def`` is a free variable -- presumably bound
    by a surrounding test-generator closure; confirm against the caller.
    """
    _, hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    def _compare_selection(selected, expected_defs):
        # Build address -> ServerDescription maps for the expected and the
        # actually-selected servers, then compare them field by field.
        # (This was previously duplicated inline for both selections.)
        expected_servers = {}
        for server in expected_defs:
            expected_servers[server['address']] = make_server_description(
                server, hosts)
        actual_servers = {}
        for s in selected:
            address = "%s:%d" % (s.description.address[0],
                                 s.description.address[1])
            actual_servers[address] = s.description
        self.assertEqual(len(actual_servers), len(expected_servers))
        for address, actual in actual_servers.items():
            expected = expected_servers[address]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time,
                             actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = create_topology(scenario_def)
    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window.
    top_suitable = create_topology(scenario_def,
                                   local_threshold_ms=1000000)
    # Create server selector.
    if scenario_def.get("operation") == "write":
        pref = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        if scenario_def.get('error'):
            with self.assertRaises((ConfigurationError, ValueError)):
                # Error can be raised when making Read Pref or selecting.
                pref = parse_read_preference(pref_def)
                top_latency.select_server(pref)
            return
        pref = parse_read_preference(pref_def)
    # Select servers. An empty expectation list means selection must fail.
    if not scenario_def.get('suitable_servers'):
        with self.assertRaises(AutoReconnect):
            top_suitable.select_server(pref, server_selection_timeout=0)
        return
    if not scenario_def['in_latency_window']:
        with self.assertRaises(AutoReconnect):
            top_latency.select_server(pref, server_selection_timeout=0)
        return
    actual_suitable_s = top_suitable.select_servers(
        pref, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        pref, server_selection_timeout=0)
    _compare_selection(actual_suitable_s,
                       scenario_def['suitable_servers'])
    _compare_selection(actual_latency_s,
                       scenario_def['in_latency_window'])
def run_scenario(self):
    """Run one server selection spec-test scenario.

    Builds two ``Topology`` objects from the scenario's settings -- one
    using the default latency window and one with an effectively unlimited
    window -- feeds them the scenario's server descriptions, selects
    servers from each, and compares the selections against the scenario's
    ``suitable_servers`` and ``in_latency_window`` expectations.

    NOTE(review): ``scenario_def`` is a free variable -- presumably bound
    by a surrounding test-generator closure; confirm against the caller.
    """
    # Initialize topologies.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY
    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])
    settings = get_topology_settings_dict(heartbeat_frequency=frequency,
                                          seeds=seeds)

    def _compare_selection(selected, expected_defs):
        # Build address -> ServerDescription maps for the expected and the
        # actually-selected servers, then compare them field by field.
        # (This was previously duplicated inline for both selections.)
        expected_servers = {}
        for server in expected_defs:
            expected_servers[server['address']] = make_server_description(
                server, hosts)
        actual_servers = {}
        for s in selected:
            address = "%s:%d" % (s.description.address[0],
                                 s.description.address[1])
            actual_servers[address] = s.description
        self.assertEqual(len(actual_servers), len(expected_servers))
        for address, actual in actual_servers.items():
            expected = expected_servers[address]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time,
                             actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    top_latency = Topology(TopologySettings(**settings))
    top_latency.open()
    # "In latency window" is defined in the server selection
    # spec as the subset of suitable_servers that falls within the
    # allowable latency window; a huge threshold makes the window a no-op.
    settings['local_threshold_ms'] = 1000000
    top_suitable = Topology(TopologySettings(**settings))
    top_suitable.open()
    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        top_suitable.on_change(server_description)
        top_latency.on_change(server_description)
    # Create server selector.
    if scenario_def.get("operation") == "write":
        pref = writable_server_selector
    else:
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        if scenario_def.get('error'):
            with self.assertRaises((ConfigurationError, ValueError)):
                # Error can be raised when making Read Pref or selecting.
                pref = parse_read_preference(pref_def)
                top_latency.select_server(pref)
            return
        pref = parse_read_preference(pref_def)
    # Select servers. An empty expectation list means selection must fail.
    if not scenario_def.get('suitable_servers'):
        with self.assertRaises(AutoReconnect):
            top_suitable.select_server(pref, server_selection_timeout=0)
        return
    if not scenario_def['in_latency_window']:
        with self.assertRaises(AutoReconnect):
            top_latency.select_server(pref, server_selection_timeout=0)
        return
    actual_suitable_s = top_suitable.select_servers(
        pref, server_selection_timeout=0)
    actual_latency_s = top_latency.select_servers(
        pref, server_selection_timeout=0)
    _compare_selection(actual_suitable_s,
                       scenario_def['suitable_servers'])
    _compare_selection(actual_latency_s,
                       scenario_def['in_latency_window'])