def run_operation(self, sessions, collection, operation):
    """Run a single operation from a spec test and return its result."""
    original_collection = collection
    name = camel_to_snake(operation['name'])
    if name == 'run_command':
        name = 'command'
    elif name == 'download_by_name':
        name = 'open_download_stream_by_name'
    elif name == 'download':
        name = 'open_download_stream'

    database = collection.database
    collection = database.get_collection(collection.name)
    if 'collectionOptions' in operation:
        collection = collection.with_options(
            **self.parse_options(operation['collectionOptions']))

    object_name = self.get_object_name(operation)
    if object_name == 'gridfsbucket':
        # Only create the GridFSBucket when we need it (for the gridfs
        # retryable reads tests).
        obj = GridFSBucket(database, bucket_name=collection.name,
                           disable_md5=True)
    else:
        objects = {
            'client': database.client,
            'database': database,
            'collection': collection,
            'testRunner': self
        }
        objects.update(sessions)
        obj = objects[object_name]

    # Combine arguments with options and handle special cases.
    arguments = operation.get('arguments', {})
    arguments.update(arguments.pop("options", {}))
    self.parse_options(arguments)

    cmd = getattr(obj, name)
    for arg_name in list(arguments):
        c2s = camel_to_snake(arg_name)
        # PyMongo accepts sort as a list of tuples.
        if arg_name == "sort":
            sort_dict = arguments[arg_name]
            arguments[arg_name] = list(iteritems(sort_dict))
        # Named "key" instead of "fieldName".
        if arg_name == "fieldName":
            arguments["key"] = arguments.pop(arg_name)
        # Aggregate uses "batchSize", while find uses batch_size.
        elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and
              name == "aggregate"):
            continue
        # Requires boolean returnDocument.
        elif arg_name == "returnDocument":
            arguments[c2s] = arguments.pop(arg_name) == "After"
        elif c2s == "requests":
            # Parse each request into a bulk write model.
            requests = []
            for request in arguments["requests"]:
                bulk_model = camel_to_upper_camel(request["name"])
                bulk_class = getattr(operations, bulk_model)
                bulk_arguments = camel_to_snake_args(request["arguments"])
                requests.append(bulk_class(**dict(bulk_arguments)))
            arguments["requests"] = requests
        elif arg_name == "session":
            arguments['session'] = sessions[arguments['session']]
        elif (name in ('command', 'run_admin_command') and
              arg_name == 'command'):
            # Ensure the first key is the command name.
            ordered_command = SON([(operation['command_name'], 1)])
            ordered_command.update(arguments['command'])
            arguments['command'] = ordered_command
        elif name == 'open_download_stream' and arg_name == 'id':
            arguments['file_id'] = arguments.pop(arg_name)
        elif name != 'find' and c2s == 'max_time_ms':
            # find is the only method that accepts snake_case max_time_ms.
            # All other methods take kwargs which must use the server's
            # camelCase maxTimeMS. See PYTHON-1855.
            arguments['maxTimeMS'] = arguments.pop('max_time_ms')
        elif name == 'with_transaction' and arg_name == 'callback':
            callback_ops = arguments[arg_name]['operations']
            arguments['callback'] = lambda _: self.run_operations(
                sessions, original_collection,
                copy.deepcopy(callback_ops),
                in_with_transaction=True)
        elif name == 'drop_collection' and arg_name == 'collection':
            arguments['name_or_collection'] = arguments.pop(arg_name)
        elif name == 'create_collection' and arg_name == 'collection':
            arguments['name'] = arguments.pop(arg_name)
        elif name == 'create_index' and arg_name == 'keys':
            arguments['keys'] = list(arguments.pop(arg_name).items())
        elif name == 'drop_index' and arg_name == 'name':
            arguments['index_or_name'] = arguments.pop(arg_name)
        else:
            arguments[c2s] = arguments.pop(arg_name)

    if name == 'run_on_thread':
        args = {'sessions': sessions, 'collection': collection}
        args.update(arguments)
        arguments = args
    result = cmd(**dict(arguments))

    if name == "aggregate":
        if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
            # Read from the primary to ensure causal consistency.
            out = collection.database.get_collection(
                arguments["pipeline"][-1]["$out"],
                read_preference=ReadPreference.PRIMARY)
            return out.find()
    if name == "map_reduce":
        if isinstance(result, dict) and 'results' in result:
            return result['results']
    if 'download' in name:
        result = Binary(result.read())

    if isinstance(result, (Cursor, CommandCursor)):
        return list(result)
    return result
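# run_operation relies on camel_to_snake to map spec operation names such as
# 'findOneAndUpdate' onto PyMongo method names such as 'find_one_and_update'.
# The helper lives elsewhere in the test suite; a minimal sketch, assuming
# the usual two-pass regex approach, would look like this:

import re


def camel_to_snake(camel):
    # Split interior CamelCase words with underscores, then lowercase,
    # e.g. 'findOneAndUpdate' -> 'find_one_and_update'.
    snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()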
def parse_options(self, opts):
    """Convert spec readPreference/writeConcern/readConcern options into
    PyMongo keyword arguments."""
    if 'readPreference' in opts:
        opts['read_preference'] = parse_read_preference(
            opts.pop('readPreference'))
    if 'writeConcern' in opts:
        opts['write_concern'] = WriteConcern(
            **dict(opts.pop('writeConcern')))
    if 'readConcern' in opts:
        opts['read_concern'] = ReadConcern(
            **dict(opts.pop('readConcern')))
    return opts
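# A sketch of how a spec operation flows through run_operation. The values
# below are hypothetical; real inputs come from the JSON spec test files:
#
#     operation = {
#         'name': 'findOneAndUpdate',
#         'arguments': {
#             'filter': {'_id': 1},
#             'update': {'$inc': {'x': 1}},
#             'returnDocument': 'After',
#         },
#     }
#     result = self.run_operation(sessions, collection, operation)
#
# camel_to_snake turns 'findOneAndUpdate' into 'find_one_and_update', the
# argument loop rewrites 'returnDocument': 'After' into the boolean keyword
# return_document=True, and cmd(**arguments) ends up calling
# collection.find_one_and_update(filter=..., update=..., return_document=True).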