Example #1
    def run_operation(self, collection, test):
        # Iterate over all operations.
        for opdef in test['operations']:
            # Convert command from CamelCase to pymongo.collection method.
            operation = camel_to_snake(opdef['name'])

            # Get command handle on target entity (collection/database).
            target_object = opdef.get('object', 'collection')
            if target_object == 'database':
                cmd = getattr(collection.database, operation)
            elif target_object == 'collection':
                collection = collection.with_options(**dict(
                    parse_collection_options(opdef.get('collectionOptions',
                                                       {}))))
                cmd = getattr(collection, operation)
            else:
                self.fail("Unknown object name %s" % (target_object, ))

            # Convert arguments to snake_case and handle special cases.
            arguments = opdef['arguments']
            options = arguments.pop("options", {})

            for option_name in options:
                arguments[camel_to_snake(option_name)] = options[option_name]

            if operation == "bulk_write":
                # Parse each request into a bulk write model.
                requests = []
                for request in arguments["requests"]:
                    bulk_model = camel_to_upper_camel(request["name"])
                    bulk_class = getattr(operations, bulk_model)
                    bulk_arguments = camel_to_snake_args(request["arguments"])
                    requests.append(bulk_class(**bulk_arguments))
                arguments["requests"] = requests
            else:
                for arg_name in list(arguments):
                    c2s = camel_to_snake(arg_name)
                    # PyMongo accepts sort as list of tuples.
                    if arg_name == "sort":
                        sort_dict = arguments[arg_name]
                        arguments[arg_name] = list(iteritems(sort_dict))
                    # PyMongo names this argument "key", not fieldName.
                    if arg_name == "fieldName":
                        arguments["key"] = arguments.pop(arg_name)
                    # Aggregate uses "batchSize", while find uses batch_size.
                    elif arg_name == "batchSize" and operation == "aggregate":
                        continue
                    # Requires boolean returnDocument.
                    elif arg_name == "returnDocument":
                        arguments[c2s] = arguments[arg_name] == "After"
                    else:
                        arguments[c2s] = arguments.pop(arg_name)

            if opdef.get('error') is True:
                with self.assertRaises(PyMongoError):
                    cmd(**arguments)
            else:
                result = cmd(**arguments)
                self.check_result(opdef.get('result'), result)
    def run_entity_operation(self, spec):
        target = self.entity_map[spec['object']]
        opname = spec['name']
        opargs = spec.get('arguments')
        expect_error = spec.get('expectError')
        if opargs:
            arguments = parse_spec_options(copy.deepcopy(opargs))
            prepare_spec_arguments(spec, arguments, camel_to_snake(opname),
                                   self.entity_map, self.run_operations)
        else:
            arguments = tuple()

        if isinstance(target, MongoClient):
            method_name = '_clientOperation_%s' % (opname, )
        elif isinstance(target, Database):
            method_name = '_databaseOperation_%s' % (opname, )
        elif isinstance(target, Collection):
            method_name = '_collectionOperation_%s' % (opname, )
        elif isinstance(target, ChangeStream):
            method_name = '_changeStreamOperation_%s' % (opname, )
        elif isinstance(target, ClientSession):
            method_name = '_sessionOperation_%s' % (opname, )
        elif isinstance(target, GridFSBucket):
            raise NotImplementedError
        else:
            method_name = 'doesNotExist'

        try:
            method = getattr(self, method_name)
        except AttributeError:
            try:
                cmd = getattr(target, camel_to_snake(opname))
            except AttributeError:
                self.fail('Unsupported operation %s on entity %s' %
                          (opname, target))
        else:
            cmd = functools.partial(method, target)

        try:
            result = cmd(**dict(arguments))
        except Exception as exc:
            if expect_error:
                return self.process_error(exc, expect_error)
            raise

        if 'expectResult' in spec:
            self.match_evaluator.match_result(spec['expectResult'], result)

        save_as_entity = spec.get('saveResultAsEntity')
        if save_as_entity:
            self.entity_map[save_as_entity] = result
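Every runner above starts by mapping spec operation names such as "insertOne" or "findOneAndReplace" onto PyMongo method names with camel_to_snake. PyMongo's test utilities ship their own helper; the regex-based re-implementation below is only a minimal sketch of what it does:

import re

# Hypothetical re-implementation of the camel_to_snake test helper.
_CAMEL_RE = re.compile(r'((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')

def camel_to_snake(camel):
    # "insertOne" -> "insert_one", "findOneAndReplace" -> "find_one_and_replace"
    return _CAMEL_RE.sub(r'_\1', camel).lower()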
Example #4
def run_operation(collection, test):
    # Convert command from CamelCase to pymongo.collection method.
    operation = camel_to_snake(test['operation']['name'])
    cmd = getattr(collection, operation)

    # Convert arguments to snake_case and handle special cases.
    arguments = test['operation']['arguments']
    options = arguments.pop("options", {})
    for option_name in options:
        arguments[camel_to_snake(option_name)] = options[option_name]
    if operation == "bulk_write":
        # Parse each request into a bulk write model.
        requests = []
        for request in arguments["requests"]:
            bulk_model = camel_to_upper_camel(request["name"])
            bulk_class = getattr(operations, bulk_model)
            bulk_arguments = camel_to_snake_args(request["arguments"])
            requests.append(bulk_class(**bulk_arguments))
        arguments["requests"] = requests
    else:
        for arg_name in list(arguments):
            c2s = camel_to_snake(arg_name)
            # PyMongo accepts sort as list of tuples.
            if arg_name == "sort":
                sort_dict = arguments[arg_name]
                arguments[arg_name] = list(iteritems(sort_dict))
            # PyMongo names this argument "key", not fieldName.
            if arg_name == "fieldName":
                arguments["key"] = arguments.pop(arg_name)
            # Aggregate uses "batchSize", while find uses batch_size.
            elif arg_name == "batchSize" and operation == "aggregate":
                continue
            # Requires boolean returnDocument.
            elif arg_name == "returnDocument":
                arguments[c2s] = arguments[arg_name] == "After"
            else:
                arguments[c2s] = arguments.pop(arg_name)

    result = cmd(**arguments)

    if operation == "aggregate":
        if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
            out = collection.database[arguments["pipeline"][-1]["$out"]]
            result = out.find()

    if isinstance(result, Cursor) or isinstance(result, CommandCursor):
        return list(result)

    return result
def run_operation(collection, test):
    # Convert command from CamelCase to pymongo.collection method.
    operation = camel_to_snake(test['operation']['name'])
    cmd = getattr(collection, operation)

    # Convert arguments to snake_case and handle special cases.
    arguments = test['operation']['arguments']
    options = arguments.pop("options", {})
    for option_name in options:
        arguments[camel_to_snake(option_name)] = options[option_name]
    if operation == "bulk_write":
        # Parse each request into a bulk write model.
        requests = []
        for request in arguments["requests"]:
            bulk_model = camel_to_upper_camel(request["name"])
            bulk_class = getattr(operations, bulk_model)
            bulk_arguments = camel_to_snake_args(request["arguments"])
            requests.append(bulk_class(**bulk_arguments))
        arguments["requests"] = requests
    else:
        for arg_name in list(arguments):
            c2s = camel_to_snake(arg_name)
            # PyMongo accepts sort as list of tuples.
            if arg_name == "sort":
                sort_dict = arguments[arg_name]
                arguments[arg_name] = list(iteritems(sort_dict))
            # PyMongo names this argument "key", not fieldName.
            if arg_name == "fieldName":
                arguments["key"] = arguments.pop(arg_name)
            # Aggregate uses "batchSize", while find uses batch_size.
            elif arg_name == "batchSize" and operation == "aggregate":
                continue
            # Requires boolean returnDocument.
            elif arg_name == "returnDocument":
                arguments[c2s] = arguments[arg_name] == "After"
            else:
                arguments[c2s] = arguments.pop(arg_name)

    result = cmd(**arguments)

    if operation == "aggregate":
        if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
            out = collection.database[arguments["pipeline"][-1]["$out"]]
            return out.find()
    return result
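The bulk_write branch also assumes two more helpers: camel_to_upper_camel, which turns a request name like "insertOne" into the matching pymongo.operations class name InsertOne, and camel_to_snake_args, which snake_cases every key of the request's argument dict. Hypothetical sketches of both, reusing the camel_to_snake sketch from above:

def camel_to_upper_camel(camel):
    # "insertOne" -> "InsertOne", "replaceOne" -> "ReplaceOne"
    return camel[:1].upper() + camel[1:]

def camel_to_snake_args(arguments):
    # {"filter": {...}, "arrayFilters": [...]} -> {"filter": ..., "array_filters": ...}
    return {camel_to_snake(key): value for key, value in arguments.items()}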
Example #6
    def run_operation(self, op):
        """Run a single operation in a test."""
        op_name = camel_to_snake(op['name'])
        thread = op['thread']
        meth = getattr(self, op_name)
        if thread:
            self.targets[thread].schedule(lambda: meth(op))
        else:
            meth(op)
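The thread branch assumes every entry in self.targets exposes a schedule() method that queues a callable to run on that target's own thread. PyMongo's actual test helper differs; the class below is only an illustrative sketch of that contract:

import queue
import threading

class Worker(threading.Thread):
    """Illustrative worker: runs scheduled callables, in order, on one thread."""

    def __init__(self):
        super().__init__(daemon=True)
        self._tasks = queue.Queue()

    def schedule(self, fn):
        self._tasks.put(fn)

    def run(self):
        while True:
            task = self._tasks.get()
            if task is None:  # sentinel used to stop the worker
                return
            task()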
Example #7
    def check_object(self, actual, expected):
        """Assert that the actual object matches the expected object."""
        self.assertEqual(type(actual), OBJECT_TYPES[expected['type']])
        for attr, expected_val in expected.items():
            if attr == 'type':
                continue
            c2s = camel_to_snake(attr)
            actual_val = getattr(actual, c2s)
            if expected_val == 42:
                self.assertIsNotNone(actual_val)
            else:
                self.assertEqual(actual_val, expected_val)
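For context, a hypothetical call showing the conventions check_object relies on: the 'type' key selects the expected class via OBJECT_TYPES, every other camelCase key is compared against the matching snake_case attribute, and 42 is the spec's "any value" placeholder. The class and attribute names here are made up:

expected = {
    'type': 'Widget',    # asserts type(actual) is OBJECT_TYPES['Widget']
    'maxSize': 10,       # compared against actual.max_size
    'generation': 42,    # only asserts actual.generation is not None
}
self.check_object(actual_widget, expected)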
Example #8
    def check_result(self, expected_result, result):
        if expected_result is None:
            return True

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result) == expected_result

        elif isinstance(result, _WriteResult):
            for res in expected_result:
                prop = camel_to_snake(res)
                # SPEC-869: Only BulkWriteResult has upserted_count.
                if (prop == "upserted_count"
                        and not isinstance(result, BulkWriteResult)):
                    if result.upserted_id is not None:
                        upserted_count = 1
                    else:
                        upserted_count = 0
                    if upserted_count != expected_result[res]:
                        return False
                elif prop == "inserted_ids":
                    # BulkWriteResult does not have inserted_ids.
                    if isinstance(result, BulkWriteResult):
                        if len(expected_result[res]) != result.inserted_count:
                            return False
                    else:
                        # InsertManyResult may be compared to [id1] from the
                        # crud spec or {"0": id1} from the retryable write spec.
                        ids = expected_result[res]
                        if isinstance(ids, dict):
                            ids = [ids[str(i)] for i in range(len(ids))]
                        if ids != result.inserted_ids:
                            return False
                elif prop == "upserted_ids":
                    # Convert indexes from strings to integers.
                    ids = expected_result[res]
                    expected_ids = {}
                    for str_index in ids:
                        expected_ids[int(str_index)] = ids[str_index]
                    if expected_ids != result.upserted_ids:
                        return False
                elif getattr(result, prop) != expected_result[res]:
                    return False
            return True
        else:
            if not expected_result:
                return result is None
            else:
                return result == expected_result
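The inserted_ids branch above accepts two spec formats for the same data; an illustrative note on the shapes involved (values are made up):

# Both describe an InsertManyResult whose inserted_ids == [1, 2]:
crud_style = {"insertedIds": [1, 2]}                  # crud spec: a list
retryable_style = {"insertedIds": {"0": 1, "1": 2}}   # retryable writes: index-keyed dict
# For non-bulk results such as UpdateResult (which has no upserted_count
# attribute), "upsertedCount" is derived from upserted_id: 1 if an upsert
# happened, otherwise 0.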
Example #10
    def check_result(self, expected_result, result):
        if isinstance(result, _WriteResult):
            for res in expected_result:
                prop = camel_to_snake(res)
                # SPEC-869: Only BulkWriteResult has upserted_count.
                if (prop == "upserted_count"
                        and not isinstance(result, BulkWriteResult)):
                    if result.upserted_id is not None:
                        upserted_count = 1
                    else:
                        upserted_count = 0
                    self.assertEqual(upserted_count, expected_result[res],
                                     prop)
                elif prop == "inserted_ids":
                    # BulkWriteResult does not have inserted_ids.
                    if isinstance(result, BulkWriteResult):
                        self.assertEqual(len(expected_result[res]),
                                         result.inserted_count)
                    else:
                        # InsertManyResult may be compared to [id1] from the
                        # crud spec or {"0": id1} from the retryable write spec.
                        ids = expected_result[res]
                        if isinstance(ids, dict):
                            ids = [ids[str(i)] for i in range(len(ids))]
                        self.assertEqual(ids, result.inserted_ids, prop)
                elif prop == "upserted_ids":
                    # Convert indexes from strings to integers.
                    ids = expected_result[res]
                    expected_ids = {}
                    for str_index in ids:
                        expected_ids[int(str_index)] = ids[str_index]
                    self.assertEqual(expected_ids, result.upserted_ids, prop)
                else:
                    self.assertEqual(getattr(result, prop),
                                     expected_result[res], prop)

            return True
        else:
            self.assertEqual(result, expected_result)
Example #12
    def run_operation(self, sessions, collection, operation):
        original_collection = collection
        name = camel_to_snake(operation['name'])
        if name == 'run_command':
            name = 'command'
        elif name == 'download_by_name':
            name = 'open_download_stream_by_name'
        elif name == 'download':
            name = 'open_download_stream'

        database = collection.database
        collection = database.get_collection(collection.name)
        if 'collectionOptions' in operation:
            collection = collection.with_options(
                **self.parse_options(operation['collectionOptions']))

        object_name = self.get_object_name(operation)
        if object_name == 'gridfsbucket':
            # Only create the GridFSBucket when we need it (for the gridfs
            # retryable reads tests).
            obj = GridFSBucket(database,
                               bucket_name=collection.name,
                               disable_md5=True)
        else:
            objects = {
                'client': database.client,
                'database': database,
                'collection': collection,
                'testRunner': self
            }
            objects.update(sessions)
            obj = objects[object_name]

        # Combine arguments with options and handle special cases.
        arguments = operation.get('arguments', {})
        arguments.update(arguments.pop("options", {}))
        self.parse_options(arguments)

        cmd = getattr(obj, name)

        for arg_name in list(arguments):
            c2s = camel_to_snake(arg_name)
            # PyMongo accepts sort as list of tuples.
            if arg_name == "sort":
                sort_dict = arguments[arg_name]
                arguments[arg_name] = list(iteritems(sort_dict))
            # PyMongo names this argument "key", not fieldName.
            if arg_name == "fieldName":
                arguments["key"] = arguments.pop(arg_name)
            # Aggregate uses "batchSize", while find uses batch_size.
            elif ((arg_name == "batchSize" or arg_name == "allowDiskUse")
                  and name == "aggregate"):
                continue
            # Requires boolean returnDocument.
            elif arg_name == "returnDocument":
                arguments[c2s] = arguments.pop(arg_name) == "After"
            elif c2s == "requests":
                # Parse each request into a bulk write model.
                requests = []
                for request in arguments["requests"]:
                    bulk_model = camel_to_upper_camel(request["name"])
                    bulk_class = getattr(operations, bulk_model)
                    bulk_arguments = camel_to_snake_args(request["arguments"])
                    requests.append(bulk_class(**dict(bulk_arguments)))
                arguments["requests"] = requests
            elif arg_name == "session":
                arguments['session'] = sessions[arguments['session']]
            elif (name in ('command', 'run_admin_command')
                  and arg_name == 'command'):
                # Ensure the first key is the command name.
                ordered_command = SON([(operation['command_name'], 1)])
                ordered_command.update(arguments['command'])
                arguments['command'] = ordered_command
            elif name == 'open_download_stream' and arg_name == 'id':
                arguments['file_id'] = arguments.pop(arg_name)
            elif name != 'find' and c2s == 'max_time_ms':
                # find is the only method that accepts snake_case max_time_ms.
                # All other methods take kwargs which must use the server's
                # camelCase maxTimeMS. See PYTHON-1855.
                arguments['maxTimeMS'] = arguments.pop('max_time_ms')
            elif name == 'with_transaction' and arg_name == 'callback':
                callback_ops = arguments[arg_name]['operations']
                arguments['callback'] = lambda _: self.run_operations(
                    sessions,
                    original_collection,
                    copy.deepcopy(callback_ops),
                    in_with_transaction=True)
            elif name == 'drop_collection' and arg_name == 'collection':
                arguments['name_or_collection'] = arguments.pop(arg_name)
            elif name == 'create_collection' and arg_name == 'collection':
                arguments['name'] = arguments.pop(arg_name)
            elif name == 'create_index' and arg_name == 'keys':
                arguments['keys'] = list(arguments.pop(arg_name).items())
            elif name == 'drop_index' and arg_name == 'name':
                arguments['index_or_name'] = arguments.pop(arg_name)
            else:
                arguments[c2s] = arguments.pop(arg_name)

        if name == 'run_on_thread':
            args = {'sessions': sessions, 'collection': collection}
            args.update(arguments)
            arguments = args
        result = cmd(**dict(arguments))

        if name == "aggregate":
            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
                # Read from the primary to ensure causal consistency.
                out = collection.database.get_collection(
                    arguments["pipeline"][-1]["$out"],
                    read_preference=ReadPreference.PRIMARY)
                return out.find()
        if name == "map_reduce":
            if isinstance(result, dict) and 'results' in result:
                return result['results']
        if 'download' in name:
            result = Binary(result.read())

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result)

        return result
    def run_operation(self, sessions, collection, operation):
        original_collection = collection
        name = camel_to_snake(operation['name'])
        if name == 'run_command':
            name = 'command'
        elif name == 'download_by_name':
            name = 'open_download_stream_by_name'
        elif name == 'download':
            name = 'open_download_stream'

        database = collection.database
        collection = database.get_collection(collection.name)
        if 'collectionOptions' in operation:
            collection = collection.with_options(
                **self.parse_options(operation['collectionOptions']))

        object_name = self.get_object_name(operation)
        if object_name == 'gridfsbucket':
            # Only create the GridFSBucket when we need it (for the gridfs
            # retryable reads tests).
            obj = GridFSBucket(database,
                               bucket_name=collection.name,
                               disable_md5=True)
        else:
            objects = {
                'client': database.client,
                'database': database,
                'collection': collection,
                'testRunner': self
            }
            objects.update(sessions)
            obj = objects[object_name]

        # Combine arguments with options and handle special cases.
        arguments = operation.get('arguments', {})
        arguments.update(arguments.pop("options", {}))
        self.parse_options(arguments)

        cmd = getattr(obj, name)

        with_txn_callback = functools.partial(self.run_operations,
                                              sessions,
                                              original_collection,
                                              in_with_transaction=True)
        prepare_spec_arguments(operation, arguments, name, sessions,
                               with_txn_callback)

        if name == 'run_on_thread':
            args = {'sessions': sessions, 'collection': collection}
            args.update(arguments)
            arguments = args
        result = cmd(**dict(arguments))

        if name == "aggregate":
            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
                # Read from the primary to ensure causal consistency.
                out = collection.database.get_collection(
                    arguments["pipeline"][-1]["$out"],
                    read_preference=ReadPreference.PRIMARY)
                return out.find()
        if name == "map_reduce":
            if isinstance(result, dict) and 'results' in result:
                return result['results']
        if 'download' in name:
            result = Binary(result.read())

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result)

        return result
    def run_operation(self, sessions, collection, operation):
        name = camel_to_snake(operation['name'])
        if name == 'run_command':
            name = 'command'
        self.transaction_test_debug(name)

        def parse_options(opts):
            if 'readPreference' in opts:
                opts['read_preference'] = parse_read_preference(
                    opts.pop('readPreference'))

            if 'writeConcern' in opts:
                opts['write_concern'] = WriteConcern(
                    **dict(opts.pop('writeConcern')))

            if 'readConcern' in opts:
                opts['read_concern'] = ReadConcern(
                    **dict(opts.pop('readConcern')))
            return opts

        database = collection.database
        collection = database.get_collection(collection.name)
        if 'collectionOptions' in operation:
            collection = collection.with_options(
                **dict(parse_options(operation['collectionOptions'])))

        objects = {
            'database': database,
            'collection': collection,
            'testRunner': self
        }
        objects.update(sessions)
        obj = objects[operation['object']]

        # Combine arguments with options and handle special cases.
        arguments = operation.get('arguments', {})
        arguments.update(arguments.pop("options", {}))
        parse_options(arguments)

        cmd = getattr(obj, name)

        for arg_name in list(arguments):
            c2s = camel_to_snake(arg_name)
            # PyMongo accepts sort as list of tuples.
            if arg_name == "sort":
                sort_dict = arguments[arg_name]
                arguments[arg_name] = list(iteritems(sort_dict))
            # PyMongo names this argument "key", not fieldName.
            if arg_name == "fieldName":
                arguments["key"] = arguments.pop(arg_name)
            # Aggregate uses "batchSize", while find uses batch_size.
            elif arg_name == "batchSize" and name == "aggregate":
                continue
            # Requires boolean returnDocument.
            elif arg_name == "returnDocument":
                arguments[c2s] = arguments[arg_name] == "After"
            elif c2s == "requests":
                # Parse each request into a bulk write model.
                requests = []
                for request in arguments["requests"]:
                    bulk_model = camel_to_upper_camel(request["name"])
                    bulk_class = getattr(operations, bulk_model)
                    bulk_arguments = camel_to_snake_args(request["arguments"])
                    requests.append(bulk_class(**dict(bulk_arguments)))
                arguments["requests"] = requests
            elif arg_name == "session":
                arguments['session'] = sessions[arguments['session']]
            elif name == 'command' and arg_name == 'command':
                # Ensure the first key is the command name.
                ordered_command = SON([(operation['command_name'], 1)])
                ordered_command.update(arguments['command'])
                arguments['command'] = ordered_command
            else:
                arguments[c2s] = arguments.pop(arg_name)

        result = cmd(**dict(arguments))

        if name == "aggregate":
            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
                # Read from the primary to ensure causal consistency.
                out = collection.database.get_collection(
                    arguments["pipeline"][-1]["$out"],
                    read_preference=ReadPreference.PRIMARY)
                return out.find()

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result)

        return result
    def run_operation(self, sessions, collection, operation):
        original_collection = collection
        name = camel_to_snake(operation['name'])
        if name == 'run_command':
            name = 'command'
        self.transaction_test_debug(name)

        def parse_options(opts):
            if 'readPreference' in opts:
                opts['read_preference'] = parse_read_preference(
                    opts.pop('readPreference'))

            if 'writeConcern' in opts:
                opts['write_concern'] = WriteConcern(
                    **dict(opts.pop('writeConcern')))

            if 'readConcern' in opts:
                opts['read_concern'] = ReadConcern(
                    **dict(opts.pop('readConcern')))
            return opts

        database = collection.database
        collection = database.get_collection(collection.name)
        if 'collectionOptions' in operation:
            collection = collection.with_options(
                **dict(parse_options(operation['collectionOptions'])))

        objects = {
            'database': database,
            'collection': collection,
            'testRunner': self
        }
        objects.update(sessions)
        obj = objects[operation['object']]

        # Combine arguments with options and handle special cases.
        arguments = operation.get('arguments', {})
        arguments.update(arguments.pop("options", {}))
        parse_options(arguments)

        cmd = getattr(obj, name)

        for arg_name in list(arguments):
            c2s = camel_to_snake(arg_name)
            # PyMongo accepts sort as list of tuples.
            if arg_name == "sort":
                sort_dict = arguments[arg_name]
                arguments[arg_name] = list(iteritems(sort_dict))
            # PyMongo names this argument "key", not fieldName.
            if arg_name == "fieldName":
                arguments["key"] = arguments.pop(arg_name)
            # Aggregate uses "batchSize", while find uses batch_size.
            elif arg_name == "batchSize" and name == "aggregate":
                continue
            # Requires boolean returnDocument.
            elif arg_name == "returnDocument":
                arguments[c2s] = arguments[arg_name] == "After"
            elif c2s == "requests":
                # Parse each request into a bulk write model.
                requests = []
                for request in arguments["requests"]:
                    bulk_model = camel_to_upper_camel(request["name"])
                    bulk_class = getattr(operations, bulk_model)
                    bulk_arguments = camel_to_snake_args(request["arguments"])
                    requests.append(bulk_class(**dict(bulk_arguments)))
                arguments["requests"] = requests
            elif arg_name == "session":
                arguments['session'] = sessions[arguments['session']]
            elif name == 'command' and arg_name == 'command':
                # Ensure the first key is the command name.
                ordered_command = SON([(operation['command_name'], 1)])
                ordered_command.update(arguments['command'])
                arguments['command'] = ordered_command
            elif name == 'with_transaction' and arg_name == 'callback':
                callback_ops = arguments[arg_name]['operations']
                arguments['callback'] = lambda _: self.run_operations(
                    sessions, original_collection, copy.deepcopy(callback_ops),
                    in_with_transaction=True)
            else:
                arguments[c2s] = arguments.pop(arg_name)

        result = cmd(**dict(arguments))

        if name == "aggregate":
            if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]:
                # Read from the primary to ensure causal consistency.
                out = collection.database.get_collection(
                    arguments["pipeline"][-1]["$out"],
                    read_preference=ReadPreference.PRIMARY)
                return out.find()

        if isinstance(result, Cursor) or isinstance(result, CommandCursor):
            return list(result)

        return result
    def run_entity_operation(self, spec):
        target = self.entity_map[spec['object']]
        opname = spec['name']
        opargs = spec.get('arguments')
        expect_error = spec.get('expectError')
        save_as_entity = spec.get('saveResultAsEntity')
        expect_result = spec.get('expectResult')
        ignore = spec.get('ignoreResultAndError')
        if ignore and (expect_error or save_as_entity or expect_result):
            raise ValueError(
                'ignoreResultAndError is incompatible with saveResultAsEntity'
                ', expectError, and expectResult')
        if opargs:
            arguments = parse_spec_options(copy.deepcopy(opargs))
            prepare_spec_arguments(spec, arguments, camel_to_snake(opname),
                                   self.entity_map, self.run_operations)
        else:
            arguments = tuple()

        if isinstance(target, MongoClient):
            method_name = '_clientOperation_%s' % (opname, )
        elif isinstance(target, Database):
            method_name = '_databaseOperation_%s' % (opname, )
        elif isinstance(target, Collection):
            method_name = '_collectionOperation_%s' % (opname, )
        elif isinstance(target, ChangeStream):
            method_name = '_changeStreamOperation_%s' % (opname, )
        elif isinstance(target, NonLazyCursor):
            method_name = '_cursor_%s' % (opname, )
        elif isinstance(target, ClientSession):
            method_name = '_sessionOperation_%s' % (opname, )
        elif isinstance(target, GridFSBucket):
            raise NotImplementedError
        else:
            method_name = 'doesNotExist'

        try:
            method = getattr(self, method_name)
        except AttributeError:
            try:
                cmd = getattr(target, camel_to_snake(opname))
            except AttributeError:
                self.fail('Unsupported operation %s on entity %s' %
                          (opname, target))
        else:
            cmd = functools.partial(method, target)

        try:
            result = cmd(**dict(arguments))
        except Exception as exc:
            # Ignore all operation errors, but to avoid masking bugs don't
            # ignore things like TypeError and ValueError.
            if ignore and isinstance(exc, (PyMongoError, )):
                return
            if expect_error:
                return self.process_error(exc, expect_error)
            raise
        else:
            if expect_error:
                self.fail('Expected error %s but "%s" succeeded: %s' %
                          (expect_error, opname, result))

        if expect_result:
            actual = coerce_result(opname, result)
            self.match_evaluator.match_result(expect_result, actual)

        if save_as_entity:
            self.entity_map[save_as_entity] = result
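When no _<entity>Operation_<name> handler is defined, the runner falls back to calling the snake_case method directly on the target entity. A hypothetical walk-through of that fallback path, reusing the camel_to_snake sketch from above (assumes a locally running mongod; the operation name and namespace are illustrative):

from pymongo import MongoClient

collection = MongoClient().test.spec
# 'countDocuments' has no _collectionOperation_* handler in this sketch, so it
# resolves to Collection.count_documents via camel_to_snake.
cmd = getattr(collection, camel_to_snake('countDocuments'))
print(cmd(filter={}))  # prints the number of documents in test.spec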