Example #1
    def execute_command(self, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()

        def retryable_bulk(session, sock_info, retryable):
            self._execute_command(
                generator, write_concern, session, sock_info, op_id,
                retryable, full_result)

        client = self.collection.database.client
        with client._tmp_session(session) as s:
            client._retry_with_session(
                self.is_retryable, retryable_bulk, s, self)

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
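
For orientation, here is a minimal sketch of how this internal method gets exercised from PyMongo's public API (the host, database and collection names are assumptions): an acknowledged, ordered bulk write, with the aggregated full_result surfacing as BulkWriteError.details on failure.

# Sketch only: driving the acknowledged bulk-write path via the public API.
from pymongo import MongoClient, InsertOne, UpdateOne, DeleteMany
from pymongo.errors import BulkWriteError

client = MongoClient("mongodb://localhost:27017")  # assumed local mongod
coll = client.test.things                          # assumed namespace

requests = [
    InsertOne({"x": 1}),
    UpdateOne({"x": 1}, {"$set": {"y": 2}}, upsert=True),
    DeleteMany({"x": {"$lt": 0}}),
]
try:
    result = coll.bulk_write(requests, ordered=True)
    print(result.inserted_count, result.modified_count, result.deleted_count)
except BulkWriteError as exc:
    # exc.details has the same shape as full_result above, with
    # writeErrors sorted by operation index.
    for err in exc.details["writeErrors"]:
        print(err["index"], err["errmsg"])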
Example #2
    def execute_command_no_results(self, sock_info, generator, write_concern):
        """Execute write commands with OP_MSG and w=0 WriteConcern, ordered."""
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # Ordered bulk writes have to be acknowledged so that we stop
        # processing at the first error, even when the application
        # specified unacknowledged writeConcern.
        initial_write_concern = WriteConcern()
        op_id = _randint()
        try:
            self._execute_command(
                generator,
                initial_write_concern,
                None,
                sock_info,
                op_id,
                False,
                full_result,
                write_concern,
            )
        except OperationFailure:
            pass
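
A short usage sketch of the case this method handles (collection name assumed): an ordered bulk write against a collection configured with WriteConcern(w=0). The batches are acknowledged internally so the driver can stop at the first error, but the caller still gets an unacknowledged result.

# Sketch only: ordered bulk write requested with w=0.
from pymongo import MongoClient, InsertOne
from pymongo.write_concern import WriteConcern

client = MongoClient()  # assumed local mongod
unacked = client.test.get_collection(
    "things", write_concern=WriteConcern(w=0))
result = unacked.bulk_write([InsertOne({"x": i}) for i in range(3)],
                            ordered=True)
print(result.acknowledged)  # False: per-operation counts are unavailable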
Example #3
    def execute_command(self, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()

        def retryable_bulk(session, sock_info, retryable):
            self._execute_command(generator, write_concern, session, sock_info,
                                  op_id, retryable, full_result)

        client = self.collection.database.client
        with client._tmp_session(session) as s:
            client._retry_with_session(self.is_retryable, retryable_bulk, s,
                                       self)

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            _raise_bulk_write_error(full_result)
        return full_result
Example #4
    def execute_op_msg_no_results(self, sock_info, generator):
        """Execute write commands with OP_MSG and w=0 writeConcern, unordered.
        """
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners
        op_id = _randint()

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', False), ('writeConcern', {
                           'w': 0
                       })])
            bwc = self.bulk_ctx_class(db_name, cmd, sock_info, op_id,
                                      listeners, None, run.op_type,
                                      self.collection.codec_options)

            while run.idx_offset < len(run.ops):
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                to_send = bwc.execute_unack(ops, client)
                run.idx_offset += len(to_send)
            self.current_run = run = next(generator, None)
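
The idx_offset bookkeeping above is plain itertools.islice resumed from an offset after each batch. A standalone illustration of that pattern, where send_batch is a made-up stand-in for bwc.execute_unack returning the ops it managed to send:

# Illustrative batching loop; send_batch is a hypothetical placeholder that
# "sends" at most three ops per round and returns what it consumed.
from itertools import islice

def send_batch(ops, limit=3):
    return list(islice(ops, limit))

ops = [{"_id": i} for i in range(8)]
idx_offset = 0
while idx_offset < len(ops):
    to_send = send_batch(islice(ops, idx_offset, None))
    idx_offset += len(to_send)
    print("sent", [doc["_id"] for doc in to_send])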
Example #5
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        if self.uses_collation:
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        if self.uses_array_filters:
            raise ConfigurationError(
                'arrayFilters is unsupported for unacknowledged writes.')
        # Cannot have both unacknowledged write and bypass document validation.
        if self.bypass_doc_val and sock_info.max_wire_version >= 4:
            raise OperationFailure("Cannot set bypass_document_validation with"
                                   " unacknowledged write concern")
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        next_run = next(generator)
        while next_run:
            # An ordered bulk write needs to send acknowledged writes to short
            # circuit the next run. However, the final message on the final
            # run can be unacknowledged.
            run = next_run
            next_run = next(generator, None)
            needs_ack = self.ordered and next_run is not None
            try:
                if run.op_type == _INSERT:
                    self.execute_insert_no_results(
                        sock_info, run, op_id, needs_ack)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(
                            sock_info,
                            operation['q'],
                            doc,
                            operation['upsert'],
                            check_keys,
                            operation['multi'],
                            write_concern=write_concern,
                            op_id=op_id,
                            ordered=self.ordered,
                            bypass_doc_val=self.bypass_doc_val)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
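
A quick sketch of the collation guard above, seen from the public API (names assumed; a reachable mongod is needed so the driver can get a socket before the check runs): combining a collation with an unacknowledged write concern is rejected by the driver itself.

# Sketch only: collation plus w=0 is refused before anything is sent.
from pymongo import MongoClient, DeleteOne
from pymongo.collation import Collation
from pymongo.errors import ConfigurationError
from pymongo.write_concern import WriteConcern

client = MongoClient()  # assumed local mongod
unacked = client.test.get_collection(
    "things", write_concern=WriteConcern(w=0))
try:
    unacked.bulk_write(
        [DeleteOne({"x": 1}, collation=Collation(locale="fr"))])
except ConfigurationError as exc:
    print(exc)  # Collation is unsupported for unacknowledged writes.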
Example #6
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        if self.uses_collation:
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        if self.uses_array_filters:
            raise ConfigurationError(
                'arrayFilters is unsupported for unacknowledged writes.')
        # Cannot have both unacknowledged write and bypass document validation.
        if self.bypass_doc_val and sock_info.max_wire_version >= 4:
            raise OperationFailure("Cannot set bypass_document_validation with"
                                   " unacknowledged write concern")
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        next_run = next(generator)
        while next_run:
            # An ordered bulk write needs to send acknowledged writes to short
            # circuit the next run. However, the final message on the final
            # run can be unacknowledged.
            run = next_run
            next_run = next(generator, None)
            needs_ack = self.ordered and next_run is not None
            try:
                if run.op_type == _INSERT:
                    self.execute_insert_no_results(
                        sock_info, run, op_id, needs_ack)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(
                            sock_info,
                            operation['q'],
                            doc,
                            operation['upsert'],
                            check_keys,
                            operation['multi'],
                            write_concern=write_concern,
                            op_id=op_id,
                            ordered=self.ordered,
                            bypass_doc_val=self.bypass_doc_val)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
Example #7
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                client._send_cluster_time(cmd)
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = _do_batched_write_command(
                    self.namespace, run.op_type, cmd,
                    run.ops, True, self.collection.codec_options, bwc)

                _merge_command(run, full_result, results)
                last_result = results[-1][1]
                client._receive_cluster_time(last_result)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Example #8
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        # Cannot have both unacknowledged write and bypass document validation.
        if self.bypass_doc_val and sock_info.max_wire_version >= 4:
            raise OperationFailure("Cannot set bypass_document_validation with"
                                   " unacknowledged write concern")
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        for run in generator:
            try:
                if run.op_type == _INSERT:
                    coll._insert(
                        sock_info,
                        run.ops,
                        self.ordered,
                        write_concern=write_concern,
                        op_id=op_id,
                        bypass_doc_val=self.bypass_doc_val)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(
                            sock_info,
                            operation['q'],
                            doc,
                            operation['upsert'],
                            check_keys,
                            operation['multi'],
                            write_concern=write_concern,
                            op_id=op_id,
                            ordered=self.ordered,
                            bypass_doc_val=self.bypass_doc_val)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
Example #9
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        # Cannot have both unacknowledged write and bypass document validation.
        if self.bypass_doc_val and sock_info.max_wire_version >= 4:
            raise OperationFailure("Cannot set bypass_document_validation with"
                                   " unacknowledged write concern")
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        for run in generator:
            try:
                if run.op_type == _INSERT:
                    coll._insert(
                        sock_info,
                        run.ops,
                        self.ordered,
                        write_concern=write_concern,
                        op_id=op_id,
                        bypass_doc_val=self.bypass_doc_val)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(
                            sock_info,
                            operation['q'],
                            doc,
                            operation['upsert'],
                            check_keys,
                            operation['multi'],
                            write_concern=write_concern,
                            op_id=op_id,
                            ordered=self.ordered,
                            bypass_doc_val=self.bypass_doc_val)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
Example #10
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        for run in generator:
            try:
                if run.op_type == _INSERT:
                    coll._insert(sock_info,
                                 run.ops,
                                 self.ordered,
                                 write_concern=write_concern,
                                 op_id=op_id)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(sock_info,
                                     operation['q'],
                                     doc,
                                     operation['upsert'],
                                     check_keys,
                                     operation['multi'],
                                     write_concern=write_concern,
                                     op_id=op_id,
                                     ordered=self.ordered)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
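
The check_keys decision above distinguishes a replacement document from an update-operator document by inspecting the first key. The same predicate in isolation:

# First-key test used above: operator updates skip key checking.
def is_operator_update(doc):
    return bool(doc) and next(iter(doc)).startswith("$")

print(is_operator_update({"$set": {"y": 2}}))  # True  -> check_keys = False
print(is_operator_update({"x": 1}))            # False -> check_keys = True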
Example #11
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        listeners = self.collection.database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True

            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
            results = _do_batched_write_command(
                self.namespace, run.op_type, cmd,
                run.ops, True, self.collection.codec_options, bwc)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Example #12
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        listeners = self.collection.database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document

            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
            results = _do_batched_write_command(self.namespace, run.op_type,
                                                cmd, run.ops, True,
                                                self.collection.codec_options,
                                                bwc)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Example #13
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()

        for run in generator:
            try:
                if run.op_type == _INSERT:
                    coll._insert(sock_info,
                                 run.ops,
                                 self.ordered,
                                 write_concern=write_concern,
                                 op_id=op_id)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(sock_info,
                                     operation['q'],
                                     doc,
                                     operation['upsert'],
                                     check_keys,
                                     operation['multi'],
                                     write_concern=write_concern,
                                     op_id=op_id,
                                     ordered=self.ordered)
                else:
                    for operation in run.ops:
                        coll._delete(sock_info, operation['q'],
                                     not operation['limit'], write_concern,
                                     op_id, self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
Example #14
    def execute_op_msg_no_results(self, sock_info, generator):
        """Execute write commands with OP_MSG and w=0 writeConcern, unordered.
        """
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners
        op_id = _randint()

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', False), ('writeConcern', {
                           'w': 0
                       })])
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners,
                                    None)

            while run.idx_offset < len(run.ops):
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                run.idx_offset += len(to_send)
                # Though this isn't strictly a "legacy" write, the helper
                # handles publishing commands and sending our message
                # without receiving a result. Send 0 for max_doc_size
                # to disable size checking. Size checking is handled while
                # the documents are encoded to BSON.
                bwc.legacy_write(request_id, msg, 0, False, to_send)
            self.current_run = run = next(generator, None)
Example #15
    def execute_op_msg_no_results(self, sock_info, generator):
        """Execute write commands with OP_MSG and w=0 writeConcern, unordered.
        """
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners
        op_id = _randint()

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', False),
                       ('writeConcern', {'w': 0})])
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, None)

            while run.idx_offset < len(run.ops):
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                run.idx_offset += len(to_send)
                # Though this isn't strictly a "legacy" write, the helper
                # handles publishing commands and sending our message
                # without receiving a result. Send 0 for max_doc_size
                # to disable size checking. Size checking is handled while
                # the documents are encoded to BSON.
                bwc.legacy_write(request_id, msg, 0, False, to_send)
            self.current_run = run = next(generator, None)
Example #16
    def execute_command_no_results(self, sock_info, generator):
     """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.
     """
     full_result = {
         "writeErrors": [],
         "writeConcernErrors": [],
         "nInserted": 0,
         "nUpserted": 0,
         "nMatched": 0,
         "nModified": 0,
         "nRemoved": 0,
         "upserted": [],
     }
     # Ordered bulk writes have to be acknowledged so that we stop
     # processing at the first error, even when the application
     # specified unacknowledged writeConcern.
     write_concern = WriteConcern()
     op_id = _randint()
     try:
         self._execute_command(
             generator, write_concern, None,
             sock_info, op_id, False, full_result)
     except OperationFailure:
         pass
Example #17
    def execute_legacy(self, sock_info, generator, write_concern):
        """Execute using legacy wire protocol ops.
        """
        coll = self.collection
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        stop = False
        for run in generator:
            for idx, operation in enumerate(run.ops):
                try:
                    # To do per-operation reporting we have to do ops one
                    # at a time. That means the performance of bulk insert
                    # will be slower here than calling Collection.insert()
                    if run.op_type == _INSERT:
                        coll._insert(sock_info,
                                     operation,
                                     self.ordered,
                                     write_concern=write_concern,
                                     op_id=op_id)
                        result = {}
                    elif run.op_type == _UPDATE:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        result = coll._update(sock_info,
                                              operation['q'],
                                              doc,
                                              operation['upsert'],
                                              check_keys,
                                              operation['multi'],
                                              write_concern=write_concern,
                                              op_id=op_id,
                                              ordered=self.ordered)
                    else:
                        result = coll._delete(sock_info,
                                              operation['q'],
                                              not operation['limit'],
                                              write_concern,
                                              op_id,
                                              self.ordered)
                    _merge_legacy(run, full_result, result, idx)
                except DocumentTooLarge as exc:
                    # MongoDB 2.6 uses error code 2 for "too large".
                    error = _make_error(
                        run.index(idx), _BAD_VALUE, str(exc), operation)
                    full_result['writeErrors'].append(error)
                    if self.ordered:
                        stop = True
                        break
                except OperationFailure as exc:
                    if not exc.details:
                        # Some error not related to the write operation
                        # (e.g. kerberos failure). Re-raise immediately.
                        raise
                    _merge_legacy(run, full_result, exc.details, idx)
                    # We're supposed to continue if errors are
                    # at the write concern level (e.g. wtimeout)
                    if self.ordered and full_result["writeErrors"]:
                        stop = True
                        break
            if stop:
                break

        if full_result["writeErrors"] or full_result['writeConcernErrors']:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Example #18
    def execute_legacy(self, sock_info, generator, write_concern):
        """Execute using legacy wire protocol ops.
        """
        coll = self.collection
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        stop = False
        for run in generator:
            for idx, operation in enumerate(run.ops):
                try:
                    # To do per-operation reporting we have to do ops one
                    # at a time. That means the performance of bulk insert
                    # will be slower here than calling Collection.insert()
                    if run.op_type == _INSERT:
                        coll._insert(sock_info,
                                     operation,
                                     self.ordered,
                                     write_concern=write_concern,
                                     op_id=op_id)
                        result = {}
                    elif run.op_type == _UPDATE:
                        doc = operation['u']
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        result = coll._update(sock_info,
                                              operation['q'],
                                              doc,
                                              operation['upsert'],
                                              check_keys,
                                              operation['multi'],
                                              write_concern=write_concern,
                                              op_id=op_id,
                                              ordered=self.ordered)
                    else:
                        result = coll._delete(sock_info, operation['q'],
                                              not operation['limit'],
                                              write_concern, op_id,
                                              self.ordered)
                    _merge_legacy(run, full_result, result, idx)
                except DocumentTooLarge as exc:
                    # MongoDB 2.6 uses error code 2 for "too large".
                    error = _make_error(run.index(idx), _BAD_VALUE, str(exc),
                                        operation)
                    full_result['writeErrors'].append(error)
                    if self.ordered:
                        stop = True
                        break
                except OperationFailure as exc:
                    if not exc.details:
                        # Some error not related to the write operation
                        # (e.g. kerberos failure). Re-raise immediately.
                        raise
                    _merge_legacy(run, full_result, exc.details, idx)
                    # We're supposed to continue if errors are
                    # at the write concern level (e.g. wtimeout)
                    if self.ordered and full_result["writeErrors"]:
                        stop = True
                        break
            if stop:
                break

        if full_result["writeErrors"] or full_result['writeConcernErrors']:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Example #19
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = []
                idx_offset = 0
                while idx_offset < len(run.ops):
                    check_keys = run.op_type == _INSERT
                    ops = islice(run.ops, idx_offset, None)
                    # Run as many ops as possible.
                    client._send_cluster_time(cmd, s)
                    request_id, msg, to_send = _do_batched_write_command(
                        self.namespace, run.op_type, cmd, ops, check_keys,
                        self.collection.codec_options, bwc)
                    if not to_send:
                        raise InvalidOperation("cannot do an empty bulk write")
                    result = bwc.write_command(request_id, msg, to_send)
                    client._receive_cluster_time(result)
                    if s is not None:
                        s._advance_cluster_time(result.get("$clusterTime"))
                        s._advance_operation_time(result.get("operationTime"))
                    results.append((idx_offset, result))
                    if self.ordered and "writeErrors" in result:
                        break
                    idx_offset += len(to_send)

                _merge_command(run, full_result, results)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
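
Example #19 attaches a session (lsid, $clusterTime) to each write command. From the public API this corresponds to passing an explicit session to bulk_write; a sketch with assumed names, requiring a deployment that supports sessions (MongoDB 3.6+):

# Sketch only: explicit session threaded through a bulk write.
from pymongo import MongoClient, InsertOne, DeleteOne

client = MongoClient("mongodb://localhost:27017")  # assumed local mongod
coll = client.test.things                          # assumed namespace
with client.start_session() as session:
    coll.bulk_write([InsertOne({"x": 1}), DeleteOne({"x": 1})],
                    ordered=True, session=session)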