Ejemplo n.º 1
0
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.

        Sends each run produced by *generator* as batched write commands
        over *sock_info*, merging the per-batch replies into one bulk
        result dict.  Raises BulkWriteError if any write or write concern
        errors occurred; otherwise returns the merged result.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # A single op_id ties all batches of this bulk together for
        # command monitoring listeners.
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        # NOTE(review): _tmp_session presumably yields the caller's session,
        # a temporary one, or None — confirm against its definition.
        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                # Only send bypassDocumentValidation to servers that
                # support it (wire version 4+).
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                client._send_cluster_time(cmd)
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = _do_batched_write_command(
                    self.namespace, run.op_type, cmd,
                    run.ops, True, self.collection.codec_options, bwc)

                _merge_command(run, full_result, results)
                # Gossip the cluster time from the last batch's reply.
                last_result = results[-1][1]
                client._receive_cluster_time(last_result)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                # Report write errors in the caller's operation order.
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Ejemplo n.º 2
0
 def execute_insert_no_results(self, sock_info, run, op_id, acknowledged):
     """Run the inserts without collecting individual write results."""
     coll = self.collection
     # w:1 when ordered so the server stops at the first error,
     # w:0 otherwise.
     insert_concern = {'w': int(self.ordered)}
     cmd = SON([('insert', coll.name), ('ordered', self.ordered)])
     cmd['writeConcern'] = insert_concern
     # bypassDocumentValidation is only valid for wire version 4+ servers.
     if sock_info.max_wire_version >= 4 and self.bypass_doc_val:
         cmd['bypassDocumentValidation'] = True
     database = coll.database
     ctx = _BulkWriteContext(database.name, cmd, sock_info, op_id,
                             database.client._event_listeners, None,
                             _INSERT, coll.codec_options)
     # Send the documents as legacy batched OP_INSERT messages.
     _do_batched_insert(coll.full_name, run.ops, True, acknowledged,
                        insert_concern, not self.ordered,
                        coll.codec_options, ctx)
Ejemplo n.º 3
0
 def execute_insert_no_results(self, sock_info, run, op_id, acknowledged):
     """Execute insert, returning no results.

     Sends ``run.ops`` as legacy batched OP_INSERT messages; individual
     write results are not collected.
     """
     command = SON([('insert', self.collection.name),
                    ('ordered', self.ordered)])
     # w:1 when ordered (server stops at first error), w:0 otherwise.
     concern = {'w': int(self.ordered)}
     command['writeConcern'] = concern
     # Only send bypassDocumentValidation to servers that support it
     # (wire version 4+).
     if self.bypass_doc_val and sock_info.max_wire_version >= 4:
         command['bypassDocumentValidation'] = True
     db = self.collection.database
     bwc = _BulkWriteContext(
         db.name, command, sock_info, op_id, db.client._event_listeners,
         session=None)
     # Legacy batched OP_INSERT.
     _do_batched_insert(
         self.collection.full_name, run.ops, True, acknowledged, concern,
         not self.ordered, self.collection.codec_options, bwc)
Ejemplo n.º 4
0
    def execute_command(self, sock_info, generator, write_concern):
        """Run the bulk operation with write commands.

        Merges the replies for every run into a single result dict and
        returns it, raising BulkWriteError if any write or write concern
        errors occurred.
        """
        # nModified is only reported for write commands, not legacy ops.
        merged = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # One op_id correlates all batches of this bulk in monitoring.
        op_id = _randint()
        database = self.collection.database
        listeners = database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            # bypassDocumentValidation is only valid on wire version 4+.
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True

            ctx = _BulkWriteContext(database.name, cmd, sock_info, op_id,
                                    listeners)
            batch_results = _do_batched_write_command(
                self.namespace, run.op_type, cmd, run.ops, True,
                self.collection.codec_options, ctx)

            _merge_command(run, merged, batch_results)
            # Write-concern-level errors (e.g. wtimeout) don't stop us,
            # but an ordered bulk halts at the first write error.
            if self.ordered and merged['writeErrors']:
                break

        if merged["writeErrors"] or merged["writeConcernErrors"]:
            if merged['writeErrors']:
                # Report write errors in the caller's operation order.
                merged['writeErrors'].sort(key=lambda error: error['index'])
            raise BulkWriteError(merged)
        return merged
Ejemplo n.º 5
0
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.

        Merges the replies for every run produced by *generator* into a
        single result dict and returns it, raising BulkWriteError if any
        write or write concern errors occurred.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # One op_id correlates all batches of this bulk in monitoring.
        op_id = _randint()
        db_name = self.collection.database.name
        listeners = self.collection.database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document

            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
            results = _do_batched_write_command(self.namespace, run.op_type,
                                                cmd, run.ops, True,
                                                self.collection.codec_options,
                                                bwc)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                # Report write errors in the caller's operation order.
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Ejemplo n.º 6
0
    def execute_op_msg_no_results(self, sock_info, generator):
        """Send the whole bulk as unacknowledged (w=0), unordered OP_MSG
        write commands, collecting no results.
        """
        database = self.collection.database
        client = database.client
        listeners = client._event_listeners
        op_id = _randint()

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            command = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', False),
                           ('writeConcern', {'w': 0})])
            ctx = _BulkWriteContext(database.name, command, sock_info,
                                    op_id, listeners, None)

            while run.idx_offset < len(run.ops):
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                remaining = islice(run.ops, run.idx_offset, None)
                # Pack as many ops as fit into one message.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, command, remaining,
                    check_keys, self.collection.codec_options, ctx)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                run.idx_offset += len(to_send)
                # Though this isn't strictly a "legacy" write, the helper
                # handles publishing commands and sending our message
                # without receiving a result. Send 0 for max_doc_size
                # to disable size checking. Size checking is handled while
                # the documents are encoded to BSON.
                ctx.legacy_write(request_id, msg, 0, False, to_send)
            self.current_run = run = next(generator, None)
Ejemplo n.º 7
0
    def execute_op_msg_no_results(self, sock_info, generator):
        """Execute write commands with OP_MSG and w=0 writeConcern, unordered.

        Sends every run as unacknowledged write commands; no results are
        collected.
        """
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners
        op_id = _randint()

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', False),
                       ('writeConcern', {'w': 0})])
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, None)

            while run.idx_offset < len(run.ops):
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                run.idx_offset += len(to_send)
                # Though this isn't strictly a "legacy" write, the helper
                # handles publishing commands and sending our message
                # without receiving a result. Send 0 for max_doc_size
                # to disable size checking. Size checking is handled while
                # the documents are encoded to BSON.
                bwc.legacy_write(request_id, msg, 0, False, to_send)
            self.current_run = run = next(generator, None)
Ejemplo n.º 8
0
    def _execute_command(self, generator, write_concern, session, sock_info,
                         op_id, retryable, full_result):
        """Execute the remaining runs as write commands, merging each
        reply into *full_result*.

        Raises ConfigurationError when the server is too old for a
        requested feature, InvalidOperation on an empty batch, and a bulk
        write error (via _raise_bulk_write_error) on a retryable write
        concern error.
        """
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if not write_concern.is_server_default:
                cmd['writeConcern'] = write_concern.document
            # bypassDocumentValidation is only valid on wire version 4+.
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners,
                                    session)

            while run.idx_offset < len(run.ops):
                if session:
                    session._apply_to(cmd, retryable, ReadPreference.PRIMARY)
                sock_info.send_cluster_time(cmd, session, client)
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                result = bwc.write_command(request_id, msg, to_send)
                client._receive_cluster_time(result, session)

                # Retryable writeConcernErrors halt the execution of this run.
                wce = result.get('writeConcernError', {})
                if wce.get('code', 0) in _RETRYABLE_ERROR_CODES:
                    # Synthesize the full bulk result without modifying the
                    # current one because this write operation may be retried.
                    full = copy.deepcopy(full_result)
                    _merge_command(run, full, run.idx_offset, result)
                    _raise_bulk_write_error(full)

                _merge_command(run, full_result, run.idx_offset, result)
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                if self.ordered and "writeErrors" in result:
                    break
                run.idx_offset += len(to_send)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)
Ejemplo n.º 9
0
    def _execute_command(self, generator, write_concern, session,
                         sock_info, op_id, retryable, full_result):
        """Run the remaining batches as write commands, folding every
        server reply into *full_result*.
        """
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        database = self.collection.database
        client = database.client
        listeners = client._event_listeners

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            command = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
            if write_concern.document:
                command['writeConcern'] = write_concern.document
            # bypassDocumentValidation is only valid on wire version 4+.
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                command['bypassDocumentValidation'] = True
            if session:
                command['lsid'] = session._use_lsid()
            ctx = _BulkWriteContext(database.name, command, sock_info,
                                    op_id, listeners, session)

            batch_results = []
            while run.idx_offset < len(run.ops):
                if session and retryable:
                    command['txnNumber'] = session._transaction_id()
                sock_info.send_cluster_time(command, session, client)
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                remaining = islice(run.ops, run.idx_offset, None)
                # Pack as many ops as fit into one command.
                request_id, msg, to_send = _do_batched_write_command(
                    self.namespace, run.op_type, command, remaining,
                    check_keys, self.collection.codec_options, ctx)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                reply = ctx.write_command(request_id, msg, to_send)
                client._receive_cluster_time(reply, session)
                batch_results.append((run.idx_offset, reply))
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                if self.ordered and "writeErrors" in reply:
                    break
                run.idx_offset += len(to_send)

            _merge_command(run, full_result, batch_results)

            # Write-concern-level errors (e.g. wtimeout) don't stop us,
            # but an ordered bulk halts at the first write error.
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)
Ejemplo n.º 10
0
    def _execute_command(self, generator, write_concern, session,
                         sock_info, op_id, retryable, full_result):
        """Execute the remaining runs as write commands, merging every
        server reply into *full_result*.

        Raises ConfigurationError when the server is too old for a
        requested feature and InvalidOperation on an empty batch.
        """
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            # bypassDocumentValidation is only valid on wire version 4+.
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True
            if session:
                cmd['lsid'] = session._use_lsid()
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, session)

            results = []
            while run.idx_offset < len(run.ops):
                if session and retryable:
                    cmd['txnNumber'] = session._transaction_id()
                sock_info.send_cluster_time(cmd, session, client)
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_batched_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                result = bwc.write_command(request_id, msg, to_send)
                client._receive_cluster_time(result, session)
                results.append((run.idx_offset, result))
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                if self.ordered and "writeErrors" in result:
                    break
                run.idx_offset += len(to_send)

            _merge_command(run, full_result, results)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)
Ejemplo n.º 11
0
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.

        Sends every run produced by *generator* as batched write commands,
        merging all replies into one bulk result dict.  Raises
        BulkWriteError if any write or write concern errors occurred;
        otherwise returns the merged result.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        # A single op_id ties all batches of this bulk together for
        # command monitoring listeners.
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        # NOTE(review): _tmp_session presumably yields the caller's session,
        # a temporary one, or None — confirm against its definition.
        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                # bypassDocumentValidation is only valid on wire version 4+.
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = []
                idx_offset = 0
                while idx_offset < len(run.ops):
                    # Only insert documents need key validation.
                    check_keys = run.op_type == _INSERT
                    ops = islice(run.ops, idx_offset, None)
                    # Run as many ops as possible.
                    client._send_cluster_time(cmd, s)
                    request_id, msg, to_send = _do_batched_write_command(
                        self.namespace, run.op_type, cmd, ops, check_keys,
                        self.collection.codec_options, bwc)
                    if not to_send:
                        raise InvalidOperation("cannot do an empty bulk write")
                    result = bwc.write_command(request_id, msg, to_send)
                    client._receive_cluster_time(result)
                    if s is not None:
                        # Gossip cluster/operation time back onto the session.
                        s._advance_cluster_time(result.get("$clusterTime"))
                        s._advance_operation_time(result.get("operationTime"))
                    results.append((idx_offset, result))
                    if self.ordered and "writeErrors" in result:
                        break
                    idx_offset += len(to_send)

                _merge_command(run, full_result, results)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                # Report write errors in the caller's operation order.
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
Ejemplo n.º 12
0
    def _execute_command(self, generator, write_concern, session,
                         sock_info, op_id, retryable, full_result):
        """Execute the remaining runs as write commands, merging each
        reply into *full_result*.

        Tracks retryable-write state on ``self`` (``retrying`` /
        ``started_retryable_write``) so a failed attempt can be resumed.
        Raises ConfigurationError when the server is too old for a
        requested feature, InvalidOperation on an empty batch, and a bulk
        write error (via _raise_bulk_write_error) on a retryable write
        concern error.
        """
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        # Resume a partially executed run if one exists.
        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if not write_concern.is_server_default:
                cmd['writeConcern'] = write_concern.document
            # bypassDocumentValidation is only valid on wire version 4+.
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, session)

            while run.idx_offset < len(run.ops):
                if session:
                    # Start a new retryable write unless one was already
                    # started for this command.
                    if retryable and not self.started_retryable_write:
                        session._start_retryable_write()
                        self.started_retryable_write = True
                    session._apply_to(cmd, retryable, ReadPreference.PRIMARY)
                sock_info.send_cluster_time(cmd, session, client)
                # Only insert documents need key validation.
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_bulk_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                result = bwc.write_command(request_id, msg, to_send)
                client._process_response(result, session)

                # Retryable writeConcernErrors halt the execution of this run.
                wce = result.get('writeConcernError', {})
                if wce.get('code', 0) in _RETRYABLE_ERROR_CODES:
                    # Synthesize the full bulk result without modifying the
                    # current one because this write operation may be retried.
                    full = copy.deepcopy(full_result)
                    _merge_command(run, full, run.idx_offset, result)
                    _raise_bulk_write_error(full)

                _merge_command(run, full_result, run.idx_offset, result)
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                self.started_retryable_write = False

                if self.ordered and "writeErrors" in result:
                    break
                run.idx_offset += len(to_send)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)