Example #1
0
    def execute_command(self, generator, write_concern):
        """Execute using write commands.
        """
        uuid_subtype = self.collection.uuid_subtype
        client = self.collection.database.connection
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ("ordered", self.ordered)])
            if write_concern:
                cmd["writeConcern"] = write_concern

            results = _do_batched_write_command(
                self.namespace, run.op_type, cmd,
                run.ops, True, uuid_subtype, client)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result["writeErrors"]:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result["writeErrors"]:
                full_result["writeErrors"].sort(key=lambda error: error["index"])
            raise BulkWriteError(full_result)
        return full_result
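
The result-aggregation pattern above (`_merge_command` followed by a final sort of `writeErrors` by `index` before raising) can be illustrated with a self-contained sketch. The helpers below are hypothetical stand-ins, not PyMongo internals.

# Minimal sketch of merging per-batch write results into one summary.
# `new_full_result` and `merge_batch` are hypothetical stand-ins for the
# full_result/_merge_command machinery shown above.

def new_full_result():
    return {
        "writeErrors": [], "writeConcernErrors": [],
        "nInserted": 0, "nUpserted": 0, "nMatched": 0,
        "nModified": 0, "nRemoved": 0, "upserted": [],
    }

def merge_batch(offset, batch_result, full_result):
    # Counters are summed across batches.
    full_result["nInserted"] += batch_result.get("n", 0)
    # Per-document errors are re-indexed relative to the whole bulk
    # operation, then collected for a final sort.
    for error in batch_result.get("writeErrors", []):
        error = dict(error, index=offset + error["index"])
        full_result["writeErrors"].append(error)

full = new_full_result()
merge_batch(0, {"n": 2, "writeErrors": [{"index": 1, "errmsg": "dup key"}]}, full)
merge_batch(2, {"n": 1, "writeErrors": [{"index": 0, "errmsg": "dup key"}]}, full)
full["writeErrors"].sort(key=lambda error: error["index"])
print([e["index"] for e in full["writeErrors"]])  # [1, 2]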
Example #2
0
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                client._send_cluster_time(cmd)
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = _do_batched_write_command(
                    self.namespace, run.op_type, cmd,
                    run.ops, True, self.collection.codec_options, bwc)

                _merge_command(run, full_result, results)
                last_result = results[-1][1]
                client._receive_cluster_time(last_result)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
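
Example #2 wraps the loop in `_tmp_session(session)`, which either reuses the caller's session or supplies a short-lived one. A minimal sketch of that "borrow or create and clean up" pattern follows; the class and function names are hypothetical, not the driver's API.

from contextlib import contextmanager

class _FakeSession:
    """Hypothetical stand-in for a driver session object."""
    def __init__(self):
        self.ended = False
    def end(self):
        self.ended = True

@contextmanager
def tmp_session(session):
    # If the caller supplied a session, use it and leave its lifetime
    # to the caller; otherwise create one and end it on exit.
    if session is not None:
        yield session
        return
    temp = _FakeSession()
    try:
        yield temp
    finally:
        temp.end()

with tmp_session(None) as s:
    pass
print(s.ended)  # True: the implicit session was cleaned up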
Example #3
0
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        listeners = self.collection.database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True

            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
            results = _do_batched_write_command(
                self.namespace, run.op_type, cmd,
                run.ops, True, self.collection.codec_options, bwc)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
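
Examples #2 and #3 only attach `bypassDocumentValidation` when the connection reports `max_wire_version >= 4`, so older servers never receive an option they do not understand. A minimal sketch of that wire-version gating, using a hypothetical connection object, follows.

class FakeSockInfo:
    """Hypothetical connection descriptor exposing the negotiated wire version."""
    def __init__(self, max_wire_version):
        self.max_wire_version = max_wire_version

def build_cmd(collection_name, ordered, bypass_doc_val, sock_info):
    cmd = {"insert": collection_name, "ordered": ordered}
    # Only attach the option when the server is new enough to accept it.
    if bypass_doc_val and sock_info.max_wire_version >= 4:
        cmd["bypassDocumentValidation"] = True
    return cmd

print(build_cmd("users", True, True, FakeSockInfo(3)))  # option omitted
print(build_cmd("users", True, True, FakeSockInfo(5)))  # option included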
Example #4
0
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        listeners = self.collection.database.client._event_listeners

        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document

            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
            results = _do_batched_write_command(self.namespace, run.op_type,
                                                cmd, run.ops, True,
                                                self.collection.codec_options,
                                                bwc)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
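
Every variant builds the command document with `SON` so that the command name (`insert`, `update`, or `delete`) is the first key the server sees. The sketch below uses `collections.OrderedDict` as a stand-in for `SON`, with a made-up `COMMANDS` mapping, to show the same ordering guarantee.

from collections import OrderedDict

# Hypothetical mapping mirroring _COMMANDS: op type -> command name.
COMMANDS = {0: "insert", 1: "update", 2: "delete"}

def build_write_command(op_type, collection_name, ordered, write_concern=None):
    # The server dispatches on the first key, so it must be the command name.
    cmd = OrderedDict([(COMMANDS[op_type], collection_name),
                       ("ordered", ordered)])
    if write_concern:
        cmd["writeConcern"] = write_concern
    return cmd

cmd = build_write_command(0, "users", True, {"w": "majority"})
print(list(cmd))  # ['insert', 'ordered', 'writeConcern']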
Example #5
0
    def execute_command(self, generator, write_concern):
        """Execute using write commands.
        """
        uuid_subtype = self.collection.uuid_subtype
        client = self.collection.database.connection
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern:
                cmd['writeConcern'] = write_concern

            results = _do_batched_write_command(self.namespace, run.op_type,
                                                cmd, run.ops, True,
                                                uuid_subtype, client)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
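
In every variant the loop breaks after merging a run's results if `self.ordered` is set and `writeErrors` is non-empty, while unordered bulks keep executing the remaining runs; write-concern errors alone do not stop execution. A compact sketch of that control flow, with made-up per-run results, follows.

def execute_runs(run_results, ordered):
    """Hypothetical driver loop: run_results stands in for the per-run
    server responses that _merge_command would fold into full_result."""
    full_result = {"writeErrors": [], "nInserted": 0}
    executed = 0
    for result in run_results:
        executed += 1
        full_result["nInserted"] += result.get("n", 0)
        full_result["writeErrors"].extend(result.get("writeErrors", []))
        # Ordered bulks stop at the first run that produced write errors.
        if ordered and full_result["writeErrors"]:
            break
    return executed, full_result

runs = [{"n": 2}, {"n": 1, "writeErrors": [{"index": 0}]}, {"n": 3}]
print(execute_runs(runs, ordered=True)[0])   # 2: third run skipped
print(execute_runs(runs, ordered=False)[0])  # 3: all runs executed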
Example #6
0
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document

            results = _do_batched_write_command(
                self.namespace, run.op_type, cmd,
                run.ops, True, self.collection.codec_options, sock_info)

            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
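
The older variants (Examples #1 and #5) take `write_concern` as a plain dict and test its truthiness, while the newer ones take an object and read `write_concern.document`. The hypothetical minimal class below shows why the `.document` check behaves the same way: an empty document means "server default" and no `writeConcern` field is attached.

class SimpleWriteConcern:
    """Hypothetical minimal write-concern wrapper, not PyMongo's class."""
    def __init__(self, **options):
        self._document = dict(options)

    @property
    def document(self):
        return dict(self._document)

default_wc = SimpleWriteConcern()
majority_wc = SimpleWriteConcern(w="majority", wtimeout=5000)
for wc in (default_wc, majority_wc):
    cmd = {"insert": "users", "ordered": True}
    # Empty document -> falsy -> use the server default, attach nothing.
    if wc.document:
        cmd["writeConcern"] = wc.document
    print(cmd)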
Example #7
0
    def _execute_command(self, generator, write_concern, session,
                         sock_info, op_id, retryable, full_result):
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True
            if session:
                cmd['lsid'] = session._use_lsid()
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, session)

            results = []
            while run.idx_offset < len(run.ops):
                if session and retryable:
                    cmd['txnNumber'] = session._transaction_id()
                sock_info.send_cluster_time(cmd, session, client)
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_batched_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                result = bwc.write_command(request_id, msg, to_send)
                client._receive_cluster_time(result, session)
                results.append((run.idx_offset, result))
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                if self.ordered and "writeErrors" in result:
                    break
                run.idx_offset += len(to_send)

            _merge_command(run, full_result, results)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)
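
Examples #7-#9 replace the single `_do_batched_write_command` call per run with an inner loop that repeatedly slices the remaining ops (`islice(run.ops, run.idx_offset, None)`) and advances the offset by however many ops actually fit in one message. The sketch below reproduces that loop with a hypothetical size-limited batcher in place of the real encoder.

from itertools import islice

def take_batch(ops, limit):
    """Hypothetical stand-in for the size-limited encoder: take at most
    `limit` ops from the iterator and return them as a list."""
    return list(islice(ops, limit))

ops = [{"_id": i} for i in range(7)]
idx_offset = 0
results = []
while idx_offset < len(ops):
    remaining = islice(ops, idx_offset, None)
    to_send = take_batch(remaining, 3)  # pretend only 3 ops fit per message
    if not to_send:
        raise RuntimeError("cannot do an empty bulk write")
    # Record where this batch started so errors can be re-indexed later.
    results.append((idx_offset, {"n": len(to_send)}))
    idx_offset += len(to_send)
print(results)  # [(0, {'n': 3}), (3, {'n': 3}), (6, {'n': 1})]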
Example #8
0
    def _execute_command(self, generator, write_concern, session,
                         sock_info, op_id, retryable, full_result):
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if sock_info.max_wire_version < 6 and self.uses_array_filters:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.6+ to use arrayFilters.')

        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        if not self.current_run:
            self.current_run = next(generator)
        run = self.current_run

        # sock_info.command validates the session, but we use
        # sock_info.write_command.
        sock_info.validate_session(client, session)
        while run:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                cmd['bypassDocumentValidation'] = True
            if session:
                cmd['lsid'] = session._use_lsid()
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                    listeners, session)

            results = []
            while run.idx_offset < len(run.ops):
                if session and retryable:
                    cmd['txnNumber'] = session._transaction_id()
                sock_info.send_cluster_time(cmd, session, client)
                check_keys = run.op_type == _INSERT
                ops = islice(run.ops, run.idx_offset, None)
                # Run as many ops as possible.
                request_id, msg, to_send = _do_batched_write_command(
                    self.namespace, run.op_type, cmd, ops, check_keys,
                    self.collection.codec_options, bwc)
                if not to_send:
                    raise InvalidOperation("cannot do an empty bulk write")
                result = bwc.write_command(request_id, msg, to_send)
                client._receive_cluster_time(result, session)
                results.append((run.idx_offset, result))
                # We're no longer in a retry once a command succeeds.
                self.retrying = False
                if self.ordered and "writeErrors" in result:
                    break
                run.idx_offset += len(to_send)

            _merge_command(run, full_result, results)

            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
            # Reset our state
            self.current_run = run = next(generator, None)
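
Examples #7 and #8 attach `txnNumber` from the session when the write is retryable, so a retried command can be recognized by the server. The sketch below illustrates that bookkeeping with a hypothetical session counter; it is not the driver's session API.

class FakeSession:
    """Hypothetical session keeping a monotonically increasing txn number."""
    def __init__(self):
        self._txn_number = 0

    def next_transaction_id(self):
        self._txn_number += 1
        return self._txn_number

def build_retryable_cmd(base_cmd, session, retryable):
    cmd = dict(base_cmd)
    # Retryable writes carry a transaction number; non-retryable ones do not.
    if session is not None and retryable:
        cmd["txnNumber"] = session.next_transaction_id()
    return cmd

s = FakeSession()
print(build_retryable_cmd({"insert": "users"}, s, retryable=True))
print(build_retryable_cmd({"insert": "users"}, s, retryable=False))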
Example #9
0
    def execute_command(self, sock_info, generator, write_concern, session):
        """Execute using write commands.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        client = self.collection.database.client
        listeners = client._event_listeners

        with self.collection.database.client._tmp_session(session) as s:
            # sock_info.command checks auth, but we use sock_info.write_command.
            sock_info.check_session_auth_matches(s)
            for run in generator:
                cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                           ('ordered', self.ordered)])
                if write_concern.document:
                    cmd['writeConcern'] = write_concern.document
                if self.bypass_doc_val and sock_info.max_wire_version >= 4:
                    cmd['bypassDocumentValidation'] = True
                if s:
                    cmd['lsid'] = s._use_lsid()
                bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                        listeners, s)

                results = []
                idx_offset = 0
                while idx_offset < len(run.ops):
                    check_keys = run.op_type == _INSERT
                    ops = islice(run.ops, idx_offset, None)
                    # Run as many ops as possible.
                    client._send_cluster_time(cmd, s)
                    request_id, msg, to_send = _do_batched_write_command(
                        self.namespace, run.op_type, cmd, ops, check_keys,
                        self.collection.codec_options, bwc)
                    if not to_send:
                        raise InvalidOperation("cannot do an empty bulk write")
                    result = bwc.write_command(request_id, msg, to_send)
                    client._receive_cluster_time(result)
                    if s is not None:
                        s._advance_cluster_time(result.get("$clusterTime"))
                        s._advance_operation_time(result.get("operationTime"))
                    results.append((idx_offset, result))
                    if self.ordered and "writeErrors" in result:
                        break
                    idx_offset += len(to_send)

                _merge_command(run, full_result, results)

                # We're supposed to continue if errors are
                # at the write concern level (e.g. wtimeout)
                if self.ordered and full_result['writeErrors']:
                    break

        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
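
All variants finish the same way: if any `writeErrors` or `writeConcernErrors` were accumulated, the errors are sorted by their position in the original batch and an exception carrying the merged result is raised. A caller can therefore catch it and inspect the details, as in this sketch with a hypothetical exception class.

class SimpleBulkWriteError(Exception):
    """Hypothetical exception carrying the merged bulk-write result."""
    def __init__(self, full_result):
        super().__init__("batch op errors occurred")
        self.details = full_result

def finish(full_result):
    if full_result["writeErrors"] or full_result["writeConcernErrors"]:
        if full_result["writeErrors"]:
            full_result["writeErrors"].sort(key=lambda error: error["index"])
        raise SimpleBulkWriteError(full_result)
    return full_result

try:
    finish({"writeErrors": [{"index": 4, "errmsg": "dup key"},
                            {"index": 1, "errmsg": "dup key"}],
            "writeConcernErrors": [], "nInserted": 3})
except SimpleBulkWriteError as exc:
    # Errors are sorted by the document's position in the original batch.
    print([e["index"] for e in exc.details["writeErrors"]])  # [1, 4]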