Example #1
def _first_batch(sock_info, namespace, query, ntoreturn, slave_ok, codec_options, read_preference, cmd, listeners):
    """Simple query helper for retrieving a first (and possibly only) batch."""
    query = _Query(0, namespace, 0, ntoreturn, query, None, codec_options, read_preference, 0, ntoreturn)

    name = next(iter(cmd))
    duration = None
    publish = listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, max_doc_size = query.get_message(slave_ok, sock_info.is_mongos)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(cmd, namespace.split(".", 1)[0], request_id, sock_info.address)
        start = datetime.datetime.now()

    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(1, request_id)
    try:
        result = _unpack_response(response, None, codec_options)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = _convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name, request_id, sock_info.address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, result, name, request_id, sock_info.address)

    return result
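A minimal consumer-side sketch, not part of the example above: the publish_command_start/success/failure calls made by _first_batch feed PyMongo's command monitoring machinery, so an application can observe them by registering a listener. The server URI and collection name below are illustrative assumptions.

from pymongo import MongoClient, monitoring


class CommandLogger(monitoring.CommandListener):
    def started(self, event):
        print(f"{event.command_name} started, request_id={event.request_id}")

    def succeeded(self, event):
        # duration_micros reflects the round trip measured by the driver.
        print(f"{event.command_name} succeeded in {event.duration_micros} us")

    def failed(self, event):
        # event.failure is the document passed to publish_command_failure.
        print(f"{event.command_name} failed: {event.failure}")


client = MongoClient(event_listeners=[CommandLogger()])
client.test.items.find_one()  # emits a 'find' started/succeeded pair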
Example #2
def _first_batch(sock_info, namespace, query, ntoreturn, slave_ok,
                 codec_options, read_preference, cmd, listeners):
    """Simple query helper for retrieving a first (and possibly only) batch."""
    query = _Query(0, namespace, 0, ntoreturn, query, None, codec_options,
                   read_preference, 0, ntoreturn)

    name = next(iter(cmd))
    duration = None
    publish = listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, max_doc_size = query.get_message(slave_ok,
                                                      sock_info.is_mongos)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(cmd,
                                        namespace.split('.', 1)[0], request_id,
                                        sock_info.address)
        start = datetime.datetime.now()

    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(1, request_id)
    try:
        result = _unpack_response(response, None, codec_options)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = _convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name,
                                              request_id, sock_info.address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, result, name, request_id,
                                          sock_info.address)

    return result
Example #3
    def run_operation(self, sock_info, operation, set_secondary_okay,
                      listeners, unpack_res):
        """Run a _Query or _GetMore operation and return a Response object.

        This method is used only to run _Query/_GetMore operations from
        cursors.
        Can raise ConnectionFailure, OperationFailure, etc.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_secondary_okay`: Pass to operation.get_message.
          - `listeners`: Instance of _EventListeners or None.
          - `unpack_res`: A callable that decodes the wire protocol response.
        """
        duration = None
        publish = listeners.enabled_for_commands
        if publish:
            start = datetime.now()

        use_cmd = operation.use_command(sock_info)
        more_to_come = (operation.sock_mgr and operation.sock_mgr.more_to_come)
        if more_to_come:
            request_id = 0
        else:
            message = operation.get_message(set_secondary_okay, sock_info,
                                            use_cmd)
            request_id, data, max_doc_size = self._split_message(message)

        if publish:
            cmd, dbn = operation.as_command(sock_info)
            listeners.publish_command_start(cmd,
                                            dbn,
                                            request_id,
                                            sock_info.address,
                                            service_id=sock_info.service_id)
            start = datetime.now()

        try:
            if more_to_come:
                reply = sock_info.receive_message(None)
            else:
                sock_info.send_message(data, max_doc_size)
                reply = sock_info.receive_message(request_id)

            # Unpack and check for command errors.
            if use_cmd:
                user_fields = _CURSOR_DOC_FIELDS
                legacy_response = False
            else:
                user_fields = None
                legacy_response = True
            docs = unpack_res(reply,
                              operation.cursor_id,
                              operation.codec_options,
                              legacy_response=legacy_response,
                              user_fields=user_fields)
            if use_cmd:
                first = docs[0]
                operation.client._process_response(first, operation.session)
                _check_command_response(first, sock_info.max_wire_version)
        except Exception as exc:
            if publish:
                duration = datetime.now() - start
                if isinstance(exc, (NotPrimaryError, OperationFailure)):
                    failure = exc.details
                else:
                    failure = _convert_exception(exc)
                listeners.publish_command_failure(
                    duration,
                    failure,
                    operation.name,
                    request_id,
                    sock_info.address,
                    service_id=sock_info.service_id)
            raise

        if publish:
            duration = datetime.now() - start
            # Must publish in find / getMore / explain command response
            # format.
            if use_cmd:
                res = docs[0]
            elif operation.name == "explain":
                res = docs[0] if docs else {}
            else:
                res = {
                    "cursor": {
                        "id": reply.cursor_id,
                        "ns": operation.namespace()
                    },
                    "ok": 1
                }
                if operation.name == "find":
                    res["cursor"]["firstBatch"] = docs
                else:
                    res["cursor"]["nextBatch"] = docs
            listeners.publish_command_success(duration,
                                              res,
                                              operation.name,
                                              request_id,
                                              sock_info.address,
                                              service_id=sock_info.service_id)

        # Decrypt response.
        client = operation.client
        if client and client._encrypter:
            if use_cmd:
                decrypted = client._encrypter.decrypt(
                    reply.raw_command_response())
                docs = _decode_all_selective(decrypted,
                                             operation.codec_options,
                                             user_fields)

        if client._should_pin_cursor(operation.session) or operation.exhaust:
            sock_info.pin_cursor()
            if isinstance(reply, _OpMsg):
                # In OP_MSG, the server keeps sending only if the
                # more_to_come flag is set.
                more_to_come = reply.more_to_come
            else:
                # In OP_REPLY, the server keeps sending until cursor_id is 0.
                more_to_come = bool(operation.exhaust and reply.cursor_id)
            if operation.sock_mgr:
                operation.sock_mgr.update_exhaust(more_to_come)
            response = PinnedResponse(data=reply,
                                      address=self._description.address,
                                      socket_info=sock_info,
                                      duration=duration,
                                      request_id=request_id,
                                      from_command=use_cmd,
                                      docs=docs,
                                      more_to_come=more_to_come)
        else:
            response = Response(data=reply,
                                address=self._description.address,
                                duration=duration,
                                request_id=request_id,
                                from_command=use_cmd,
                                docs=docs)

        return response
Example #4
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False
        start = datetime.datetime.now()

        def duration(): return datetime.datetime.now() - start

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                reply = response.data
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
            try:
                reply = self.__exhaust_mgr.sock.receive_message(None)
            except Exception as exc:
                if publish:
                    listeners.publish_command_failure(
                        duration(), _convert_exception(exc), cmd_name, rqst_id,
                        self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise

        try:
            docs = self._unpack_response(response=reply,
                                         cursor_id=self.__id,
                                         codec_options=self.__codec_options)
            if from_command:
                first = docs[0]
                client._receive_cluster_time(first, self.__session)
                helpers._check_command_response(first)
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                listeners.publish_command_failure(
                    duration(), exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                listeners.publish_command_failure(
                    duration(), exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                listeners.publish_command_failure(
                    duration(), _convert_exception(exc), cmd_name, rqst_id,
                    self.__address)
            raise

        if publish:
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = docs[0]
            elif cmd_name == "explain":
                res = docs[0] if reply.number_returned else {}
            else:
                res = {"cursor": {"id": reply.cursor_id,
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = docs
                else:
                    res["cursor"]["nextBatch"] = docs
            listeners.publish_command_success(
                duration(), res, cmd_name, rqst_id, self.__address)

        if from_command and cmd_name != "explain":
            cursor = docs[0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = reply.cursor_id
            self.__data = deque(docs)
            self.__retrieved += reply.number_returned

        if self.__id == 0:
            self.__killed = True


        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
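The branch above that passes operation=None only runs for exhaust cursors. A short sketch, under the assumption of a local MongoDB with a populated test.items collection, of how such a cursor is requested through the public API:

from pymongo import MongoClient, CursorType

client = MongoClient()
cursor = client.test.items.find({}, cursor_type=CursorType.EXHAUST)
for doc in cursor:   # batches stream off the pinned exhaust socket;
    pass             # the driver does not send a getMore per batch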
Example #5
def command(sock_info, dbname, spec, secondary_ok, is_mongos,
            read_preference, codec_options, session, client, check=True,
            allowable_errors=None, address=None,
            check_keys=False, listeners=None, max_bson_size=None,
            read_concern=None,
            parse_write_concern_error=False,
            collation=None,
            compression_ctx=None,
            use_op_msg=False,
            unacknowledged=False,
            user_fields=None,
            exhaust_allowed=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock_info`: a SocketInfo instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as an ordered dict type, eg SON.
      - `secondary_ok`: whether to set the secondaryOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient instance for updating $clusterTime.
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock_info`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
      - `parse_write_concern_error`: Whether to parse the ``writeConcernError``
        field in the command response.
      - `collation`: The collation for this command.
      - `compression_ctx`: optional compression Context.
      - `use_op_msg`: True if we should use OP_MSG.
      - `unacknowledged`: True if this is an unacknowledged command.
      - `user_fields` (optional): Response fields that should be decoded
        using the TypeDecoders from codec_options, passed to
        bson._decode_all_selective.
      - `exhaust_allowed`: True if we should enable OP_MSG exhaustAllowed.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if secondary_ok else 0
    speculative_hello = False

    # Publish the original command document, perhaps with lsid and $clusterTime.
    orig = spec
    if is_mongos and not use_op_msg:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern and not (session and session.in_transaction):
        if read_concern.level:
            spec['readConcern'] = read_concern.document
        if session:
            session._update_read_concern(spec, sock_info)
    if collation is not None:
        spec['collation'] = collation

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()
        speculative_hello = _is_speculative_authenticate(name, spec)

    if compression_ctx and name.lower() in _NO_COMPRESSION:
        compression_ctx = None

    if (client and client._encrypter and
            not client._encrypter._bypass_auto_encryption):
        spec = orig = client._encrypter.encrypt(
            dbname, spec, check_keys, codec_options)
        # We already checked the keys, no need to do it again.
        check_keys = False

    if use_op_msg:
        flags = _OpMsg.MORE_TO_COME if unacknowledged else 0
        flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0
        request_id, msg, size, max_doc_size = message._op_msg(
            flags, spec, dbname, read_preference, secondary_ok, check_keys,
            codec_options, ctx=compression_ctx)
        # If this is an unacknowledged write then make sure the encoded doc(s)
        # are small enough, otherwise rely on the server to return an error.
        if (unacknowledged and max_bson_size is not None and
                max_doc_size > max_bson_size):
            message._raise_document_too_large(name, size, max_bson_size)
    else:
        request_id, msg, size = message._query(
            flags, ns, 0, -1, spec, None, codec_options, check_keys,
            compression_ctx)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address,
                                        service_id=sock_info.service_id)
        start = datetime.datetime.now()

    try:
        sock_info.sock.sendall(msg)
        if use_op_msg and unacknowledged:
            # Unacknowledged, fake a successful command response.
            reply = None
            response_doc = {"ok": 1}
        else:
            reply = receive_message(sock_info, request_id)
            sock_info.more_to_come = reply.more_to_come
            unpacked_docs = reply.unpack_response(
                codec_options=codec_options, user_fields=user_fields)

            response_doc = unpacked_docs[0]
            if client:
                client._process_response(response_doc, session)
            if check:
                helpers._check_command_response(
                    response_doc, sock_info.max_wire_version, allowable_errors,
                    parse_write_concern_error=parse_write_concern_error)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotPrimaryError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, address,
                service_id=sock_info.service_id)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, response_doc, name, request_id, address,
            service_id=sock_info.service_id,
            speculative_hello=speculative_hello)

    if client and client._encrypter and reply:
        decrypted = client._encrypter.decrypt(reply.raw_command_response())
        response_doc = _decode_all_selective(decrypted, codec_options,
                                             user_fields)[0]

    return response_doc
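The unacknowledged branch above fakes an {"ok": 1} response because OP_MSG with the MORE_TO_COME flag tells the server not to reply at all. A sketch of an operation that would take this path, assuming a local server (names are illustrative):

from pymongo import MongoClient
from pymongo.write_concern import WriteConcern

client = MongoClient()
unacked = client.test.items.with_options(write_concern=WriteConcern(w=0))
unacked.insert_one({"x": 1})  # fire-and-forget; the success event published
                              # for this command carries the faked {"ok": 1}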
Example #6
def command(sock, dbname, spec, slave_ok, is_mongos,
            read_preference, codec_options, session, client, check=True,
            allowable_errors=None, address=None,
            check_keys=False, listeners=None, max_bson_size=None,
            read_concern=None,
            parse_write_concern_error=False,
            collation=None,
            compression_ctx=None,
            use_op_msg=False,
            unacknowledged=False,
            user_fields=None):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as an ordered dict type, eg SON.
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient instance for updating $clusterTime.
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
      - `parse_write_concern_error`: Whether to parse the ``writeConcernError``
        field in the command response.
      - `collation`: The collation for this command.
      - `compression_ctx`: optional compression Context.
      - `use_op_msg`: True if we should use OP_MSG.
      - `unacknowledged`: True if this is an unacknowledged command.
      - `user_fields` (optional): Response fields that should be decoded
        using the TypeDecoders from codec_options, passed to
        bson._decode_all_selective.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0

    # Publish the original command document, perhaps with lsid and $clusterTime.
    orig = spec
    if is_mongos and not use_op_msg:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern and not (session and session._in_transaction):
        if read_concern.level:
            spec['readConcern'] = read_concern.document
        if (session and session.options.causal_consistency
                and session.operation_time is not None):
            spec.setdefault(
                'readConcern', {})['afterClusterTime'] = session.operation_time
    if collation is not None:
        spec['collation'] = collation

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    if compression_ctx and name.lower() in _NO_COMPRESSION:
        compression_ctx = None

    if use_op_msg:
        flags = 2 if unacknowledged else 0
        request_id, msg, size, max_doc_size = message._op_msg(
            flags, spec, dbname, read_preference, slave_ok, check_keys,
            codec_options, ctx=compression_ctx)
        # If this is an unacknowledged write then make sure the encoded doc(s)
        # are small enough, otherwise rely on the server to return an error.
        if (unacknowledged and max_bson_size is not None and
                max_doc_size > max_bson_size):
            message._raise_document_too_large(name, size, max_bson_size)
    else:
        request_id, msg, size = message.query(
            flags, ns, 0, -1, spec, None, codec_options, check_keys,
            compression_ctx)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        if use_op_msg and unacknowledged:
            # Unacknowledged, fake a successful command response.
            response_doc = {"ok": 1}
        else:
            reply = receive_message(sock, request_id)
            unpacked_docs = reply.unpack_response(
                codec_options=codec_options, user_fields=user_fields)

            response_doc = unpacked_docs[0]
            if client:
                client._process_response(response_doc, session)
            if check:
                helpers._check_command_response(
                    response_doc, None, allowable_errors,
                    parse_write_concern_error=parse_write_concern_error)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, response_doc, name, request_id, address)
    return response_doc
Example #7
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        cmd_duration = response.duration
        rqst_id = response.request_id
        from_command = response.from_command

        if publish:
            start = datetime.datetime.now()
        try:
            doc = self._unpack_response(response.data, self.__id,
                                        self.__collection.codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])

        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  "getMore", rqst_id,
                                                  self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  "getMore", rqst_id,
                                                  self.__address)

            client._reset_server_and_request_check(self.address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration,
                                                  _convert_exception(exc),
                                                  "getMore", rqst_id,
                                                  self.__address)
            raise

        if from_command:
            cursor = doc['data'][0]['cursor']
            documents = cursor['nextBatch']
            self.__id = cursor['id']
        else:
            documents = doc["data"]
            self.__id = doc["cursor_id"]

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {
                "cursor": {
                    "id": self.__id,
                    "ns": self.__collection.full_name,
                    "nextBatch": documents
                },
                "ok": 1
            }
            listeners.publish_command_success(duration, res, "getMore",
                                              rqst_id, self.__address)

        if self.__id == 0:
            self.__killed = True
        self.__data = deque(documents)
Example #8
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data,
                                           self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            client._reset_server_and_request_check(self.address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, _convert_exception(exc), "getMore", rqst_id,
                    self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {"cursor": {"id": doc["cursor_id"],
                              "ns": self.__collection.full_name,
                              "nextBatch": doc["data"]},
                   "ok": 1}
            listeners.publish_command_success(
                duration, res, "getMore", rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
Example #9
def command(sock, dbname, spec, slave_ok, is_mongos,
            read_preference, codec_options, check=True,
            allowable_errors=None, address=None,
            check_keys=False, listeners=None, max_bson_size=None,
            read_concern=DEFAULT_READ_CONCERN):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    # Publish the original command document.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern.level:
        spec['readConcern'] = read_concern.document

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, size = message.query(flags, ns, 0, -1, spec,
                                          None, codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        response = receive_message(sock, 1, request_id)
        unpacked = helpers._unpack_response(
            response, codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(response_doc, None, allowable_errors)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, response_doc, name, request_id, address)
    return response_doc
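The readConcern handling above copies read_concern.document into the command when a level is configured. A sketch of driving that from the public API, assuming MongoDB 3.2+ and a local server (collection name is illustrative):

from pymongo import MongoClient
from pymongo.read_concern import ReadConcern

client = MongoClient()
coll = client.test.get_collection("items",
                                  read_concern=ReadConcern("majority"))
coll.find_one()  # the find command sent should include
                 # {"readConcern": {"level": "majority"}}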
Example #10
    def run_operation_with_response(self, sock_info, operation, set_slave_okay,
                                    listeners, exhaust, unpack_res):
        """Run a _Query or _GetMore operation and return a Response object.

        This method is used only to run _Query/_GetMore operations from
        cursors.
        Can raise ConnectionFailure, OperationFailure, etc.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `listeners`: Instance of _EventListeners or None.
          - `exhaust`: If True, then this is an exhaust cursor operation.
          - `unpack_res`: A callable that decodes the wire protocol response.
        """
        duration = None
        publish = listeners.enabled_for_commands
        if publish:
            start = datetime.now()

        send_message = not operation.exhaust_mgr

        if send_message:
            use_cmd = operation.use_command(sock_info, exhaust)
            message = operation.get_message(set_slave_okay, sock_info, use_cmd)
            request_id, data, max_doc_size = self._split_message(message)
        else:
            use_cmd = False
            request_id = 0

        if publish:
            cmd, dbn = operation.as_command(sock_info)
            listeners.publish_command_start(cmd, dbn, request_id,
                                            sock_info.address)
            start = datetime.now()

        try:
            if send_message:
                sock_info.send_message(data, max_doc_size)
                reply = sock_info.receive_message(request_id)
            else:
                reply = sock_info.receive_message(None)

            # Unpack and check for command errors.
            if use_cmd:
                user_fields = _CURSOR_DOC_FIELDS
                legacy_response = False
            else:
                user_fields = None
                legacy_response = True
            docs = unpack_res(reply,
                              operation.cursor_id,
                              operation.codec_options,
                              legacy_response=legacy_response,
                              user_fields=user_fields)
            if use_cmd:
                first = docs[0]
                operation.client._process_response(first, operation.session)
                _check_command_response(first)
        except Exception as exc:
            if publish:
                duration = datetime.now() - start
                if isinstance(exc, (NotMasterError, OperationFailure)):
                    failure = exc.details
                else:
                    failure = _convert_exception(exc)
                listeners.publish_command_failure(duration, failure,
                                                  operation.name, request_id,
                                                  sock_info.address)
            raise

        if publish:
            duration = datetime.now() - start
            # Must publish in find / getMore / explain command response
            # format.
            if use_cmd:
                res = docs[0]
            elif operation.name == "explain":
                res = docs[0] if docs else {}
            else:
                res = {
                    "cursor": {
                        "id": reply.cursor_id,
                        "ns": operation.namespace()
                    },
                    "ok": 1
                }
                if operation.name == "find":
                    res["cursor"]["firstBatch"] = docs
                else:
                    res["cursor"]["nextBatch"] = docs
            listeners.publish_command_success(duration, res, operation.name,
                                              request_id, sock_info.address)

        # Decrypt response.
        client = operation.client
        if client and client._encrypter:
            if use_cmd:
                decrypted = client._encrypter.decrypt(
                    reply.raw_command_response())
                docs = _decode_all_selective(decrypted,
                                             operation.codec_options,
                                             user_fields)

        if exhaust:
            response = ExhaustResponse(data=reply,
                                       address=self._description.address,
                                       socket_info=sock_info,
                                       pool=self._pool,
                                       duration=duration,
                                       request_id=request_id,
                                       from_command=use_cmd,
                                       docs=docs)
        else:
            response = Response(data=reply,
                                address=self._description.address,
                                duration=duration,
                                request_id=request_id,
                                from_command=use_cmd,
                                docs=docs)

        return response
Example #11
    def send_message_with_response(self,
                                   operation,
                                   set_slave_okay,
                                   all_credentials,
                                   listeners,
                                   exhaust=False):
        """Send a message to MongoDB and return a Response object.

        Can raise ConnectionFailure.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `exhaust` (optional): If True, the socket used stays checked out.
            It is returned along with its Pool in the Response.
        """
        with self.get_socket(all_credentials, exhaust) as sock_info:

            duration = None
            publish = listeners.enabled_for_commands
            if publish:
                start = datetime.now()

            use_find_cmd = False
            if sock_info.max_wire_version >= 4:
                if not exhaust:
                    use_find_cmd = True
            elif (isinstance(operation, _Query)
                  and not operation.read_concern.ok_for_legacy):
                raise ConfigurationError(
                    'read concern level of %s is not valid '
                    'with a max wire version of %d.' %
                    (operation.read_concern.level, sock_info.max_wire_version))

            message = operation.get_message(set_slave_okay,
                                            sock_info.is_mongos, use_find_cmd)
            request_id, data, max_doc_size = self._split_message(message)

            if publish:
                encoding_duration = datetime.now() - start
                cmd, dbn = operation.as_command()
                listeners.publish_command_start(cmd, dbn, request_id,
                                                sock_info.address)
                start = datetime.now()

            try:
                sock_info.send_message(data, max_doc_size)
                response_data = sock_info.receive_message(1, request_id)
            except Exception as exc:
                if publish:
                    duration = (datetime.now() - start) + encoding_duration
                    failure = _convert_exception(exc)
                    listeners.publish_command_failure(duration, failure,
                                                      next(iter(cmd)),
                                                      request_id,
                                                      sock_info.address)
                raise

            if publish:
                duration = (datetime.now() - start) + encoding_duration

            if exhaust:
                return ExhaustResponse(data=response_data,
                                       address=self._description.address,
                                       socket_info=sock_info,
                                       pool=self._pool,
                                       duration=duration,
                                       request_id=request_id,
                                       from_command=use_find_cmd)
            else:
                return Response(data=response_data,
                                address=self._description.address,
                                duration=duration,
                                request_id=request_id,
                                from_command=use_find_cmd)
Example #12
    def _process_kill_cursors_queue(self):
        """Process any pending kill cursors requests."""
        address_to_cursor_ids = defaultdict(list)

        # Other threads or the GC may append to the queue concurrently.
        while True:
            try:
                address, cursor_ids = self.__kill_cursors_queue.pop()
            except IndexError:
                break

            address_to_cursor_ids[address].extend(cursor_ids)

        # Don't re-open the topology if it's closed and there are no pending cursors.
        if address_to_cursor_ids:
            listeners = self._event_listeners
            publish = listeners.enabled_for_commands
            topology = self._get_topology()
            for address, cursor_ids in address_to_cursor_ids.items():
                try:
                    if address:
                        # address could be a tuple or _CursorAddress, but
                        # select_server_by_address needs (host, port).
                        server = topology.select_server_by_address(
                            tuple(address))
                    else:
                        # Application called close_cursor() with no address.
                        server = topology.select_server(
                            writable_server_selector)

                    if publish:
                        start = datetime.datetime.now()
                    data = message.kill_cursors(cursor_ids)
                    if publish:
                        duration = datetime.datetime.now() - start
                        try:
                            dbname, collname = address.namespace.split(".", 1)
                        except AttributeError:
                            dbname = collname = 'OP_KILL_CURSORS'
                        command = SON([('killCursors', collname),
                                       ('cursors', cursor_ids)])
                        listeners.publish_command_start(
                            command, dbname, data[0], address)
                        start = datetime.datetime.now()
                    try:
                        server.send_message(data, self.__all_credentials)
                    except Exception as exc:
                        if publish:
                            dur = (datetime.datetime.now() - start) + duration
                            listeners.publish_command_failure(
                                dur, message._convert_exception(exc),
                                'killCursors', data[0], address)
                        raise
                    if publish:
                        duration = (datetime.datetime.now() - start) + duration
                        # OP_KILL_CURSORS returns no reply, fake one.
                        reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                        listeners.publish_command_success(
                            duration, reply, 'killCursors', data[0], address)

                except ConnectionFailure as exc:
                    warnings.warn("couldn't close cursor on %s: %s"
                                  % (address, exc))
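The queue drained above is fed when cursors are cleaned up before being exhausted. A sketch of triggering it, assuming a local server, registered listeners, and a test.items collection larger than one batch (all names illustrative):

from pymongo import MongoClient

client = MongoClient()  # pass event_listeners=[...] to observe the events
cursor = client.test.items.find(batch_size=2)
next(cursor, None)      # pull the first batch, leaving the server cursor open
cursor.close()          # queues the cursor id; a later pass over the
                        # kill-cursors queue sends killCursors for it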
Example #13
    def run_operation_with_response(
            self,
            sock_info,
            operation,
            set_slave_okay,
            listeners,
            exhaust,
            unpack_res):
        """Run a _Query or _GetMore operation and return a Response object.

        This method is used only to run _Query/_GetMore operations from
        cursors.
        Can raise ConnectionFailure, OperationFailure, etc.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `listeners`: Instance of _EventListeners or None.
          - `exhaust`: If True, then this is an exhaust cursor operation.
          - `unpack_res`: A callable that decodes the wire protocol response.
        """
        duration = None
        publish = listeners.enabled_for_commands
        if publish:
            start = datetime.now()

        send_message = not operation.exhaust_mgr

        if send_message:
            use_cmd = operation.use_command(sock_info, exhaust)
            message = operation.get_message(
                set_slave_okay, sock_info, use_cmd)
            request_id, data, max_doc_size = self._split_message(message)
        else:
            use_cmd = False
            request_id = 0

        if publish:
            cmd, dbn = operation.as_command(sock_info)
            listeners.publish_command_start(
                cmd, dbn, request_id, sock_info.address)
            start = datetime.now()

        try:
            if send_message:
                sock_info.send_message(data, max_doc_size)
                reply = sock_info.receive_message(request_id)
            else:
                reply = sock_info.receive_message(None)

            # Unpack and check for command errors.
            if use_cmd:
                user_fields = _CURSOR_DOC_FIELDS
                legacy_response = False
            else:
                user_fields = None
                legacy_response = True
            docs = unpack_res(reply, operation.cursor_id,
                              operation.codec_options,
                              legacy_response=legacy_response,
                              user_fields=user_fields)
            if use_cmd:
                first = docs[0]
                operation.client._process_response(
                    first, operation.session)
                _check_command_response(first)
        except Exception as exc:
            if publish:
                duration = datetime.now() - start
                if isinstance(exc, (NotMasterError, OperationFailure)):
                    failure = exc.details
                else:
                    failure = _convert_exception(exc)
                listeners.publish_command_failure(
                    duration, failure, operation.name,
                    request_id, sock_info.address)
            raise

        if publish:
            duration = datetime.now() - start
            # Must publish in find / getMore / explain command response
            # format.
            if use_cmd:
                res = docs[0]
            elif operation.name == "explain":
                res = docs[0] if docs else {}
            else:
                res = {"cursor": {"id": reply.cursor_id,
                                  "ns": operation.namespace()},
                       "ok": 1}
                if operation.name == "find":
                    res["cursor"]["firstBatch"] = docs
                else:
                    res["cursor"]["nextBatch"] = docs
            listeners.publish_command_success(
                duration, res, operation.name, request_id,
                sock_info.address)

        if exhaust:
            response = ExhaustResponse(
                data=reply,
                address=self._description.address,
                socket_info=sock_info,
                pool=self._pool,
                duration=duration,
                request_id=request_id,
                from_command=use_cmd,
                docs=docs)
        else:
            response = Response(
                data=reply,
                address=self._description.address,
                duration=duration,
                request_id=request_id,
                from_command=use_cmd,
                docs=docs)

        return response
Example #14
    def send_message_with_response(
            self,
            operation,
            set_slave_okay,
            all_credentials,
            listeners,
            exhaust=False):
        """Send a message to MongoDB and return a Response object.

        Can raise ConnectionFailure.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `listeners`: Instance of _EventListeners or None.
          - `exhaust` (optional): If True, the socket used stays checked out.
            It is returned along with its Pool in the Response.
        """
        with self.get_socket(all_credentials, exhaust) as sock_info:

            duration = None
            publish = listeners.enabled_for_commands
            if publish:
                start = datetime.now()

            use_find_cmd = operation.use_command(sock_info, exhaust)
            message = operation.get_message(
                set_slave_okay, sock_info, use_find_cmd)
            request_id, data, max_doc_size = self._split_message(message)

            if publish:
                encoding_duration = datetime.now() - start
                cmd, dbn = operation.as_command(sock_info)
                listeners.publish_command_start(
                    cmd, dbn, request_id, sock_info.address)
                start = datetime.now()

            try:
                sock_info.send_message(data, max_doc_size)
                reply = sock_info.receive_message(request_id)
            except Exception as exc:
                if publish:
                    duration = (datetime.now() - start) + encoding_duration
                    failure = _convert_exception(exc)
                    listeners.publish_command_failure(
                        duration, failure, next(iter(cmd)), request_id,
                        sock_info.address)
                raise

            if publish:
                duration = (datetime.now() - start) + encoding_duration

            if exhaust:
                return ExhaustResponse(
                    data=reply,
                    address=self._description.address,
                    socket_info=sock_info,
                    pool=self._pool,
                    duration=duration,
                    request_id=request_id,
                    from_command=use_find_cmd)
            else:
                return Response(
                    data=reply,
                    address=self._description.address,
                    duration=duration,
                    request_id=request_id,
                    from_command=use_find_cmd)
Example #15
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        def kill():
            self.__killed = True
            self.__end_session(True)

        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        start = datetime.datetime.now()

        def duration(): return datetime.datetime.now() - start

        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            kill()
            raise

        rqst_id = response.request_id
        from_command = response.from_command
        reply = response.data

        try:
            docs = self._unpack_response(reply,
                                         self.__id,
                                         self.__collection.codec_options)
            if from_command:
                first = docs[0]
                client._receive_cluster_time(first, self.__session)
                helpers._check_command_response(first)

        except OperationFailure as exc:
            kill()

            if publish:
                listeners.publish_command_failure(
                    duration(), exc.details, "getMore", rqst_id, self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            kill()

            if publish:
                listeners.publish_command_failure(
                    duration(), exc.details, "getMore", rqst_id, self.__address)

            client._reset_server_and_request_check(self.address)
            raise
        except Exception as exc:
            if publish:
                listeners.publish_command_failure(
                    duration(), _convert_exception(exc), "getMore", rqst_id,
                    self.__address)
            raise

        if from_command:
            cursor = docs[0]['cursor']
            documents = cursor['nextBatch']
            self.__id = cursor['id']
            if publish:
                listeners.publish_command_success(
                    duration(), docs[0], "getMore", rqst_id,
                    self.__address)
        else:
            documents = docs
            self.__id = reply.cursor_id

            if publish:
                # Must publish in getMore command response format.
                res = {"cursor": {"id": self.__id,
                                  "ns": self.__collection.full_name,
                                  "nextBatch": documents},
                       "ok": 1}
                listeners.publish_command_success(
                    duration(), res, "getMore", rqst_id, self.__address)

        if self.__id == 0:
            kill()
        self.__data = deque(documents)
Ejemplo n.º 16
0
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            session,
            client,
            check=True,
            allowable_errors=None,
            address=None,
            check_keys=False,
            listeners=None,
            max_bson_size=None,
            read_concern=None,
            parse_write_concern_error=False,
            collation=None,
            compression_ctx=None,
            use_op_msg=False,
            unacknowledged=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as an ordered dict type, eg SON.
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient instance for updating $clusterTime.
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
      - `parse_write_concern_error`: Whether to parse the ``writeConcernError``
        field in the command response.
      - `collation`: The collation for this command.
      - `compression_ctx`: optional compression context used to compress the
        message before sending.
      - `use_op_msg`: True if this command should be sent using OP_MSG.
      - `unacknowledged`: True if this is an unacknowledged command.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0

    # Publish the original command document, perhaps with lsid and $clusterTime.
    orig = spec
    if is_mongos and not use_op_msg:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern and not (session and session._in_transaction):
        if read_concern.level:
            spec['readConcern'] = read_concern.document
        if (session and session.options.causal_consistency
                and session.operation_time is not None):
            spec.setdefault('readConcern',
                            {})['afterClusterTime'] = session.operation_time
    if collation is not None:
        spec['collation'] = collation

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    if compression_ctx and name.lower() in _NO_COMPRESSION:
        compression_ctx = None

    if use_op_msg:
        flags = 2 if unacknowledged else 0
        request_id, msg, size, max_doc_size = message._op_msg(
            flags,
            spec,
            dbname,
            read_preference,
            slave_ok,
            check_keys,
            codec_options,
            ctx=compression_ctx)
        # If this is an unacknowledged write then make sure the encoded doc(s)
        # are small enough, otherwise rely on the server to return an error.
        if (unacknowledged and max_bson_size is not None
                and max_doc_size > max_bson_size):
            message._raise_document_too_large(name, size, max_bson_size)
    else:
        request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                              codec_options, check_keys,
                                              compression_ctx)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        if use_op_msg and unacknowledged:
            # Unacknowledged, fake a successful command response.
            response_doc = {"ok": 1}
        else:
            reply = receive_message(sock, request_id)
            unpacked_docs = reply.unpack_response(codec_options=codec_options)

            response_doc = unpacked_docs[0]
            if client:
                client._receive_cluster_time(response_doc, session)
            if check:
                helpers._check_command_response(
                    response_doc,
                    None,
                    allowable_errors,
                    parse_write_concern_error=parse_write_concern_error)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name,
                                              request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, response_doc, name,
                                          request_id, address)
    return response_doc
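
The publish_command_start/success/failure calls in this helper feed PyMongo's command monitoring machinery. A minimal sketch of a listener that would receive those events, using the public pymongo.monitoring.CommandListener interface (the event field names below come from that public API; the logging itself is illustrative):

from pymongo import MongoClient, monitoring

class CommandLogger(monitoring.CommandListener):
    """Print one line per command published by helpers like the one above."""

    def started(self, event):
        print("started   %s db=%s request_id=%s"
              % (event.command_name, event.database_name, event.request_id))

    def succeeded(self, event):
        print("succeeded %s in %d microseconds"
              % (event.command_name, event.duration_micros))

    def failed(self, event):
        print("failed    %s: %r" % (event.command_name, event.failure))

# Register globally, or pass per-client via event_listeners.
monitoring.register(CommandLogger())
client = MongoClient(event_listeners=[CommandLogger()])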
Ejemplo n.º 17
0
    def _process_kill_cursors_queue(self):
        """Process any pending kill cursors requests."""
        address_to_cursor_ids = defaultdict(list)

        # Other threads or the GC may append to the queue concurrently.
        while True:
            try:
                address, cursor_ids = self.__kill_cursors_queue.pop()
            except IndexError:
                break

            address_to_cursor_ids[address].extend(cursor_ids)

        # Don't re-open the topology if it's closed and there are no pending cursors.
        if address_to_cursor_ids:
            listeners = self._event_listeners
            publish = listeners.enabled_for_commands
            topology = self._get_topology()
            for address, cursor_ids in address_to_cursor_ids.items():
                try:
                    if address:
                        # address could be a tuple or _CursorAddress, but
                        # select_server_by_address needs (host, port).
                        server = topology.select_server_by_address(
                            tuple(address))
                    else:
                        # Application called close_cursor() with no address.
                        server = topology.select_server(
                            writable_server_selector)

                    if publish:
                        start = datetime.datetime.now()
                    data = message.kill_cursors(cursor_ids)
                    if publish:
                        duration = datetime.datetime.now() - start
                        try:
                            dbname, collname = address.namespace.split(".", 1)
                        except AttributeError:
                            dbname = collname = 'OP_KILL_CURSORS'
                        command = SON([('killCursors', collname),
                                       ('cursors', cursor_ids)])
                        listeners.publish_command_start(
                            command, dbname, data[0], address)
                        start = datetime.datetime.now()
                    try:
                        server.send_message(data, self.__all_credentials)
                    except Exception as exc:
                        if publish:
                            dur = (datetime.datetime.now() - start) + duration
                            listeners.publish_command_failure(
                                dur, message._convert_exception(exc),
                                'killCursors', data[0], address)
                        raise
                    if publish:
                        duration = (datetime.datetime.now() - start) + duration
                        # OP_KILL_CURSORS returns no reply, fake one.
                        reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                        listeners.publish_command_success(
                            duration, reply, 'killCursors', data[0], address)

                except ConnectionFailure as exc:
                    warnings.warn("couldn't close cursor on %s: %s" %
                                  (address, exc))
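
The address.namespace access above (with an AttributeError fallback) expects the cursor's address to be a (host, port) tuple that also carries the namespace the cursor was opened on, so killCursors can be published against a real database and collection; PyMongo uses an internal _CursorAddress tuple subclass for this. A minimal sketch of such an object (the class name here is hypothetical):

class NamespacedAddress(tuple):
    """(host, port) tuple that also remembers the cursor's namespace."""

    def __new__(cls, address, namespace):
        self = super(NamespacedAddress, cls).__new__(cls, address)
        self._namespace = namespace
        return self

    @property
    def namespace(self):
        return self._namespace

addr = NamespacedAddress(("localhost", 27017), "test.items")
dbname, collname = addr.namespace.split(".", 1)  # -> 'test', 'items'
tuple(addr)                                      # ('localhost', 27017) for server selection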
Ejemplo n.º 18
0
Archivo: server.py Proyecto: Alpus/Eth
    def send_message_with_response(
            self,
            operation,
            set_slave_okay,
            all_credentials,
            listeners,
            exhaust=False):
        """Send a message to MongoDB and return a Response object.

        Can raise ConnectionFailure.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `listeners`: Instance of _EventListeners or None.
          - `exhaust` (optional): If True, the socket used stays checked out.
            It is returned along with its Pool in the Response.
        """
        with self.get_socket(all_credentials, exhaust) as sock_info:

            duration = None
            publish = listeners.enabled_for_commands
            if publish:
                start = datetime.now()

            use_find_cmd = False
            if sock_info.max_wire_version >= 4:
                if not exhaust:
                    use_find_cmd = True
            elif (isinstance(operation, _Query) and
                  not operation.read_concern.ok_for_legacy):
                raise ConfigurationError(
                    'read concern level of %s is not valid '
                    'with a max wire version of %d.'
                    % (operation.read_concern.level,
                       sock_info.max_wire_version))

            message = operation.get_message(
                set_slave_okay, sock_info.is_mongos, use_find_cmd)
            request_id, data, max_doc_size = self._split_message(message)

            if publish:
                encoding_duration = datetime.now() - start
                cmd, dbn = operation.as_command()
                listeners.publish_command_start(
                    cmd, dbn, request_id, sock_info.address)
                start = datetime.now()

            try:
                sock_info.send_message(data, max_doc_size)
                response_data = sock_info.receive_message(1, request_id)
            except Exception as exc:
                if publish:
                    duration = (datetime.now() - start) + encoding_duration
                    failure = _convert_exception(exc)
                    listeners.publish_command_failure(
                        duration, failure, next(iter(cmd)), request_id,
                        sock_info.address)
                raise

            if publish:
                duration = (datetime.now() - start) + encoding_duration

            if exhaust:
                return ExhaustResponse(
                    data=response_data,
                    address=self._description.address,
                    socket_info=sock_info,
                    pool=self._pool,
                    duration=duration,
                    request_id=request_id,
                    from_command=use_find_cmd)
            else:
                return Response(
                    data=response_data,
                    address=self._description.address,
                    duration=duration,
                    request_id=request_id,
                    from_command=use_find_cmd)
Ejemplo n.º 19
0
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        def kill():
            self.__killed = True
            self.__end_session(True)

        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        start = datetime.datetime.now()

        def duration():
            return datetime.datetime.now() - start

        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            kill()
            raise

        rqst_id = response.request_id
        from_command = response.from_command
        reply = response.data

        try:
            docs = self._unpack_response(reply, self.__id,
                                         self.__collection.codec_options)
            if from_command:
                first = docs[0]
                client._receive_cluster_time(first, self.__session)
                helpers._check_command_response(first)

        except OperationFailure as exc:
            kill()

            if publish:
                listeners.publish_command_failure(duration(), exc.details,
                                                  "getMore", rqst_id,
                                                  self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            kill()

            if publish:
                listeners.publish_command_failure(duration(), exc.details,
                                                  "getMore", rqst_id,
                                                  self.__address)

            client._reset_server_and_request_check(self.address)
            raise
        except Exception as exc:
            if publish:
                listeners.publish_command_failure(duration(),
                                                  _convert_exception(exc),
                                                  "getMore", rqst_id,
                                                  self.__address)
            raise

        if from_command:
            cursor = docs[0]['cursor']
            documents = cursor['nextBatch']
            self.__id = cursor['id']
            if publish:
                listeners.publish_command_success(duration(), docs[0],
                                                  "getMore", rqst_id,
                                                  self.__address)
        else:
            documents = docs
            self.__id = reply.cursor_id

            if publish:
                # Must publish in getMore command response format.
                res = {
                    "cursor": {
                        "id": self.__id,
                        "ns": self.__collection.full_name,
                        "nextBatch": documents
                    },
                    "ok": 1
                }
                listeners.publish_command_success(duration(), res, "getMore",
                                                  rqst_id, self.__address)

        if self.__id == 0:
            kill()
        self.__data = deque(documents)
Ejemplo n.º 20
0
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None,
            address=None,
            check_keys=False,
            listeners=None,
            max_bson_size=None,
            read_concern=DEFAULT_READ_CONCERN):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    # Publish the original command document.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern.level:
        spec['readConcern'] = read_concern.document

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                          codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        response = receive_message(sock, 1, request_id)
        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(response_doc, None,
                                            allowable_errors)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name,
                                              request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, response_doc, name,
                                          request_id, address)
    return response_doc
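
All of these command helpers report the same notion of duration to the listeners: the time spent encoding the message (measured before publish_command_start) plus the time spent on the socket round trip (measured after it). A compact restatement of that bookkeeping, as a sketch with hypothetical names:

import datetime

def timed_round_trip(encode, send_and_receive):
    """Sketch of the duration accounting used above (hypothetical helper)."""
    start = datetime.datetime.now()
    msg = encode()                                  # e.g. message.query(...)
    encoding_duration = datetime.datetime.now() - start

    # publish_command_start would fire here, then the clock restarts.
    start = datetime.datetime.now()
    reply = send_and_receive(msg)                   # sock.sendall + receive_message
    duration = (datetime.datetime.now() - start) + encoding_duration
    return reply, duration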
Ejemplo n.º 21
0
    def send_message_with_response(self,
                                   operation,
                                   set_slave_okay,
                                   all_credentials,
                                   listeners,
                                   exhaust=False):
        """Send a message to MongoDB and return a Response object.

        Can raise ConnectionFailure.

        :Parameters:
          - `operation`: A _Query or _GetMore object.
          - `set_slave_okay`: Pass to operation.get_message.
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `listeners`: Instance of _EventListeners or None.
          - `exhaust` (optional): If True, the socket used stays checked out.
            It is returned along with its Pool in the Response.
        """
        with self.get_socket(all_credentials, exhaust) as sock_info:

            duration = None
            publish = listeners.enabled_for_commands
            if publish:
                start = datetime.now()

            use_find_cmd = operation.use_command(sock_info, exhaust)
            message = operation.get_message(set_slave_okay, sock_info,
                                            use_find_cmd)
            request_id, data, max_doc_size = self._split_message(message)

            if publish:
                encoding_duration = datetime.now() - start
                cmd, dbn = operation.as_command(sock_info)
                listeners.publish_command_start(cmd, dbn, request_id,
                                                sock_info.address)
                start = datetime.now()

            try:
                sock_info.send_message(data, max_doc_size)
                reply = sock_info.receive_message(request_id)
            except Exception as exc:
                if publish:
                    duration = (datetime.now() - start) + encoding_duration
                    failure = _convert_exception(exc)
                    listeners.publish_command_failure(duration, failure,
                                                      next(iter(cmd)),
                                                      request_id,
                                                      sock_info.address)
                raise

            if publish:
                duration = (datetime.now() - start) + encoding_duration

            if exhaust:
                return ExhaustResponse(data=reply,
                                       address=self._description.address,
                                       socket_info=sock_info,
                                       pool=self._pool,
                                       duration=duration,
                                       request_id=request_id,
                                       from_command=use_find_cmd)
            else:
                return Response(data=reply,
                                address=self._description.address,
                                duration=duration,
                                request_id=request_id,
                                from_command=use_find_cmd)
Ejemplo n.º 22
0
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            session,
            client,
            check=True,
            allowable_errors=None,
            address=None,
            check_keys=False,
            listeners=None,
            max_bson_size=None,
            read_concern=DEFAULT_READ_CONCERN,
            parse_write_concern_error=False,
            collation=None,
            retryable_write=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient instance for updating $clusterTime.
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
      - `parse_write_concern_error`: Whether to parse the ``writeConcernError``
        field in the command response.
      - `collation`: The collation for this command.
      - `retryable_write`: True if this command is a retryable write.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    if (client or session) and not isinstance(spec, ORDERED_TYPES):
        # Ensure command name remains in first place.
        spec = SON(spec)
    if session:
        spec['lsid'] = session._use_lsid()
        if retryable_write:
            spec['txnNumber'] = session._transaction_id()
    if client:
        client._send_cluster_time(spec, session)

    # Publish the original command document, perhaps with lsid and $clusterTime.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern.level:
        spec['readConcern'] = read_concern.document
    if (session and session.options.causal_consistency
            and session.operation_time is not None):
        spec.setdefault('readConcern',
                        {})['afterClusterTime'] = session.operation_time
    if collation is not None:
        spec['collation'] = collation

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                          codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        reply = receive_message(sock, request_id)
        unpacked_docs = reply.unpack_response(codec_options=codec_options)

        response_doc = unpacked_docs[0]
        if client:
            client._receive_cluster_time(response_doc)
            if session:
                session._advance_cluster_time(response_doc.get('$clusterTime'))
                session._advance_operation_time(
                    response_doc.get('operationTime'))
        if check:
            helpers._check_command_response(
                response_doc,
                None,
                allowable_errors,
                parse_write_concern_error=parse_write_concern_error)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name,
                                              request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, response_doc, name,
                                          request_id, address)
    return response_doc
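
The SON(spec) conversion guarded by ORDERED_TYPES above exists because command documents are order-sensitive: the server identifies the command by the first key, and on the Python versions this code targets a plain dict does not guarantee key order. Appending lsid, txnNumber or $clusterTime must therefore not disturb the first key. A small illustration using bson.son.SON (values are fake):

from bson.son import SON

spec = SON([('insert', 'items'), ('documents', [{'x': 1}])])
spec['lsid'] = {'id': 'fake-session-id'}   # appended; 'insert' stays first
spec['txnNumber'] = 1                      # retryable writes only
assert next(iter(spec)) == 'insert'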
Ejemplo n.º 23
0
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(
                    operation, **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(
                        response.socket_info, response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except Exception as exc:
                if publish:
                    duration = datetime.datetime.now() - start
                    listeners.publish_command_failure(duration,
                                                      _convert_exception(exc),
                                                      cmd_name, rqst_id,
                                                      self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  cmd_name, rqst_id,
                                                  self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  cmd_name, rqst_id,
                                                  self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration,
                                                  _convert_exception(exc),
                                                  cmd_name, rqst_id,
                                                  self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = doc['data'][0]
            elif cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {
                    "cursor": {
                        "id": doc["cursor_id"],
                        "ns": self.__collection.full_name
                    },
                    "ok": 1
                }
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            listeners.publish_command_success(duration, res, cmd_name, rqst_id,
                                              self.__address)

        if from_command and cmd_name != "explain":
            cursor = doc['data'][0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = doc["cursor_id"]
            self.__data = deque(doc["data"])
            self.__retrieved += doc["number_returned"]

        if self.__id == 0:
            self.__killed = True

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
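
When from_command is False the reply came from a legacy OP_REPLY, and helpers._unpack_response hands back a dict whose keys this code indexes directly. Roughly (shape inferred from the accesses above; values are fake):

# Illustrative shape of the legacy unpack result used above (values are fake):
doc = {
    "cursor_id": 123456789,              # 0 once the server cursor is exhausted
    "number_returned": 2,
    "data": [{"_id": 1}, {"_id": 2}],    # decoded BSON documents in this batch
}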
Ejemplo n.º 24
0
def command(sock, dbname, spec, slave_ok, is_mongos,
            read_preference, codec_options, session, client, check=True,
            allowable_errors=None, address=None,
            check_keys=False, listeners=None, max_bson_size=None,
            read_concern=None,
            parse_write_concern_error=False,
            collation=None,
            compression_ctx=None):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `session`: optional ClientSession instance.
      - `client`: optional MongoClient instance for updating $clusterTime.
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
      - `parse_write_concern_error`: Whether to parse the ``writeConcernError``
        field in the command response.
      - `collation`: The collation for this command.
      - `compression_ctx`: optional compression context used to compress the
        message before sending.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0

    # Publish the original command document, perhaps with lsid and $clusterTime.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern:
        if read_concern.level:
            spec['readConcern'] = read_concern.document
        if (session and session.options.causal_consistency
                and session.operation_time is not None
                and not session._in_transaction):
            spec.setdefault(
                'readConcern', {})['afterClusterTime'] = session.operation_time
    if collation is not None:
        spec['collation'] = collation

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    if name.lower() not in _NO_COMPRESSION and compression_ctx:
        request_id, msg, size = message.query(
            flags, ns, 0, -1, spec, None, codec_options, check_keys, compression_ctx)
    else:
        request_id, msg, size = message.query(
            flags, ns, 0, -1, spec, None, codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        reply = receive_message(sock, request_id)
        unpacked_docs = reply.unpack_response(codec_options=codec_options)

        response_doc = unpacked_docs[0]
        if client:
            client._receive_cluster_time(response_doc, session)
        if check:
            helpers._check_command_response(
                response_doc, None, allowable_errors,
                parse_write_concern_error=parse_write_concern_error)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, response_doc, name, request_id, address)
    return response_doc