Example #1
def _first_batch(sock_info, namespace, query,
                 ntoreturn, slave_ok, codec_options, read_preference, cmd):
    """Simple query helper for retrieving a first (and possibly only) batch."""
    query = _Query(
        0, namespace, 0, ntoreturn, query, None,
        codec_options, read_preference, 0, ntoreturn)

    name = next(iter(cmd))
    duration = None
    publish = monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, max_doc_size = query.get_message(slave_ok,
                                                      sock_info.is_mongos)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(
            cmd, namespace.split('.', 1)[0], request_id, sock_info.address)
        start = datetime.datetime.now()

    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(1, request_id)
    try:
        result = _unpack_response(response, None, codec_options)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(
                duration, exc.details, name, request_id, sock_info.address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(
            duration, result, name, request_id, sock_info.address)

    return result
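
The module-level monitoring.publish_command_* calls above hand the events to whatever command listeners the application has registered. Below is a minimal sketch of the consuming side, written against the command-monitoring API documented for the public pymongo.monitoring module; the class and event-attribute names come from those docs, not from the snippet above, and may differ slightly across driver versions.

from pymongo import monitoring

class CommandLogger(monitoring.CommandListener):
    """Log every command started/succeeded/failed event published by the driver."""

    def started(self, event):
        print("started %s (request_id=%s) on %s" %
              (event.command_name, event.request_id, event.connection_id))

    def succeeded(self, event):
        print("succeeded %s in %s microseconds" %
              (event.command_name, event.duration_micros))

    def failed(self, event):
        print("failed %s: %r" % (event.command_name, event.failure))

# Register before constructing any MongoClient so the events are delivered.
monitoring.register(CommandLogger())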
Example #2
def command(
    sock,
    dbname,
    spec,
    slave_ok,
    is_mongos,
    read_preference,
    codec_options,
    check=True,
    allowable_errors=None,
    address=None,
    user=False,
    check_keys=False,
):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    name = next(iter(spec))
    ns = dbname + ".$cmd"
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None, codec_options, check_keys)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(response, codec_options=codec_options)

        response_doc = unpacked["data"][0]
        if check:
            msg = "command %s on namespace %s failed: %%s" % (repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(response_doc, msg, allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(duration, exc.details, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(duration, response_doc, name, request_id, address)
    return response_doc
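
Note how the published duration is assembled: the time spent encoding the message is measured first, the clock is restarted around the network round trip, and the two intervals are summed. The same pattern appears in every example on this page; here it is isolated as a standalone sketch (the helper name and callable arguments are illustrative, not PyMongo APIs).

import datetime

def timed_round_trip(encode, send_and_receive):
    """Illustrative: time the encode step and the network round trip
    separately and return (result, total_duration), mirroring the
    duration computed and published above."""
    start = datetime.datetime.now()
    msg = encode()  # build the wire message
    encoding_duration = datetime.datetime.now() - start

    start = datetime.datetime.now()
    result = send_and_receive(msg)  # write the message, read the reply
    duration = (datetime.datetime.now() - start) + encoding_duration
    return result, duration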
Example #3
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None,
            address=None,
            user=False,
            check_keys=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None,
                                       codec_options, check_keys)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            msg = "command %s on namespace %s failed: %%s" % (
                repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(response_doc, msg,
                                            allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(duration, exc.details, name,
                                               request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(duration, response_doc, name,
                                           request_id, address)
    return response_doc
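
The line flags = 4 if slave_ok else 0 sets the SlaveOkay bit of the legacy OP_QUERY flags word. For reference, the wire-protocol bit values are sketched below; the constant names and the helper are illustrative, while the numeric values follow the MongoDB wire protocol.

# Legacy OP_QUERY flag bits; `flags = 4` above sets only SLAVE_OKAY.
TAILABLE_CURSOR = 1 << 1    # 2
SLAVE_OKAY = 1 << 2         # 4
OPLOG_REPLAY = 1 << 3       # 8
NO_CURSOR_TIMEOUT = 1 << 4  # 16
AWAIT_DATA = 1 << 5         # 32
EXHAUST = 1 << 6            # 64
PARTIAL = 1 << 7            # 128

def query_flags(slave_ok=False, tailable=False, await_data=False):
    """Illustrative: compose the flags word the way the helper above does."""
    flags = SLAVE_OKAY if slave_ok else 0
    if tailable:
        flags |= TAILABLE_CURSOR
    if await_data:
        flags |= AWAIT_DATA
    return flags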
Example #4
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        publish = monitoring.enabled()

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                monitoring.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {"cursor": {"id": doc["cursor_id"],
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            monitoring.publish_command_success(
                duration, res, cmd_name, rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
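
Because the legacy OP_REPLY that helpers._unpack_response returns does not look like a modern find/getMore/explain reply, the publish block above reshapes it into a command-style response document before handing it to the listeners. The same reshaping, isolated as a standalone sketch (the function name is illustrative):

def as_command_reply(doc, cmd_name, full_name):
    """Illustrative: turn an unpacked reply dict (with 'cursor_id',
    'number_returned' and 'data' keys) into the document shape published
    for a find, getMore, or explain command above."""
    if cmd_name == "explain":
        return doc["data"][0] if doc["number_returned"] else {}
    reply = {"cursor": {"id": doc["cursor_id"], "ns": full_name}, "ok": 1}
    batch_key = "firstBatch" if cmd_name == "find" else "nextBatch"
    reply["cursor"][batch_key] = doc["data"]
    return reply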
Example #5
    def _fail(self, request_id, failure, duration):
        """Publish a CommandFailedEvent."""
        monitoring.publish_command_failure(
            duration, failure, self.name,
            request_id, self.sock_info.address, self.op_id)
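
The failure argument here is the details attribute of the raised OperationFailure or NotMasterError, i.e. the error document returned by the server. A representative value, for illustration only (the exact fields and error code vary by server version):

# Roughly what `exc.details` looks like when it is published as the
# command-failure document (illustrative values, not real output).
failure = {
    "ok": 0,
    "errmsg": "not master",
    "code": 10107,
}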
Example #6
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        publish = monitoring.enabled()

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(
                    operation, **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(
                        response.socket_info, response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                monitoring.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   cmd_name, rqst_id,
                                                   self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   cmd_name, rqst_id,
                                                   self.__address)

            client._reset_server_and_request_check(self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {
                    "cursor": {
                        "id": doc["cursor_id"],
                        "ns": self.__collection.full_name
                    },
                    "ok": 1
                }
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            monitoring.publish_command_success(duration, res, cmd_name,
                                               rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
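
In the exhaust-cursor branch no getMore message is actually sent, so the code fabricates a getMore command document purely so listeners still receive a well-formed command-started event. That construction, isolated as a sketch (the function name is illustrative):

from bson.son import SON

def fake_getmore_cmd(cursor_id, collection_name, batch_size=0, max_time_ms=None):
    """Illustrative: build the synthetic getMore command that the
    exhaust-cursor branch above publishes."""
    cmd = SON([("getMore", cursor_id), ("collection", collection_name)])
    if batch_size:
        cmd["batchSize"] = batch_size
    if max_time_ms:
        cmd["maxTimeMS"] = max_time_ms
    return cmd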
Example #7
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        publish = monitoring.enabled()
        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data, self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   "getMore", rqst_id,
                                                   self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   "getMore", rqst_id,
                                                   self.__address)

            client._reset_server_and_request_check(self.address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {
                "cursor": {
                    "id": doc["cursor_id"],
                    "ns": self.__collection.full_name,
                    "nextBatch": doc["data"]
                },
                "ok": 1
            }
            monitoring.publish_command_success(duration, res, "getMore",
                                               rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
Example #8
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        publish = monitoring.enabled()
        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data,
                                           self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            client._reset_server_and_request_check(self.address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {"cursor": {"id": doc["cursor_id"],
                              "ns": self.__collection.full_name,
                              "nextBatch": doc["data"]},
                   "ok": 1}
            monitoring.publish_command_success(
                duration, res, "getMore", rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
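
Every variant on this page ends with the same cursor bookkeeping: adopt the new cursor id, mark the cursor dead when that id is 0, add to the retrieved count, and buffer the batch in a deque. A standalone sketch of that bookkeeping (class and attribute names are illustrative):

from collections import deque

class BatchState(object):
    """Illustrative: the per-batch bookkeeping done at the end of
    __send_message in the examples above."""

    def __init__(self):
        self.cursor_id = None
        self.killed = False
        self.retrieved = 0
        self.data = deque()

    def apply(self, doc):
        # `doc` is an unpacked reply with 'cursor_id', 'number_returned'
        # and 'data' keys.
        self.cursor_id = doc["cursor_id"]
        if self.cursor_id == 0:
            # The server has exhausted the cursor; nothing more to fetch.
            self.killed = True
        self.retrieved += doc["number_returned"]
        self.data = deque(doc["data"])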