def _process_kill_cursors_queue(self):
    """Drain the kill-cursors queue and send OP_KILL_CURSORS messages.

    Cursor ids are grouped by server address so each server receives a
    single message.  Publishes command monitoring events when enabled;
    failures to reach a server are reported as warnings, not raised.
    """
    # Drain the queue item by item; other threads or the GC may still be
    # appending concurrently, so pop until we observe it empty.
    pending = defaultdict(list)
    draining = True
    while draining:
        try:
            address, cursor_ids = self.__kill_cursors_queue.pop()
        except IndexError:
            draining = False
        else:
            pending[address].extend(cursor_ids)

    if not pending:
        # Don't re-open topology if it's closed and there's no pending
        # cursors.
        return

    publish = monitoring.enabled()
    topology = self._get_topology()
    for address, cursor_ids in pending.items():
        try:
            if not address:
                # Application called close_cursor() with no address.
                server = topology.select_server(writable_server_selector)
            else:
                # address could be a tuple or _CursorAddress, but
                # select_server_by_address needs (host, port).
                server = topology.select_server_by_address(tuple(address))

            if publish:
                start = datetime.datetime.now()
            data = message.kill_cursors(cursor_ids)
            if publish:
                # Time spent encoding the message.
                duration = datetime.datetime.now() - start
                try:
                    dbname, collname = address.namespace.split(".", 1)
                except AttributeError:
                    # A plain (host, port) tuple has no namespace.
                    dbname = collname = 'OP_KILL_CURSORS'
                command = SON([('killCursors', collname),
                               ('cursors', cursor_ids)])
                monitoring.publish_command_start(
                    command, dbname, data[0], address)
                start = datetime.datetime.now()
            server.send_message(data, self.__all_credentials)
            if publish:
                # Total duration = encoding time + network send time.
                duration = (datetime.datetime.now() - start) + duration
                # OP_KILL_CURSORS returns no reply, fake one.
                reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                monitoring.publish_command_success(
                    duration, reply, 'killCursors', data[0], address)
        except ConnectionFailure as exc:
            warnings.warn("couldn't close cursor on %s: %s"
                          % (address, exc))
Beispiel #2
0
    def _process_kill_cursors_queue(self):
        """Process any pending kill cursors requests.

        Drains the queue, groups cursor ids by server address, and sends
        one OP_KILL_CURSORS message per server.  Publishes command
        monitoring events when monitoring is enabled; connection failures
        are reported as warnings rather than raised.
        """
        address_to_cursor_ids = defaultdict(list)

        # Other threads or the GC may append to the queue concurrently.
        while True:
            try:
                address, cursor_ids = self.__kill_cursors_queue.pop()
            except IndexError:
                # Queue is (momentarily) empty; stop draining.
                break

            address_to_cursor_ids[address].extend(cursor_ids)

        # Don't re-open topology if it's closed and there's no pending cursors.
        if address_to_cursor_ids:
            publish = monitoring.enabled()
            topology = self._get_topology()
            for address, cursor_ids in address_to_cursor_ids.items():
                try:
                    if address:
                        # address could be a tuple or _CursorAddress, but
                        # select_server_by_address needs (host, port).
                        server = topology.select_server_by_address(
                            tuple(address))
                    else:
                        # Application called close_cursor() with no address.
                        server = topology.select_server(
                            writable_server_selector)

                    if publish:
                        start = datetime.datetime.now()
                    data = message.kill_cursors(cursor_ids)
                    if publish:
                        # Time spent encoding the OP_KILL_CURSORS message.
                        duration = datetime.datetime.now() - start
                        try:
                            dbname, collname = address.namespace.split(".", 1)
                        except AttributeError:
                            # A plain (host, port) tuple has no namespace.
                            dbname = collname = 'OP_KILL_CURSORS'
                        command = SON([('killCursors', collname),
                                       ('cursors', cursor_ids)])
                        monitoring.publish_command_start(
                            command, dbname, data[0], address)
                        start = datetime.datetime.now()
                    server.send_message(data, self.__all_credentials)
                    if publish:
                        # Total duration = encoding time + network send time.
                        duration = (datetime.datetime.now() - start) + duration
                        # OP_KILL_CURSORS returns no reply, fake one.
                        reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                        monitoring.publish_command_success(
                            duration, reply, 'killCursors', data[0], address)

                except ConnectionFailure as exc:
                    # Best-effort: failing to kill a cursor is not fatal.
                    warnings.warn("couldn't close cursor on %s: %s" %
                                  (address, exc))
Beispiel #3
0
def _first_batch(sock_info, namespace, query,
                 ntoreturn, slave_ok, codec_options, read_preference, cmd):
    """Simple query helper for retrieving a first (and possibly only) batch.

    Builds an OP_QUERY message from `query`, sends it on `sock_info`, and
    returns the unpacked server response.  Publishes command monitoring
    events (using `cmd` as the command document) when monitoring is
    enabled.

    Re-raises NotMasterError / OperationFailure from response unpacking,
    publishing a command-failure event first when monitoring is enabled.
    """
    query = _Query(
        0, namespace, 0, ntoreturn, query, None,
        codec_options, read_preference, 0, ntoreturn)

    # The command name is the first key of the command document.
    name = next(iter(cmd))
    publish = monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, max_doc_size = query.get_message(slave_ok,
                                                      sock_info.is_mongos)

    if publish:
        # Encoding time counts toward the total command duration.
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(
            cmd, namespace.split('.', 1)[0], request_id, sock_info.address)
        start = datetime.datetime.now()

    sock_info.send_message(msg, max_doc_size)
    response = sock_info.receive_message(1, request_id)
    try:
        result = _unpack_response(response, None, codec_options)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(
                duration, exc.details, name, request_id, sock_info.address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(
            duration, result, name, request_id, sock_info.address)

    return result
def command(
    sock,
    dbname,
    spec,
    slave_ok,
    is_mongos,
    read_preference,
    codec_options,
    check=True,
    allowable_errors=None,
    address=None,
    user=False,
    check_keys=False,
):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    # The command name is the first key of the command document.
    cmd_name = next(iter(spec))
    ns = dbname + ".$cmd"
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    # Only publish monitoring events for user-issued commands.
    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, request_msg, _ = message.query(
        flags, ns, 0, -1, spec, None, codec_options, check_keys)

    if publish:
        # Encoding time counts toward the total command duration.
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(request_msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(
            response, codec_options=codec_options)

        response_doc = unpacked["data"][0]
        if check:
            # Escape '%' so the server's error text interpolates cleanly.
            errmsg = "command %s on namespace %s failed: %%s" % (
                repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(
                response_doc, errmsg, allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(
                duration, exc.details, cmd_name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(
            duration, response_doc, cmd_name, request_id, address)
    return response_doc
Beispiel #5
0
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None,
            address=None,
            user=False,
            check_keys=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    # The command name is the first key of the command document.
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    # Flag bit 4 corresponds to the SlaveOkay wire protocol bit.
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    # Only publish monitoring events for user-issued commands.
    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None,
                                       codec_options, check_keys)

    if publish:
        # Encoding time counts toward the total command duration.
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            # Escape '%' so the server's error text interpolates cleanly.
            msg = "command %s on namespace %s failed: %%s" % (
                repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(response_doc, msg,
                                            allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(duration, exc.details, name,
                                               request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(duration, response_doc, name,
                                           request_id, address)
    return response_doc
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        publish = monitoring.enabled()

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            # Pin subsequent requests to the server that opened the cursor.
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                monitoring.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                # Read the next batch straight off the exhaust socket.
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            # Decoding is timed separately and added to cmd_duration below.
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {"cursor": {"id": doc["cursor_id"],
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            monitoring.publish_command_success(
                duration, res, cmd_name, rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        # A zero cursor id means the server has exhausted the cursor.
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
Beispiel #7
0
 def _succeed(self, request_id, reply, duration):
     """Publish a CommandSucceededEvent for the completed operation."""
     monitoring.publish_command_success(duration,
                                        reply,
                                        self.name,
                                        request_id,
                                        self.sock_info.address,
                                        self.op_id)
Beispiel #8
0
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        publish = monitoring.enabled()

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            # Pin subsequent requests to the server that opened the cursor.
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(
                    operation, **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(
                        response.socket_info, response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                monitoring.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                # Read the next batch straight off the exhaust socket.
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            # Decoding is timed separately and added to cmd_duration below.
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   cmd_name, rqst_id,
                                                   self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   cmd_name, rqst_id,
                                                   self.__address)

            client._reset_server_and_request_check(self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {
                    "cursor": {
                        "id": doc["cursor_id"],
                        "ns": self.__collection.full_name
                    },
                    "ok": 1
                }
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            monitoring.publish_command_success(duration, res, cmd_name,
                                               rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        # A zero cursor id means the server has exhausted the cursor.
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
    def __send_message(self, operation):
        """Send a getmore message and handle the response.

        Publishes command monitoring events for the getMore when
        monitoring is enabled.  Re-raises AutoReconnect,
        OperationFailure, and NotMasterError after marking the cursor
        killed.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        publish = monitoring.enabled()
        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            # Decoding is timed separately and added to cmd_duration below.
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data, self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   "getMore", rqst_id,
                                                   self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(duration, exc.details,
                                                   "getMore", rqst_id,
                                                   self.__address)

            # NOTE(review): uses self.address here but self.__address
            # everywhere else in this method — presumably a public
            # property returning the same value; confirm.
            client._reset_server_and_request_check(self.address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {
                "cursor": {
                    "id": doc["cursor_id"],
                    "ns": self.__collection.full_name,
                    "nextBatch": doc["data"]
                },
                "ok": 1
            }
            monitoring.publish_command_success(duration, res, "getMore",
                                               rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        # A zero cursor id means the server has exhausted the cursor.
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
    def __send_message(self, operation):
        """Send a getmore message and handle the response.

        Publishes command monitoring events for the getMore when
        monitoring is enabled.  Re-raises AutoReconnect,
        OperationFailure, and NotMasterError after marking the cursor
        killed.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        publish = monitoring.enabled()
        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            # Decoding is timed separately and added to cmd_duration below.
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data,
                                           self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                monitoring.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            # NOTE(review): uses self.address here but self.__address
            # everywhere else in this method — presumably a public
            # property returning the same value; confirm.
            client._reset_server_and_request_check(self.address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {"cursor": {"id": doc["cursor_id"],
                              "ns": self.__collection.full_name,
                              "nextBatch": doc["data"]},
                   "ok": 1}
            monitoring.publish_command_success(
                duration, res, "getMore", rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        # A zero cursor id means the server has exhausted the cursor.
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])