Example 1
    def __check_response_to_last_error(self, response):
        """Check a response to a lastError message for errors.

        `response` is a byte string representing a response to the message.
        If it represents an error response we raise OperationFailure.

        Return the response as a document.
        """
        response = helpers._unpack_response(response)

        assert response["number_returned"] == 1
        error = response["data"][0]

        # TODO unify logic with database.error method
        if error.get("err", 0) is None:
            return error
        if error["err"] == "not master":
            self._reset()

        if "code" in error and error["code"] in [11000, 11001]:
            raise DuplicateKeyError(error["err"])
        else:
            raise OperationFailure(error["err"])

        return error
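Across these variants the object being checked is the first document of the unpacked reply. As a rough, hypothetical illustration of the dict shape these older `helpers._unpack_response` versions return (the getLastError document itself varies by server version):

    # Illustrative only: the shape assumed by the checks above.
    unpacked = {
        "cursor_id": 0,
        "starting_from": 0,
        "number_returned": 1,
        "data": [{"ok": 1, "err": None, "n": 1}],  # one getLastError document
    }
    error = unpacked["data"][0]
    assert error.get("err") is None  # err of None means the write succeeded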
Example 2
    def __check_response_to_last_error(self, response):
        """Check a response to a lastError message for errors.

        `response` is a byte string representing a response to the message.
        If it represents an error response we raise OperationFailure.

        Return the response as a document.
        """
        response = helpers._unpack_response(response)

        assert response["number_returned"] == 1
        error = response["data"][0]

        helpers._check_command_response(error, self.disconnect)

        # TODO unify logic with database.error method
        if error.get("err", 0) is None:
            return error
        if error["err"] == "not master":
            self.disconnect()
            raise AutoReconnect("not master")

        if "code" in error:
            if error["code"] in [11000, 11001]:
                raise DuplicateKeyError(error["err"])
            else:
                raise OperationFailure(error["err"], error["code"])
        else:
            raise OperationFailure(error["err"])
Example 3
    def __check_response_to_last_error(self, response):
        """Check a response to a lastError message for errors.

        `response` is a byte string representing a response to the message.
        If it represents an error response we raise OperationFailure.

        Return the response as a document.
        """
        response = helpers._unpack_response(response)

        assert response["number_returned"] == 1
        error = response["data"][0]

        helpers._check_command_response(error, self.disconnect)

        error_msg = error.get("err", "")
        if error_msg is None:
            return error
        if error_msg.startswith("not master"):
            self.disconnect()
            raise AutoReconnect(error_msg)

        if "code" in error:
            if error["code"] in [11000, 11001, 12582]:
                raise DuplicateKeyError(error["err"])
            else:
                raise OperationFailure(error["err"], error["code"])
        else:
            raise OperationFailure(error["err"])
Example 4
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            doc = helpers._unpack_response(response.data,
                                           self.__id,
                                           self.__collection.codec_options)
        except CursorNotFound:
            self.__killed = True
            raise
        except NotMasterError:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client._reset_server_and_request_check(self.address)
            raise
        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
Example 5
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.
        """
        db = self.__collection.database
        kwargs = {"_sock": self.__socket,
                  "_must_use_master": self.__must_use_master}
        if self.__connection_id is not None:
            kwargs["_connection_to_use"] = self.__connection_id

        response = db.connection._send_message_with_response(message,
                                                             **kwargs)

        if isinstance(response, tuple):
            (connection_id, response) = response
        else:
            connection_id = None

        self.__connection_id = connection_id

        try:
            response = helpers._unpack_response(response, self.__id)
        except AutoReconnect:
            db.connection._reset()
            raise
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__tailable:
            assert response["starting_from"] == self.__retrieved

        self.__retrieved += response["number_returned"]
        self.__data = response["data"]

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()
Example 6
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            doc = helpers._unpack_response(response.data, self.__id,
                                           self.__collection.codec_options)
        except CursorNotFound:
            self.__killed = True
            raise
        except NotMasterError:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client._reset_server_and_request_check(self.address)
            raise
        self.__id = doc["cursor_id"]

        assert doc["starting_from"] == self.__retrieved, (
            "Result batch started from %s, expected %s" %
            (doc['starting_from'], self.__retrieved))

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
Example 7
def command(sock, dbname, spec, slave_ok, is_mongos, read_preference,
            codec_options, check=True, allowable_errors=None):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
    """
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    request_id, msg, _ = message.query(flags, ns, 0, -1, spec,
                                       None, codec_options)
    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    unpacked = helpers._unpack_response(response, codec_options=codec_options)
    response_doc = unpacked['data'][0]
    msg = "command %s on namespace %s failed: %%s" % (
        repr(spec).replace("%", "%%"), ns)
    if check:
        helpers._check_command_response(response_doc, msg, allowable_errors)
    return response_doc
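A hypothetical call, only to show how the parameters line up; it assumes an already-connected raw socket `sock` and that this module-level `command` sits next to PyMongo's internal `message`, `helpers` and `receive_message` (this call is not taken from the examples themselves):

    from bson.codec_options import CodecOptions
    from pymongo.read_preferences import ReadPreference

    # Run ismaster on the admin database over an existing socket.
    ismaster = command(sock, 'admin', {'ismaster': 1},
                       slave_ok=True, is_mongos=False,
                       read_preference=ReadPreference.PRIMARY,
                       codec_options=CodecOptions())
    print(ismaster.get('ismaster'), ismaster.get('maxWireVersion'))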
Example 8
    async def command(
            self,
            dbname: str,
            spec: SON,
            read_preference: Optional[Union[_ALL_READ_PREFERENCES]] = None,
            codec_options: Optional[CodecOptions] = None,
            check: bool = True,
            allowable_errors: Optional[List[str]] = None,
            check_keys: bool = False,
            read_concern: ReadConcern = DEFAULT_READ_CONCERN
    ) -> MutableMapping:

        if self.max_wire_version < 4 and not read_concern.ok_for_legacy:
            raise ConfigurationError(
                'Read concern of level {} is not valid with max wire version of {}'
                .format(read_concern.level, self.max_wire_version))

        read_preference = read_preference or self.options.read_preference
        codec_options = codec_options or self.options.codec_options

        name = next(iter(spec))
        ns = dbname + '.$cmd'

        if read_preference != ReadPreference.PRIMARY:
            flags = 4
        else:
            flags = 0

        if self.is_mongos:
            spec = message._maybe_add_read_preference(spec, read_preference)
        if read_concern.level:
            spec['readConcern'] = read_concern.document

        # See explanation in perform_operation method
        request_id = None
        while request_id is None or request_id in self.__request_futures:
            request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                                  codec_options, check_keys)

        if size > self.max_bson_size + message._COMMAND_OVERHEAD:
            message._raise_document_too_large(
                name, size, self.max_bson_size + message._COMMAND_OVERHEAD)

        response_future = asyncio.Future()
        self.__request_futures[request_id] = response_future

        self.send_message(msg)

        response = await response_future

        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)
        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(response_doc, None,
                                            allowable_errors)

        return response_doc
Example 9
    def __auth(self, sock, dbase, user, passwd):
        """Athenticate `sock` against `dbase`
        """
        # TODO: Error handling...
        # Get a nonce
        request_id, msg, _ = message.query(0, dbase + '.$cmd',
                                           0, -1, {'getnonce': 1})
        sock.sendall(msg)
        raw = self.__recv_msg(1, request_id, sock)
        nonce = helpers._unpack_response(raw)['data'][0]['nonce']
        key = helpers._auth_key(nonce, user, passwd)

        # Actually authenticate
        query = {'authenticate': 1, 'user': user, 'nonce': nonce, 'key': key}
        request_id, msg, _ = message.query(0, dbase + '.$cmd', 0, -1, query)
        sock.sendall(msg)
        raw = self.__recv_msg(1, request_id, sock)
        print helpers._unpack_response(raw)['data'][0]
Example 10
 def __simple_command(self, sock, dbname, spec):
     """Send a command to the server.
     """
     rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
     sock.sendall(msg)
     response = self.__recv_msg(1, rqst_id, sock)
     response = helpers._unpack_response(response)['data'][0]
     msg = "command %r failed: %%s" % spec
     helpers._check_command_response(response, None, msg)
     return response
Example 11
 def __simple_command(self, sock_info, dbname, spec):
     """Send a command to the server.
     """
     rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
     sock_info.sock.sendall(msg)
     response = self.__recv_msg(1, rqst_id, sock_info)
     response = helpers._unpack_response(response)['data'][0]
     msg = "command %r failed: %%s" % spec
     helpers._check_command_response(response, None, msg)
     return response
Example 12
 def __simple_command(self, sock_info, dbname, spec):
     """Send a command to the server.
     """
     rqst_id, msg, _ = message.query(0, dbname + ".$cmd", 0, -1, spec)
     sock_info.sock.sendall(msg)
     response = self.__receive_message_on_socket(1, rqst_id, sock_info)
     response = helpers._unpack_response(response)["data"][0]
     msg = "command %r failed: %%s" % spec
     helpers._check_command_response(response, None, msg)
     return response
Example 13
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.
        """
        db = self.__collection.database
        kwargs = {"_must_use_master": self.__must_use_master}
        kwargs["read_preference"] = self.__read_preference
        kwargs["tag_sets"] = self.__tag_sets
        kwargs["secondary_acceptable_latency_ms"] = (
            self.__secondary_acceptable_latency_ms)
        if self.__connection_id is not None:
            kwargs["_connection_to_use"] = self.__connection_id
        kwargs.update(self.__kwargs)

        try:
            response = db.connection._send_message_with_response(message,
                                                                 **kwargs)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        if isinstance(response, tuple):
            (connection_id, response) = response
        else:
            connection_id = None

        self.__connection_id = connection_id

        try:
            response = helpers._unpack_response(response, self.__id,
                                                self.__as_class,
                                                self.__tz_aware,
                                                self.__uuid_subtype)
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            db.connection.disconnect()
            raise
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__tailable:
            assert response["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" % (
                    response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()
Example 14
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.
        """
        db = self.__collection.database
        kwargs = {"_must_use_master": self.__must_use_master}
        kwargs["read_preference"] = self.__read_preference
        kwargs["tag_sets"] = self.__tag_sets
        kwargs["secondary_acceptable_latency_ms"] = (
            self.__secondary_acceptable_latency_ms)
        if self.__connection_id is not None:
            kwargs["_connection_to_use"] = self.__connection_id
        kwargs.update(self.__kwargs)

        try:
            response = db.connection._send_message_with_response(message,
                                                                 **kwargs)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        if isinstance(response, tuple):
            (connection_id, response) = response
        else:
            connection_id = None

        self.__connection_id = connection_id

        try:
            response = helpers._unpack_response(response, self.__id,
                                                self.__as_class,
                                                self.__tz_aware,
                                                self.__uuid_subtype)
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            db.connection.disconnect()
            raise
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__tailable:
            assert response["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" % (
                    response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()
Example 15
 def __is_master(self, candidate):
     """Directly call ismaster.
     """
     # TODO: Error handling...
     request_id, msg, _ = message.query(0, 'admin.$cmd',
                                        0, -1, {'ismaster': 1})
     mongo = pool.Pool(candidate, self.__max_pool_size,
                       self.__net_timeout, self.__conn_timeout)
     sock = mongo.get_socket()[0]
     sock.sendall(msg)
     raw = self.__recv_msg(1, request_id, sock)
     response = helpers._unpack_response(raw)['data'][0]
     return response, mongo
Example 16
    async def write_command(self, request_id: int, msg: bytes) -> dict:
        response_future = asyncio.Future(loop=self.loop)
        self.__request_futures[request_id] = response_future

        self.send_message(msg)

        response_data = await response_future
        response = helpers._unpack_response(response_data)
        assert response['number_returned'] == 1

        result = response['data'][0]

        # Raises NotMasterError or OperationFailure.
        helpers._check_command_response(result)
        return result
Example 17
    def _check_with_socket(self, sock_info):
        """Return (IsMaster, round_trip_time).

        Can raise ConnectionFailure or OperationFailure.
        """
        start = _time()
        request_id, msg, max_doc_size = message.query(0, 'admin.$cmd', 0, -1,
                                                      {'ismaster': 1}, None,
                                                      DEFAULT_CODEC_OPTIONS)

        # TODO: use sock_info.command()
        sock_info.send_message(msg, max_doc_size)
        raw_response = sock_info.receive_message(1, request_id)
        result = helpers._unpack_response(raw_response)
        return IsMaster(result['data'][0]), _time() - start
Example 18
File: monitor.py Project: Alpus/Eth
    def _check_with_socket(self, sock_info):
        """Return (IsMaster, round_trip_time).

        Can raise ConnectionFailure or OperationFailure.
        """
        start = _time()
        request_id, msg, max_doc_size = message.query(
            0, 'admin.$cmd', 0, -1, {'ismaster': 1},
            None, DEFAULT_CODEC_OPTIONS)

        # TODO: use sock_info.command()
        sock_info.send_message(msg, max_doc_size)
        raw_response = sock_info.receive_message(1, request_id)
        result = helpers._unpack_response(raw_response)
        return IsMaster(result['data'][0]), _time() - start
Example 19
    async def _refresh(self) -> int:
        """Refreshes the cursor with more data from the server.

        Returns the length of self.__data after refresh. Will exit early if
        self.__data is already non-empty. Raises OperationFailure when the
        cursor cannot be refreshed due to an error on the query.
        """
        if len(self.__data) or self.__killed:
            return len(self.__data)

        if self.__id:  # Get More
            dbname, collname = self.__ns.split('.', 1)

            try:
                data = await self.__connection.perform_operation(
                    _GetMore(dbname,
                             collname,
                             self.__batch_size,
                             self.__id,
                             self.__collection.codec_options))
            except EOFError:
                self.__killed = True
                raise

            try:
                doc = helpers._unpack_response(data,
                                               self.__id,
                                               self.__collection.codec_options)
                helpers._check_command_response(doc['data'][0])
            except OperationFailure:
                self.__killed = True

                raise

            cursor = doc['data'][0]['cursor']
            documents = cursor['nextBatch']
            self.__id = cursor['id']
            self.__retrieved += len(documents)

            if self.__id == 0:
                self.__killed = True
            self.__data = deque(documents)

        else:  # Cursor id is zero nothing else to return
            self.__killed = True

        return len(self.__data)
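The command-cursor path above unwraps a reply shaped like the server's find/getMore command response rather than the raw OP_REPLY fields used by the older cursors. A sketch of that shape, with illustrative values only:

    # What doc['data'][0] looks like on the getMore branch above.
    reply = {
        "cursor": {
            "id": 0,                    # 0 means the server-side cursor is exhausted
            "ns": "db.collection",
            "nextBatch": [{"_id": 1}, {"_id": 2}],  # 'firstBatch' on the initial query
        },
        "ok": 1,
    }
    documents = reply["cursor"]["nextBatch"]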
Example 20
    def __simple_command(self, sock_info, dbname, spec):
        """Send a command to the server.
        """
        rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
        start = time.time()
        try:
            sock_info.sock.sendall(msg)
            response = self.__receive_message_on_socket(1, rqst_id, sock_info)
        except:
            sock_info.close()
            raise

        end = time.time()
        response = helpers._unpack_response(response)['data'][0]
        msg = "command %r failed: %%s" % spec
        helpers._check_command_response(response, None, msg)
        return response, end - start
Example 21
    def __simple_command(self, sock_info, dbname, spec):
        """Send a command to the server.
        """
        rqst_id, msg, _ = message.query(0, dbname + '.$cmd', 0, -1, spec)
        start = time.time()
        try:
            sock_info.sock.sendall(msg)
            response = self.__receive_message_on_socket(1, rqst_id, sock_info)
        except:
            sock_info.close()
            raise

        end = time.time()
        response = helpers._unpack_response(response)['data'][0]
        msg = "command %r failed: %%s" % spec
        helpers._check_command_response(response, None, msg)
        return response, end - start
Example 22
    def write_command(self, request_id, msg):
        """Send "insert" etc. command, returning response as a dict.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `request_id`: an int.
          - `msg`: bytes, the command message.
        """
        self.send_message(msg, 0)
        response = helpers._unpack_response(self.receive_message(1, request_id))
        assert response['number_returned'] == 1
        result = response['data'][0]

        # Raises NotMasterError or OperationFailure.
        helpers._check_command_response(result)
        return result
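In these write paths `helpers._check_command_response` is what turns a failed reply into an exception. As a simplified sketch only (not the library's actual implementation), the check amounts to something like:

    from pymongo.errors import OperationFailure

    def _check_ok(result):
        # A command reply carries an "ok" field; anything falsy means failure.
        if not result.get("ok"):
            raise OperationFailure(result.get("errmsg", "command failed"),
                                   result.get("code"))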
Example 23
    def nextback(self, callback, response):

        db = self.__collection.database

        if response is not None:
            if isinstance(response, tuple):
                (connection_id, response) = response
            else:
                connection_id = None

            self.__connection_id = connection_id

            try:
                response = helpers._unpack_response(response, self.__id,
                                                    as_class=self.__as_class)
            except AutoReconnect:
                db.connection._reset()
                raise

            self.__id = response["cursor_id"]

            # starting_from doesn't get set on getmore's for tailable cursors
            if not self.__tailable:
                assert response["starting_from"] == self.__retrieved

            self.__retrieved += response["number_returned"]
            self.__data = response["data"]

        if (self.__limit and self.__id and self.__limit <= self.__retrieved
                or not self.__id):
            self.__die()

        if len(self.__data):
            callback(db._fix_outgoing(self.__data.pop(0), self.__collection))
        else:
            callback(StopIteration)
Example 24
    def __send_message(self, msg):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.connection
        try:
            res = client._send_message_with_response(
                msg, _connection_to_use=self.__conn_id)
            self.__conn_id, (response, dummy0, dummy1) = res
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            response = helpers._unpack_response(
                response,
                self.__id,
                self.__codec_options.document_class,
                self.__codec_options.tz_aware,
                self.__codec_options.uuid_representation,
                self.__compile_re)
        except CursorNotFound:
            self.__killed = True
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        self.__id = response["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])
Example 25
    def __send_message(self, msg):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.connection
        try:
            res = client._send_message_with_response(
                msg, _connection_to_use=self.__conn_id)
            self.__conn_id, (response, dummy0, dummy1) = res
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            response = helpers._unpack_response(response,
                                                self.__id,
                                                *self.__decode_opts)
        except CursorNotFound:
            self.__killed = True
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        self.__id = response["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        assert response["starting_from"] == self.__retrieved, (
            "Result batch started from %s, expected %s" % (
                response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])
Example 26
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.
        """
        db = self.__collection.database
        kwargs = {"_must_use_master": self.__must_use_master}
        kwargs["read_preference"] = self.__read_preference
        if self.__connection_id is not None:
            kwargs["_connection_to_use"] = self.__connection_id
        kwargs.update(self.__kwargs)

        response = db.connection._send_message_with_response(message,
                                                             **kwargs)

        if isinstance(response, tuple):
            (connection_id, response) = response
        else:
            connection_id = None

        self.__connection_id = connection_id

        try:
            response = helpers._unpack_response(response, self.__id,
                                                self.__as_class,
                                                self.__tz_aware)
        except AutoReconnect:
            db.connection.disconnect()
            raise
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__tailable:
            assert response["starting_from"] == self.__retrieved

        self.__retrieved += response["number_returned"]
        self.__data = response["data"]

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()
Example 27
    def __check_response_to_last_error(self, response):
        """Check a response to a lastError message for errors.

        `response` is a byte string representing a response to the message.
        If it represents an error response we raise OperationFailure.

        Return the response as a document.
        """
        response = helpers._unpack_response(response)

        assert response["number_returned"] == 1
        error = response["data"][0]

        helpers._check_command_response(error, self.disconnect)

        error_msg = error.get("err", "")
        if error_msg is None:
            return error
        if error_msg.startswith("not master"):
            self.disconnect()
            raise AutoReconnect(error_msg)

        details = error
        # mongos returns the error code in an error object
        # for some errors.
        if "errObjects" in error:
            for errobj in error["errObjects"]:
                if errobj["err"] == error_msg:
                    details = errobj
                    break

        if "code" in details:
            if details["code"] in (11000, 11001, 12582):
                raise DuplicateKeyError(details["err"], details["code"])
            else:
                raise OperationFailure(details["err"], details["code"])
        else:
            raise OperationFailure(details["err"])
Example 28
    def __check_response_to_last_error(self, response):
        """Check a response to a lastError message for errors.

        `response` is a byte string representing a response to the message.
        If it represents an error response we raise OperationFailure.

        Return the response as a document.
        """
        response = helpers._unpack_response(response)

        assert response["number_returned"] == 1
        error = response["data"][0]

        helpers._check_command_response(error, self.disconnect)

        error_msg = error.get("err", "")
        if error_msg is None:
            return error
        if error_msg.startswith("not master"):
            self.disconnect()
            raise AutoReconnect(error_msg)

        details = error
        # mongos returns the error code in an error object
        # for some errors.
        if "errObjects" in error:
            for errobj in error["errObjects"]:
                if errobj["err"] == error_msg:
                    details = errobj
                    break

        if "code" in details:
            if details["code"] in (11000, 11001, 12582):
                raise DuplicateKeyError(details["err"], details["code"])
            else:
                raise OperationFailure(details["err"], details["code"])
        else:
            raise OperationFailure(details["err"])
Example 29
    def __send_message(self, msg):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.connection
        try:
            res = client._send_message_with_response(
                msg, _connection_to_use=self.__conn_id)
            self.__conn_id, (response, dummy0, dummy1) = res
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            response = helpers._unpack_response(response, self.__id,
                                                *self.__decode_opts)
        except CursorNotFound:
            self.__killed = True
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        self.__id = response["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        assert response["starting_from"] == self.__retrieved, (
            "Result batch started from %s, expected %s" %
            (response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])
Example 30
    async def write_command(self, request_id: int, msg: bytes) -> dict:
        self._check_connected()

        response_future = asyncio.Future()
        self.__request_futures[request_id] = response_future

        self.send_message(msg)

        try:
            response_data = await response_future
        except asyncio.CancelledError:
            if request_id in self.__request_futures:
                del self.__request_futures[request_id]
            raise

        response = helpers._unpack_response(response_data)
        assert response['number_returned'] == 1

        result = response['data'][0]

        # Raises NotMasterError or OperationFailure.
        helpers._check_command_response(result)
        return result
Example 31
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
    """
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None,
                                       codec_options)
    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    unpacked = helpers._unpack_response(response, codec_options=codec_options)
    response_doc = unpacked['data'][0]
    msg = "command %s on namespace %s failed: %%s" % (repr(spec).replace(
        "%", "%%"), ns)
    if check:
        helpers._check_command_response(response_doc, msg, allowable_errors)
    return response_doc
Example 32
    def __send_message(self, msg):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.connection
        try:
            res = client._send_message_with_response(
                msg, _connection_to_use=self.__conn_id)
            self.__conn_id, (response, dummy0, dummy1) = res
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        try:
            response = helpers._unpack_response(
                response, self.__id, self.__codec_options.document_class,
                self.__codec_options.tz_aware,
                self.__codec_options.uuid_representation, self.__compile_re)
        except CursorNotFound:
            self.__killed = True
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        self.__id = response["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])
Example 33
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                data = response.data
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise

        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            client._reset_server_and_request_check(self.__address)
            raise
        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
            assert doc["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" % (
                    doc['starting_from'], self.__retrieved))

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
Example 34
    async def command(self,
                      dbname: str,
                      spec: SON,
                      read_preference: Optional[
                          Union[_ALL_READ_PREFERENCES]] = None,
                      codec_options: Optional[CodecOptions] = None,
                      check: bool = True,
                      allowable_errors: Optional[List[str]] = None,
                      check_keys: bool = False,
                      read_concern: ReadConcern = DEFAULT_READ_CONCERN,
                      write_concern: Optional[WriteConcern] = None,
                      parse_write_concern_error: bool = False,
                      collation: Optional[Union[Collation, dict]] = None,
                      ignore_connected: bool = False) -> MutableMapping:

        if self.max_wire_version < 4 and not read_concern.ok_for_legacy:
            raise ConfigurationError(
                'Read concern of level {} is not valid with max wire version of {}'
                .format(read_concern.level, self.max_wire_version))
        if not (write_concern is None or write_concern.acknowledged
                or collation is None):
            raise ConfigurationError(
                'Collation is unsupported for unacknowledged writes.')
        if self.max_wire_version >= 5 and write_concern:
            spec['writeConcern'] = write_concern.document
        elif self.max_wire_version < 5 and collation is not None:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')

        read_preference = read_preference or self.options.read_preference
        codec_options = codec_options or self.options.codec_options

        name = next(iter(spec))
        ns = dbname + '.$cmd'

        if read_preference != ReadPreference.PRIMARY:
            flags = 4
        else:
            flags = 0

        if self.is_mongos:
            spec = message._maybe_add_read_preference(spec, read_preference)
        if read_concern.level:
            spec['readConcern'] = read_concern.document
        if collation:
            spec['collation'] = collation

        # See explanation in perform_operation method
        request_id = None
        while request_id is None or request_id in self.__request_futures:
            request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                                  codec_options, check_keys)

        if size > self.max_bson_size + message._COMMAND_OVERHEAD:
            message._raise_document_too_large(
                name, size, self.max_bson_size + message._COMMAND_OVERHEAD)

        if not ignore_connected:
            self._check_connected()

        response_future = asyncio.Future()
        self.__request_futures[request_id] = response_future

        self.send_message(msg)

        try:
            response = await response_future
        except asyncio.CancelledError:
            if request_id in self.__request_futures:
                del self.__request_futures[request_id]
            raise

        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)
        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(
                response_doc,
                None,
                allowable_errors,
                parse_write_concern_error=parse_write_concern_error)

        return response_doc
Example 35
 def _unpack_response(self, response, cursor_id, codec_options):
     return helpers._unpack_response(response, cursor_id, codec_options)
Example 36
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(
                    operation, **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(
                        response.socket_info, response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except Exception as exc:
                if publish:
                    duration = datetime.datetime.now() - start
                    listeners.publish_command_failure(duration,
                                                      _convert_exception(exc),
                                                      cmd_name, rqst_id,
                                                      self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  cmd_name, rqst_id,
                                                  self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration, exc.details,
                                                  cmd_name, rqst_id,
                                                  self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(duration,
                                                  _convert_exception(exc),
                                                  cmd_name, rqst_id,
                                                  self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = doc['data'][0]
            elif cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {
                    "cursor": {
                        "id": doc["cursor_id"],
                        "ns": self.__collection.full_name
                    },
                    "ok": 1
                }
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            listeners.publish_command_success(duration, res, cmd_name, rqst_id,
                                              self.__address)

        if from_command and cmd_name != "explain":
            cursor = doc['data'][0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = doc["cursor_id"]
            self.__data = deque(doc["data"])
            self.__retrieved += doc["number_returned"]

        if self.__id == 0:
            self.__killed = True

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
Example 37
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None,
            address=None,
            user=False,
            check_keys=False):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None,
                                       codec_options, check_keys)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            msg = "command %s on namespace %s failed: %%s" % (
                repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(response_doc, msg,
                                            allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(duration, exc.details, name,
                                               request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(duration, response_doc, name,
                                           request_id, address)
    return response_doc
Example 38
    async def _refresh(self) -> int:

        if len(self.__data) or self.__killed:
            return 0

        if self.__connection is None:
            self.__connection = await self.__collection.database.client.get_connection()

        is_query = False
        if self.__id is None:
            is_query = True
            data = await self.__connection.perform_operation(
                _Query(self.__query_flags,
                       self.__collection.database.name,
                       self.__collection.name,
                       self.__skip,
                       self.__query_spec(),
                       self.__projection,
                       self.__codec_options,
                       self.__read_preference,
                       self.__limit,
                       self.__batch_size,
                       self.__read_concern,
                       self.__collation)
            )
        elif self.__id:
            if self.__limit:
                limit = self.__limit - self.__retrieved
                if self.__batch_size:
                    limit = min(limit, self.__batch_size)
            else:
                limit = self.__batch_size

            try:
                data = await self.__connection.perform_operation(
                    _GetMore(self.__collection.database.name,
                             self.__collection.name,
                             limit,
                             self.__id,
                             self.__codec_options,
                             self.__max_await_time_ms)
                )
            except EOFError:
                self.__killed = True
                raise
        else:
            self.__killed = True
            self.__data = data = None

        if data:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)

            helpers._check_command_response(doc['data'][0])

            if not self.__explain:
                cursor = doc['data'][0]['cursor']
                self.__id = cursor['id']

                if is_query:
                    documents = cursor['firstBatch']
                else:
                    documents = cursor['nextBatch']
                self.__data = deque(documents)

                self.__retrieved += len(documents)
            else:
                self.__id = doc['cursor_id']
                self.__data = deque(doc['data'])
                self.__retrieved += doc['number_returned']

        if self.__id == 0:
            self.__killed = True

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            await self.close()

        return len(self.__data)
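This example comes from an asyncio-based driver, but the pattern is generic: _refresh() fills a deque with the next batch and iteration drains it until the server reports cursor id 0. A toy, self-contained sketch of that pattern (the class and its in-memory "batches" are hypothetical and stand in for real server round trips):

import asyncio
from collections import deque


class BatchedCursor:
    """Minimal sketch of the batch-and-refresh pattern used above."""

    def __init__(self, batches):
        self._batches = iter(batches)  # stands in for server round trips
        self._data = deque()
        self._killed = False

    async def _refresh(self):
        if self._data or self._killed:
            return len(self._data)
        try:
            self._data.extend(next(self._batches))  # "firstBatch"/"nextBatch"
        except StopIteration:
            self._killed = True  # cursor id 0: nothing left on the server
        return len(self._data)

    def __aiter__(self):
        return self

    async def __anext__(self):
        if not self._data and not await self._refresh():
            raise StopAsyncIteration
        return self._data.popleft()


async def main():
    cursor = BatchedCursor([[1, 2], [3]])
    print([doc async for doc in cursor])  # [1, 2, 3]


asyncio.run(main())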
Example n. 39
0
def command(sock,
            dbname,
            spec,
            slave_ok,
            is_mongos,
            read_preference,
            codec_options,
            check=True,
            allowable_errors=None,
            address=None,
            check_keys=False,
            listeners=None,
            max_bson_size=None,
            read_concern=DEFAULT_READ_CONCERN):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    # Publish the original command document.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern.level:
        spec['readConcern'] = read_concern.document

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, size = message.query(flags, ns, 0, -1, spec, None,
                                          codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        response = receive_message(sock, 1, request_id)
        unpacked = helpers._unpack_response(response,
                                            codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(response_doc, None,
                                            allowable_errors)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(duration, failure, name,
                                              request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(duration, response_doc, name,
                                          request_id, address)
    return response_doc
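As a rough usage sketch of the helper above (it assumes a mongod on localhost:27017 that still accepts OP_QUERY-style commands, and that the module-level names the helper relies on, such as receive_message and DEFAULT_READ_CONCERN, are in scope):

import socket

from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo.read_preferences import ReadPreference

# Open a raw connection; in the driver this socket comes from the pool.
sock = socket.create_connection(("localhost", 27017))
try:
    # Run {ping: 1} on the admin database. listeners stays None, so no
    # command monitoring events are published.
    reply = command(sock,
                    "admin",
                    SON([("ping", 1)]),
                    slave_ok=True,
                    is_mongos=False,
                    read_preference=ReadPreference.PRIMARY,
                    codec_options=CodecOptions(),
                    address=("localhost", 27017))
    print(reply)  # e.g. {'ok': 1.0}
finally:
    sock.close()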
Example n. 40
0
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.

        If message is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.
        """
        client = self.__collection.database.connection

        if message:
            kwargs = {"_must_use_master": self.__must_use_master}
            kwargs["read_preference"] = self.__read_preference
            kwargs["tag_sets"] = self.__tag_sets
            kwargs["secondary_acceptable_latency_ms"] = (
                self.__secondary_acceptable_latency_ms)
            kwargs['exhaust'] = self.__exhaust
            if self.__connection_id is not None:
                kwargs["_connection_to_use"] = self.__connection_id
            kwargs.update(self.__kwargs)

            try:
                res = client._send_message_with_response(message, **kwargs)
                self.__connection_id, (response, sock, pool) = res
                if self.__exhaust:
                    self.__exhaust_mgr = _SocketManager(sock, pool)
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:  # exhaust cursor - no getMore message
            response = client._exhaust_next(self.__exhaust_mgr.sock)

        try:
            response = helpers._unpack_response(response, self.__id,
                                                self.__as_class,
                                                self.__tz_aware,
                                                self.__uuid_subtype,
                                                self.__compile_re)
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            client.disconnect()
            raise
        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not (self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]):
            assert response["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" %
                (response['starting_from'], self.__retrieved))

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
Example n. 41
0
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(
                    operation, **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(
                        response.socket_info, response.pool)

                data = response.data
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except ConnectionFailure:
                self.__die()
                raise

        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
        except OperationFailure:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            client._reset_server_and_request_check(self.__address)
            raise
        self.__id = doc["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
            assert doc["starting_from"] == self.__retrieved, (
                "Result batch started from %s, expected %s" %
                (doc['starting_from'], self.__retrieved))

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
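The operation-less branch in the method above services exhaust cursors, which a caller requests through the public API roughly as follows (a sketch assuming a local standalone mongod; exhaust cursors are not supported through mongos):

from pymongo import MongoClient
from pymongo.cursor import CursorType

client = MongoClient("mongodb://localhost:27017")
# The server streams result batches on the same socket without further
# getMore round trips, which is exactly what the exhaust branch handles.
cursor = client.test.collection.find({}, cursor_type=CursorType.EXHAUST)
for doc in cursor:
    print(doc)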
Example n. 42
0
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except Exception as exc:
                if publish:
                    duration = datetime.datetime.now() - start
                    listeners.publish_command_failure(
                        duration, _convert_exception(exc), cmd_name, rqst_id,
                        self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, _convert_exception(exc), cmd_name, rqst_id,
                    self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = doc['data'][0]
            elif cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {"cursor": {"id": doc["cursor_id"],
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            listeners.publish_command_success(
                duration, res, cmd_name, rqst_id, self.__address)

        if from_command and cmd_name != "explain":
            cursor = doc['data'][0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = doc["cursor_id"]
            self.__data = deque(doc["data"])
            self.__retrieved += doc["number_returned"]

        if self.__id == 0:
            self.__killed = True

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
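The start/success/failure events published in this variant are delivered to command listeners registered on the client. A minimal subscriber using pymongo's monitoring API (assuming a local mongod) might look like:

import logging

from pymongo import MongoClient, monitoring

logging.basicConfig(level=logging.INFO)


class CommandLogger(monitoring.CommandListener):
    """Log every command start, success and failure the driver publishes."""

    def started(self, event):
        logging.info("command %s started (request %s)",
                     event.command_name, event.request_id)

    def succeeded(self, event):
        logging.info("command %s succeeded in %d microseconds",
                     event.command_name, event.duration_micros)

    def failed(self, event):
        logging.info("command %s failed in %d microseconds",
                     event.command_name, event.duration_micros)


client = MongoClient("mongodb://localhost:27017",
                     event_listeners=[CommandLogger()])
client.test.collection.find_one()  # publishes a 'find' start/success pair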
Example n. 43
0
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.

        If message is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.
        """
        client = self.__collection.database.connection

        if message:
            kwargs = {"_must_use_master": self.__must_use_master}
            kwargs["read_preference"] = self.__read_preference
            kwargs["tag_sets"] = self.__tag_sets
            kwargs["secondary_acceptable_latency_ms"] = (
                self.__secondary_acceptable_latency_ms)
            kwargs['exhaust'] = self.__exhaust
            if self.__connection_id is not None:
                kwargs["_connection_to_use"] = self.__connection_id
            kwargs.update(self.__kwargs)

            try:
                res = client._send_message_with_response(message, **kwargs)
                self.__connection_id, (response, sock, pool) = res
                if self.__exhaust:
                    self.__exhaust_mgr = _SocketManager(sock, pool)
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            try:
                response = client._exhaust_next(self.__exhaust_mgr.sock)
            except AutoReconnect:
                self.__killed = True
                self.__exhaust_mgr.error()
                raise

        try:
            response = helpers._unpack_response(
                response, self.__id, self.__codec_options.document_class,
                self.__codec_options.tz_aware,
                self.__codec_options.uuid_representation, self.__compile_re)
        except OperationFailure:
            self.__killed = True
            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()
            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()
            client._disconnect()
            raise

        self.__id = response["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
Example n. 44
0
    def __send_message(self, operation):
        """Send a getmore message and handle the response.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        try:
            response = client._send_message_with_response(
                operation, address=self.__address)
        except AutoReconnect:
            # Don't try to send kill cursors on another socket
            # or to another server. It can cause a _pinValue
            # assertion on some server releases if we get here
            # due to a socket timeout.
            self.__killed = True
            raise

        cmd_duration = response.duration
        rqst_id = response.request_id
        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response.data,
                                           self.__id,
                                           self.__collection.codec_options)
        except OperationFailure as exc:
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, "getMore", rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, _convert_exception(exc), "getMore", rqst_id,
                    self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in getMore command response format.
            res = {"cursor": {"id": doc["cursor_id"],
                              "ns": self.__collection.full_name,
                              "nextBatch": doc["data"]},
                   "ok": 1}
            listeners.publish_command_success(
                duration, res, "getMore", rqst_id, self.__address)

        self.__id = doc["cursor_id"]
        if self.__id == 0:
            self.__killed = True

        self.__retrieved += doc["number_returned"]
        self.__data = deque(doc["data"])
Example n. 45
0
def command(
    sock,
    dbname,
    spec,
    slave_ok,
    is_mongos,
    read_preference,
    codec_options,
    check=True,
    allowable_errors=None,
    address=None,
    user=False,
    check_keys=False,
):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `user`: is this a user command or internal?
      - `check_keys`: if True, check `spec` for invalid keys
    """
    name = next(iter(spec))
    ns = dbname + ".$cmd"
    flags = 4 if slave_ok else 0
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)

    publish = user and monitoring.enabled()
    if publish:
        start = datetime.datetime.now()

    request_id, msg, _ = message.query(flags, ns, 0, -1, spec, None, codec_options, check_keys)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        monitoring.publish_command_start(spec, dbname, request_id, address)
        start = datetime.datetime.now()

    sock.sendall(msg)
    response = receive_message(sock, 1, request_id)
    try:
        unpacked = helpers._unpack_response(response, codec_options=codec_options)

        response_doc = unpacked["data"][0]
        if check:
            msg = "command %s on namespace %s failed: %%s" % (repr(spec).replace("%", "%%"), ns)
            helpers._check_command_response(response_doc, msg, allowable_errors)
    except (NotMasterError, OperationFailure) as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            monitoring.publish_command_failure(duration, exc.details, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        monitoring.publish_command_success(duration, response_doc, name, request_id, address)
    return response_doc
Example n. 46
0
def command(sock, dbname, spec, slave_ok, is_mongos,
            read_preference, codec_options, check=True,
            allowable_errors=None, address=None,
            check_keys=False, listeners=None, max_bson_size=None,
            read_concern=DEFAULT_READ_CONCERN):
    """Execute a command over the socket, or raise socket.error.

    :Parameters:
      - `sock`: a raw socket instance
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `is_mongos`: are we connected to a mongos?
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `address`: the (host, port) of `sock`
      - `check_keys`: if True, check `spec` for invalid keys
      - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners`
      - `max_bson_size`: The maximum encoded bson size for this server
      - `read_concern`: The read concern for this command.
    """
    name = next(iter(spec))
    ns = dbname + '.$cmd'
    flags = 4 if slave_ok else 0
    # Publish the original command document.
    orig = spec
    if is_mongos:
        spec = message._maybe_add_read_preference(spec, read_preference)
    if read_concern.level:
        spec['readConcern'] = read_concern.document

    publish = listeners is not None and listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()

    request_id, msg, size = message.query(flags, ns, 0, -1, spec,
                                          None, codec_options, check_keys)

    if (max_bson_size is not None
            and size > max_bson_size + message._COMMAND_OVERHEAD):
        message._raise_document_too_large(
            name, size, max_bson_size + message._COMMAND_OVERHEAD)

    if publish:
        encoding_duration = datetime.datetime.now() - start
        listeners.publish_command_start(orig, dbname, request_id, address)
        start = datetime.datetime.now()

    try:
        sock.sendall(msg)
        response = receive_message(sock, 1, request_id)
        unpacked = helpers._unpack_response(
            response, codec_options=codec_options)

        response_doc = unpacked['data'][0]
        if check:
            helpers._check_command_response(response_doc, None, allowable_errors)
    except Exception as exc:
        if publish:
            duration = (datetime.datetime.now() - start) + encoding_duration
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = message._convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, name, request_id, address)
        raise
    if publish:
        duration = (datetime.datetime.now() - start) + encoding_duration
        listeners.publish_command_success(
            duration, response_doc, name, request_id, address)
    return response_doc
Example n. 47
0
    def __send_message(self, message):
        """Send a query or getmore message and handles the response.

        If message is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.
        """
        client = self.__collection.database.connection

        if message:
            kwargs = {"_must_use_master": self.__must_use_master}
            kwargs["read_preference"] = self.__read_preference
            kwargs["tag_sets"] = self.__tag_sets
            kwargs["secondary_acceptable_latency_ms"] = self.__secondary_acceptable_latency_ms
            kwargs["exhaust"] = self.__exhaust
            if self.__connection_id is not None:
                kwargs["_connection_to_use"] = self.__connection_id
            kwargs.update(self.__kwargs)

            try:
                res = client._send_message_with_response(message, **kwargs)
                self.__connection_id, (response, sock, pool) = res
                if self.__exhaust:
                    self.__exhaust_mgr = _SocketManager(sock, pool)
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            try:
                response = client._exhaust_next(self.__exhaust_mgr.sock)
            except AutoReconnect:
                self.__killed = True
                self.__exhaust_mgr.error()
                raise

        try:
            response = helpers._unpack_response(
                response, self.__id, self.__as_class, self.__tz_aware, self.__uuid_subtype, self.__compile_re
            )
        except OperationFailure:
            self.__killed = True
            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()
            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except AutoReconnect:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True
            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()
            client.disconnect()
            raise

        self.__id = response["cursor_id"]

        # starting from doesn't get set on getmore's for tailable cursors
        if not (self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]):
            assert response["starting_from"] == self.__retrieved, "Result batch started from %s, expected %s" % (
                response["starting_from"],
                self.__retrieved,
            )

        self.__retrieved += response["number_returned"]
        self.__data = deque(response["data"])

        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()