def test_EncodeDecodeQuery(self):
    """Round-trip an OP_QUERY message through encode/decode and compare."""
    encoded_query = BSON.encode({'x': 42})
    encoded_fields = BSON.encode({'y': 1})
    message = Query(
        collection="coll",
        n_to_skip=123,
        n_to_return=456,
        query=encoded_query,
        fields=encoded_fields,
    )
    self.__test_encode_decode(message)
def find(self, spec=None, skip=0, limit=0, fields=None, filter=None, **kwargs):
    """Query the collection and return all matching documents as a list.

    Runs as a Twisted inlineCallbacks-style generator and delivers its
    result via ``defer.returnValue``.

    :param spec: query document; defaults to a match-all ``SON()``
    :param skip: number of documents to skip on the server
    :param limit: maximum documents to return; ``<= 0`` means no limit
    :param fields: projection as a dict, or a list of field names
    :param filter: optional query modifier (sort/hint/explain/snapshot)
    :raises TypeError: if any argument has the wrong type
    """
    if spec is None:
        spec = SON()

    if not isinstance(spec, types.DictType):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(fields, (types.DictType, types.ListType, types.NoneType)):
        raise TypeError("fields must be an instance of dict or list")
    if not isinstance(skip, types.IntType):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, types.IntType):
        raise TypeError("limit must be an instance of int")

    if fields is not None:
        if not isinstance(fields, types.DictType):
            if not fields:
                # An empty projection list must still return _id.
                fields = ["_id"]
            fields = self._fields_list_to_dict(fields)

    # Query modifiers are merged into a {'$query': ..., '$<mod>': ...} wrapper.
    if isinstance(filter, (qf.sort, qf.hint, qf.explain, qf.snapshot)):
        if '$query' not in spec:
            spec = {'$query': spec}
        for k, v in filter.iteritems():
            spec['$' + k] = dict(v)

    proto = yield self._database.connection.getprotocol()

    flags = kwargs.get('flags', 0)
    query = Query(flags=flags, collection=str(self),
                  n_to_skip=skip, n_to_return=limit,
                  query=spec, fields=fields)

    reply = yield proto.send_QUERY(query)
    documents = reply.documents
    while reply.cursor_id:
        if limit <= 0:
            to_fetch = 0  # no limit: drain the cursor completely
        else:
            # FIX: the comparison was inverted (`<` instead of `>=`), which
            # broke out of the loop whenever fewer than `limit` documents had
            # arrived — exactly the case where a GETMORE is still needed —
            # silently returning a truncated result set.
            to_fetch = -1 if len(documents) >= limit else limit - len(documents)
        if to_fetch < 0:
            break

        getmore = Getmore(collection=str(self), n_to_return=to_fetch,
                          cursor_id=reply.cursor_id)
        reply = yield proto.send_GETMORE(getmore)
        documents.extend(reply.documents)

    if limit > 0:
        documents = documents[:limit]

    as_class = kwargs.get('as_class', dict)
    defer.returnValue([d.decode(as_class=as_class) for d in documents])
def _execute_batch_command(self, command_type, documents, ordered):
    """Send a batched write command (insert/update/delete) to the server.

    Runs as an inlineCallbacks-style generator. Splits `documents` into
    server-sized batches and returns, via ``defer.returnValue``, a list of
    ``(index_offset, decoded_response)`` tuples — one per acknowledged batch.

    :param command_type: key into the module-level _OP_MAP/_COMMANDS tables
    :param documents: the write operations to batch
    :param ordered: whether batches must be executed in order, stopping
        on the first write error
    """
    assert command_type in _OP_MAP
    cmd_collname = str(self._database["$cmd"])
    proto = yield self._database.connection.getprotocol()
    results = []

    def accumulate_result(reply, idx_offset):
        # Decode the server's reply and record it with its batch offset.
        result = reply.documents[0].decode()
        results.append((idx_offset, result))
        return result

    # There are four major cases with different behavior of insert_many:
    # * Unack, Unordered: sending all batches and not handling responses at all
    #   so ignoring any errors
    #
    # * Ack, Unordered: sending all batches, accumulating all responses and
    #   returning aggregated response
    #
    # * Unack, Ordered: handling DB responses despite unacknowledged write_concern
    #   because we must stop on first error (not raising it though)
    #
    # * Ack, Ordered: stopping on first error and raising BulkWriteError
    actual_write_concern = self.write_concern
    if ordered and self.write_concern.acknowledged is False:
        # Force w=1 so the server reports errors we need to stop on.
        actual_write_concern = WriteConcern(w=1)

    batches = self._generate_batch_commands(
        self._collection_name, _COMMANDS[command_type], _OP_MAP[command_type],
        documents, ordered, actual_write_concern, proto.max_bson_size,
        proto.max_write_batch_size)

    all_responses = []
    for idx_offset, batch in batches:
        batch_result = proto.send_QUERY(
            Query(collection=cmd_collname, query=batch))
        if self.write_concern.acknowledged or ordered:
            batch_result.addCallback(accumulate_result, idx_offset)
        if ordered:
            # Wait for each batch before sending the next; stop on first error.
            result = yield batch_result
            if "writeErrors" in result:
                break
        else:
            # Unordered: fire all batches concurrently, collect deferreds.
            all_responses.append(batch_result)

    if self.write_concern.acknowledged and not ordered:
        try:
            yield defer.gatherResults(all_responses, consumeErrors=True)
        except defer.FirstError as e:
            # Re-raise the underlying failure of the first batch that failed.
            e.subFailure.raiseException()

    defer.returnValue(results)
def after_connection(protocol):
    # Enforce the caller-supplied deadline (if any) before sending.
    check_deadline(kwargs.pop("_deadline", None))
    request = Query(
        flags=kwargs.get("flags", 0),
        collection=str(self),
        n_to_skip=skip,
        n_to_return=limit,
        query=spec,
        fields=fields,
    )
    d = protocol.send_QUERY(request)
    # after_reply receives itself as an extra arg so it can chain GETMOREs.
    d.addCallback(after_reply, protocol, after_reply)
    return d
def after_connection(proto):
    # Issue the initial OP_QUERY once a protocol instance is available.
    request = Query(
        flags=kwargs.get('flags', 0),
        collection=str(self),
        n_to_skip=skip,
        n_to_return=limit,
        query=spec,
        fields=fields,
    )
    # addCallback returns the same deferred, so chaining here is safe.
    return proto.send_QUERY(request).addCallback(after_reply, proto)
def configure(self, proto):
    """
    Configures the protocol using the information gathered from the remote
    Mongo instance. Such information may contain the max BSON document size,
    replica set configuration, and the master status of the instance.
    """
    # No protocol means nothing to configure; succeed immediately.
    if not proto:
        return defer.succeed(None)
    ismaster = Query(collection='admin.$cmd', query={'ismaster': 1})
    deferred = proto.send_QUERY(ismaster)
    deferred.addCallback(self._configureCallback, proto)
    return deferred
def authenticate_with_nonce(self, database, name, password):
    """Authenticate against `database` via the nonce/key handshake.

    Inline-callbacks style generator: caches the credentials, fetches a
    server nonce, then sends the derived auth key. Returns the server's
    "ok" value on success, or its "errmsg" string on failure.
    """
    database_name = str(database)
    self.cred_cache[database_name] = (name, password)
    current_connection = self.__pool[self.__index]
    proto = yield self.getprotocol()

    collection_name = database_name + '.$cmd'

    # Step 1: ask the server for a one-time nonce.
    reply = yield proto.send_QUERY(
        Query(collection=collection_name, query={'getnonce': 1}))
    nonce_doc = reply.documents[0].decode()
    if not nonce_doc["ok"]:
        defer.returnValue(nonce_doc["errmsg"])
    nonce = nonce_doc["nonce"]

    key = auth._auth_key(nonce, name, password)

    # hacky because order matters
    auth_command = SON(authenticate=1)
    auth_command['user'] = unicode(name)
    auth_command['nonce'] = nonce
    auth_command['key'] = key

    # Step 2: send the computed key back to complete authentication.
    reply = yield proto.send_QUERY(
        Query(collection=str(collection_name), query=auth_command))
    auth_doc = reply.documents[0].decode()
    if auth_doc["ok"]:
        database._authenticated = True
        current_connection.auth_set.add(database_name)
        defer.returnValue(auth_doc["ok"])
    # Failed: drop the cached credentials and report the server message.
    del self.cred_cache[database_name]
    defer.returnValue(auth_doc["errmsg"])
def test_CursorNotFound(self):
    """GETMORE on an explicitly killed cursor must fail with CursorNotFound."""
    docs = [{'v': i} for i in range(140)]
    yield self.coll.insert(docs, safe=True)

    proto = yield self.conn.getprotocol()
    reply = yield proto.send_QUERY(
        Query(query={}, n_to_return=10, collection=str(self.coll)))
    cursor = reply.cursor_id

    # Kill the server-side cursor, then try to continue reading from it.
    yield proto.send_KILL_CURSORS(KillCursors(cursors=[cursor]))
    getmore = Getmore(collection=str(self.coll), cursor_id=cursor,
                      n_to_return=10)
    self.assertFailure(proto.send_GETMORE(getmore), CursorNotFound)
def ping(self):
    """Send an `ismaster` heartbeat and schedule the next one.

    If neither success nor failure arrives within ``self.timeout`` seconds,
    the watchdog drops the connection and fires ``fail_callback``.
    """
    def on_timeout():
        # No reply in time: tear down the link and notify the owner.
        self.transport.loseConnection()
        self.fail_callback(self.transport.getPeer())

    def on_ok(result):
        if watchdog.active():
            watchdog.cancel()
            self.__next_call = reactor.callLater(self.interval, self.ping)

    def on_fail(failure):
        if watchdog.active():
            watchdog.cancel()
            on_timeout()

    watchdog = reactor.callLater(self.timeout, on_timeout)
    heartbeat = self.send_QUERY(
        Query(collection="admin.$cmd", query={"ismaster": 1}))
    heartbeat.addCallbacks(on_ok, on_fail)
def __send_ismaster(proto, **kwargs):
    # Fire an `ismaster` command at the admin database; extra keyword
    # arguments are accepted for callback-chain compatibility but unused.
    return proto.send_QUERY(
        Query(collection="admin.$cmd", query={"ismaster": 1}))
def configure(self, proto):
    """
    Configures the protocol using the information gathered from the remote
    Mongo instance. Such information may contain the max BSON document size,
    replica set configuration, and the master status of the instance.
    """
    if not proto:
        defer.returnValue(None)

    reply = yield proto.send_QUERY(
        Query(collection="admin.$cmd", query={"ismaster": 1}))

    # The ismaster reply must contain exactly one configuration document.
    if len(reply.documents) != 1:
        raise OperationFailure("TxMongo: invalid document length.")

    config = reply.documents[0].decode()

    # A falsy "ok" means the command itself failed on the server.
    if not config.get("ok"):
        code = config.get("code")
        msg = "TxMongo: " + config.get("err", "Unknown error")
        raise OperationFailure(msg, code)

    # If the URI requested a specific replica set, the peer must belong to it.
    set_name = config.get("setName")
    expected_set_name = self.uri["options"].get("replicaset")
    if expected_set_name and (expected_set_name != set_name):
        msg = "TxMongo: Mongo instance does not match requested replicaSet."
        raise ConfigurationError(msg)

    # Adopt the peer's BSON size limit and wire-protocol version range.
    max_bson_size = config.get("maxBsonObjectSize")
    if max_bson_size:
        proto.max_bson_size = max_bson_size
    proto.set_wire_versions(config.get("minWireVersion", 0),
                            config.get("maxWireVersion", 0))

    # Remember every advertised replica-set member for future reconnects.
    hosts = config.get("hosts")
    if isinstance(hosts, list) and hosts:
        for host in hosts:
            if ':' in host:
                name, port = host.split(':', 1)
                node = (name, int(port))
            else:
                node = (host, 27017)  # default MongoDB port
            if node not in self.__allnodes:
                self.__allnodes.append(node)

    # Only a master is acceptable; anything else triggers a reconnect.
    if not config.get("ismaster"):
        msg = "TxMongo: MongoDB host `%s` is not master." % config.get(
            'me')
        raise AutoReconnect(msg)
def insert_many(self, documents, ordered=True, **kwargs):
    """Insert an iterable of documents, batching them for the server.

    Inline-callbacks style generator. Each document gets an ``_id``
    (generated here if missing). Returns an ``InsertManyResult`` via
    ``defer.returnValue``; raises ``BulkWriteError`` on write errors when
    the write concern is acknowledged.

    :param documents: list of mapping objects to insert (mutated in place
        to add missing ``_id`` fields)
    :param ordered: stop at the first failing batch when True
    :raises TypeError: if any element is not a mapping
    """
    inserted_ids = []
    for doc in documents:
        if isinstance(doc, collections.Mapping):
            # setdefault both records and, if needed, generates the _id.
            inserted_ids.append(doc.setdefault("_id", ObjectId()))
        else:
            raise TypeError("TxMongo: insert_many takes list of documents.")

    cmd_collname = str(self._database["$cmd"])
    proto = yield self._database.connection.getprotocol()

    # Aggregated error/response state shared by the closures below.
    error = {
        "nInserted": 0,
        "writeErrors": [],
        "writeConcernErrors": []
    }

    def accumulate_response(reply):
        # Fold one batch's server reply into the aggregate state.
        response = reply.documents[0].decode()
        error["nInserted"] += response.get('n', 0)
        error["writeErrors"].extend(response.get("writeErrors", []))
        if "writeConcernError" in response:
            error["writeConcernErrors"].append(response["writeConcernError"])

    def has_errors():
        return error["writeErrors"] or error["writeConcernErrors"]

    def raise_error():
        # NOTE: the lambda's `error` parameter shadows the outer dict here;
        # it refers to each individual write-error document being sorted.
        error["writeErrors"].sort(key=lambda error: error["index"])
        # Attach the offending source document to each write error.
        for write_error in error["writeErrors"]:
            write_error[u"op"] = documents[write_error["index"]]
        raise BulkWriteError(error)

    # There are four major cases with different behavior of insert_many:
    # * Unack, Unordered: sending all batches and not handling responses at all
    #   so ignoring any errors
    #
    # * Ack, Unordered: sending all batches, accumulating all responses and
    #   returning aggregated response
    #
    # * Unack, Ordered: handling DB responses despite unacknowledged write_concern
    #   because we must stop on first error (not raising it though)
    #
    # * Ack, Ordered: stopping on first error and raising BulkWriteError
    actual_write_concern = self.write_concern
    if ordered and self.write_concern.acknowledged is False:
        # Force w=1 so the server reports the errors we must stop on.
        actual_write_concern = WriteConcern(w=1)

    batches = self._generate_insert_many_batches(
        self._collection_name, documents, ordered, actual_write_concern,
        proto.max_bson_size, proto.max_write_batch_size)

    all_responses = []
    for batch in batches:
        batch_result = proto.send_QUERY(
            Query(collection=cmd_collname, query=batch))
        if self.write_concern.acknowledged or ordered:
            batch_result.addCallback(accumulate_response)
        if ordered:
            # Wait for each batch before sending the next one.
            yield batch_result
            if has_errors():
                if self.write_concern.acknowledged:
                    raise_error()
                else:
                    # Unacknowledged+ordered: stop silently on first error.
                    break
        else:
            # Unordered: batches fly concurrently; collect their deferreds.
            all_responses.append(batch_result)

    if self.write_concern.acknowledged and not ordered:
        yield defer.DeferredList(all_responses)
        if has_errors():
            raise_error()

    defer.returnValue(
        InsertManyResult(inserted_ids, self.write_concern.acknowledged))
def __send_ismaster(proto, _deadline=None):
    # `_deadline` is accepted for callback-signature compatibility but is
    # not consulted here.
    ismaster_query = Query(collection="admin.$cmd", query={"ismaster": 1})
    return proto.send_QUERY(ismaster_query)